linux-stable/drivers/net/dsa/qca8k.c

// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
* Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2016 John Crispin <john@phrozen.org>
*/
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/if_bridge.h>
#include <linux/mdio.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <linux/etherdevice.h>
#include "qca8k.h"
#define MIB_DESC(_s, _o, _n) \
{ \
.size = (_s), \
.offset = (_o), \
.name = (_n), \
}
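/* Each MIB_DESC entry describes one hardware counter: .offset is relative
 * to the per-port MIB block (see QCA8K_PORT_MIB_COUNTER() below) and .size
 * is in 32-bit words, so the byte counters with size 2 span two words.
 */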
static const struct qca8k_mib_desc ar8327_mib[] = {
MIB_DESC(1, 0x00, "RxBroad"),
MIB_DESC(1, 0x04, "RxPause"),
MIB_DESC(1, 0x08, "RxMulti"),
MIB_DESC(1, 0x0c, "RxFcsErr"),
MIB_DESC(1, 0x10, "RxAlignErr"),
MIB_DESC(1, 0x14, "RxRunt"),
MIB_DESC(1, 0x18, "RxFragment"),
MIB_DESC(1, 0x1c, "Rx64Byte"),
MIB_DESC(1, 0x20, "Rx128Byte"),
MIB_DESC(1, 0x24, "Rx256Byte"),
MIB_DESC(1, 0x28, "Rx512Byte"),
MIB_DESC(1, 0x2c, "Rx1024Byte"),
MIB_DESC(1, 0x30, "Rx1518Byte"),
MIB_DESC(1, 0x34, "RxMaxByte"),
MIB_DESC(1, 0x38, "RxTooLong"),
MIB_DESC(2, 0x3c, "RxGoodByte"),
MIB_DESC(2, 0x44, "RxBadByte"),
MIB_DESC(1, 0x4c, "RxOverFlow"),
MIB_DESC(1, 0x50, "Filtered"),
MIB_DESC(1, 0x54, "TxBroad"),
MIB_DESC(1, 0x58, "TxPause"),
MIB_DESC(1, 0x5c, "TxMulti"),
MIB_DESC(1, 0x60, "TxUnderRun"),
MIB_DESC(1, 0x64, "Tx64Byte"),
MIB_DESC(1, 0x68, "Tx128Byte"),
MIB_DESC(1, 0x6c, "Tx256Byte"),
MIB_DESC(1, 0x70, "Tx512Byte"),
MIB_DESC(1, 0x74, "Tx1024Byte"),
MIB_DESC(1, 0x78, "Tx1518Byte"),
MIB_DESC(1, 0x7c, "TxMaxByte"),
MIB_DESC(1, 0x80, "TxOverSize"),
MIB_DESC(2, 0x84, "TxByte"),
MIB_DESC(1, 0x8c, "TxCollision"),
MIB_DESC(1, 0x90, "TxAbortCol"),
MIB_DESC(1, 0x94, "TxMultiCol"),
MIB_DESC(1, 0x98, "TxSingleCol"),
MIB_DESC(1, 0x9c, "TxExcDefer"),
MIB_DESC(1, 0xa0, "TxDefer"),
MIB_DESC(1, 0xa4, "TxLateCol"),
};
/* The 32bit switch registers are accessed indirectly. To achieve this we need
* to set the page of the register. Track the last page that was set to reduce
* mdio writes
*/
static u16 qca8k_current_page = 0xffff;
static void
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{
regaddr >>= 1;
*r1 = regaddr & 0x1e;
regaddr >>= 5;
*r2 = regaddr & 0x7;
regaddr >>= 3;
*page = regaddr & 0x3ff;
}
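/* Worked example of the split above, for MIB register 0x10ac:
 *
 *   qca8k_split_addr(0x10ac, &r1, &r2, &page);
 *   // page = 0x008, r2 = 0x2, r1 = 0x16
 *
 * The accessors below then talk to the pseudo PHY at 0x10 | r2 = 0x12,
 * reading the low word at register r1 and the high word at r1 + 1.
 */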
static u32
qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum)
{
u32 val;
int ret;
ret = bus->read(bus, phy_id, regnum);
if (ret >= 0) {
val = ret;
ret = bus->read(bus, phy_id, regnum + 1);
val |= ret << 16;
}
if (ret < 0) {
dev_err_ratelimited(&bus->dev,
"failed to read qca8k 32bit register\n");
return ret;
}
return val;
}
static void
qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
{
u16 lo, hi;
int ret;
lo = val & 0xffff;
hi = (u16)(val >> 16);
ret = bus->write(bus, phy_id, regnum, lo);
if (ret >= 0)
ret = bus->write(bus, phy_id, regnum + 1, hi);
if (ret < 0)
dev_err_ratelimited(&bus->dev,
"failed to write qca8k 32bit register\n");
}
static int
qca8k_set_page(struct mii_bus *bus, u16 page)
{
int ret;
if (page == qca8k_current_page)
return 0;
ret = bus->write(bus, 0x18, 0, page);
if (ret < 0) {
dev_err_ratelimited(&bus->dev,
"failed to set qca8k page\n");
return ret;
}
qca8k_current_page = page;
return 0;
}
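/* The register accessors below take the MDIO bus lock once with
 * MDIO_MUTEX_NESTED and use the raw bus->read/bus->write callbacks, so the
 * page-select write and the actual data access stay paired under a single
 * lock acquisition instead of recursing through mdiobus_read()/write().
 */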
static u32
qca8k_read(struct qca8k_priv *priv, u32 reg)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
val = qca8k_set_page(bus, page);
if (val < 0)
goto exit;
val = qca8k_mii_read32(bus, 0x10 | r2, r1);
exit:
mutex_unlock(&bus->mdio_lock);
return val;
}
static int
qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
int ret;
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(bus, page);
if (ret < 0)
goto exit;
qca8k_mii_write32(bus, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
return ret;
}
static int
qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(bus, page);
if (ret < 0)
goto exit;
val = qca8k_mii_read32(bus, 0x10 | r2, r1);
if (val < 0) {
ret = val;
goto exit;
}
val &= ~mask;
val |= write_val;
qca8k_mii_write32(bus, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
return ret;
}
static int
qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)
{
return qca8k_rmw(priv, reg, 0, val);
}
static int
qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)
{
return qca8k_rmw(priv, reg, val, 0);
}
static int
qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
int ret;
ret = qca8k_read(priv, reg);
if (ret < 0)
return ret;
*val = ret;
return 0;
}
static int
qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
return qca8k_write(priv, reg, val);
}
static const struct regmap_range qca8k_readable_ranges[] = {
regmap_reg_range(0x0000, 0x00e4), /* Global control */
regmap_reg_range(0x0100, 0x0168), /* EEE control */
regmap_reg_range(0x0200, 0x0270), /* Parser control */
regmap_reg_range(0x0400, 0x0454), /* ACL */
regmap_reg_range(0x0600, 0x0718), /* Lookup */
regmap_reg_range(0x0800, 0x0b70), /* QM */
regmap_reg_range(0x0c00, 0x0c80), /* PKT */
regmap_reg_range(0x0e00, 0x0e98), /* L3 */
regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
};
static const struct regmap_access_table qca8k_readable_table = {
.yes_ranges = qca8k_readable_ranges,
.n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
};
static struct regmap_config qca8k_regmap_config = {
.reg_bits = 16,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x16ac, /* end MIB - Port6 range */
.reg_read = qca8k_regmap_read,
.reg_write = qca8k_regmap_write,
.rd_table = &qca8k_readable_table,
};
static int
qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
{
u32 val;
int ret;
ret = read_poll_timeout(qca8k_read, val, !(val & mask),
0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
priv, reg);
/* Check if qca8k_read has failed for a different reason
* before returning -ETIMEDOUT
*/
if (ret < 0 && val < 0)
return val;
return ret;
}
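/* Typical usage, as in the table accessors below:
 *
 *   ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
 *
 * returns 0 once the busy bit clears, -ETIMEDOUT after
 * QCA8K_BUSY_WAIT_TIMEOUT ms, or the underlying read error.
 */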
static int
qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
u32 reg[4], val;
int i;
/* load the ARL table into an array */
for (i = 0; i < 4; i++) {
val = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4));
if (val < 0)
return val;
reg[i] = val;
}
/* vid - 83:72 */
fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;
/* aging - 67:64 */
fdb->aging = reg[2] & QCA8K_ATU_STATUS_M;
/* portmask - 54:48 */
fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;
/* mac - 47:0 */
fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff;
fdb->mac[1] = reg[1] & 0xff;
fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
fdb->mac[5] = reg[0] & 0xff;
return 0;
}
static void
qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
u8 aging)
{
u32 reg[3] = { 0 };
int i;
/* vid - 83:72 */
reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;
/* aging - 67:64 */
reg[2] |= aging & QCA8K_ATU_STATUS_M;
/* portmask - 54:48 */
reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
/* mac - 47:0 */
reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;
reg[1] |= mac[1];
reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;
reg[0] |= mac[5];
/* load the array into the ARL table */
for (i = 0; i < 3; i++)
qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
}
static int
qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
{
u32 reg;
int ret;
/* Set the command and FDB index */
reg = QCA8K_ATU_FUNC_BUSY;
reg |= cmd;
if (port >= 0) {
reg |= QCA8K_ATU_FUNC_PORT_EN;
reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;
}
/* Write the function register triggering the table access */
ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
if (ret)
return ret;
/* wait for completion */
ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
if (ret)
return ret;
/* Check for table full violation when adding an entry */
if (cmd == QCA8K_FDB_LOAD) {
reg = qca8k_read(priv, QCA8K_REG_ATU_FUNC);
if (reg < 0)
return reg;
if (reg & QCA8K_ATU_FUNC_FULL)
return -ENOMEM;
}
return 0;
}
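/* Fetch the ARL entry that follows @fdb: the current entry is written back
 * into the ATU data registers so the NEXT command knows where to resume,
 * then the result is read out into @fdb.
 */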
static int
qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
{
int ret;
qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
if (ret < 0)
return ret;
return qca8k_fdb_read(priv, fdb);
}
static int
qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
u16 vid, u8 aging)
{
int ret;
mutex_lock(&priv->reg_mutex);
qca8k_fdb_write(priv, vid, port_mask, mac, aging);
ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
mutex_unlock(&priv->reg_mutex);
return ret;
}
static int
qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
{
int ret;
mutex_lock(&priv->reg_mutex);
qca8k_fdb_write(priv, vid, port_mask, mac, 0);
ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
mutex_unlock(&priv->reg_mutex);
return ret;
}
static void
qca8k_fdb_flush(struct qca8k_priv *priv)
{
mutex_lock(&priv->reg_mutex);
qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
mutex_unlock(&priv->reg_mutex);
}
static int
qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
{
u32 reg;
int ret;
/* Set the command and VLAN index */
reg = QCA8K_VTU_FUNC1_BUSY;
reg |= cmd;
reg |= vid << QCA8K_VTU_FUNC1_VID_S;
/* Write the function register triggering the table access */
ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
if (ret)
return ret;
/* wait for completion */
ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
if (ret)
return ret;
/* Check for table full violation when adding an entry */
if (cmd == QCA8K_VLAN_LOAD) {
reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC1);
if (reg < 0)
return reg;
if (reg & QCA8K_VTU_FUNC1_FULL)
return -ENOMEM;
}
return 0;
}
static int
qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
{
u32 reg;
int ret;
/* We do the right thing with VLAN 0 and treat it as untagged while
 * preserving the tag on egress.
 */
if (vid == 0)
return 0;
mutex_lock(&priv->reg_mutex);
ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
if (ret < 0)
goto out;
reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0);
if (reg < 0) {
ret = reg;
goto out;
}
reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
if (untagged)
reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG <<
QCA8K_VTU_FUNC0_EG_MODE_S(port);
else
reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG <<
QCA8K_VTU_FUNC0_EG_MODE_S(port);
ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
if (ret)
goto out;
ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
out:
mutex_unlock(&priv->reg_mutex);
return ret;
}
static int
qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
{
u32 reg, mask;
int ret, i;
bool del;
mutex_lock(&priv->reg_mutex);
ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
if (ret < 0)
goto out;
reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0);
if (reg < 0) {
ret = reg;
goto out;
}
reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port));
reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT <<
QCA8K_VTU_FUNC0_EG_MODE_S(port);
/* Check if we're the last member to be removed */
del = true;
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
mask = QCA8K_VTU_FUNC0_EG_MODE_NOT;
mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i);
if ((reg & mask) != mask) {
del = false;
break;
}
}
if (del) {
ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
} else {
ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
if (ret)
goto out;
ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
}
out:
mutex_unlock(&priv->reg_mutex);
return ret;
}
static int
qca8k_mib_init(struct qca8k_priv *priv)
{
int ret;
mutex_lock(&priv->reg_mutex);
ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
if (ret)
goto exit;
ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
if (ret)
goto exit;
ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
if (ret)
goto exit;
ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
exit:
mutex_unlock(&priv->reg_mutex);
return ret;
}
static void
qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
{
u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
/* Ports 0 and 6 have no internal PHY */
if (port > 0 && port < 6)
mask |= QCA8K_PORT_STATUS_LINK_AUTO;
if (enable)
qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
else
qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
}
static u32
qca8k_port_to_phy(int port)
{
/* From Andrew Lunn:
* Port 0 has no internal phy.
* Port 1 has an internal PHY at MDIO address 0.
* Port 2 has an internal PHY at MDIO address 1.
* ...
* Port 5 has an internal PHY at MDIO address 4.
* Port 6 has no internal PHY.
*/
return port - 1;
}
static int
qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data)
{
u32 phy, val;
int ret;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
/* The caller is responsible for not passing bad ports,
 * but we still would like to make spills impossible.
 */
phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
QCA8K_MDIO_MASTER_DATA(data);
ret = qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
if (ret)
return ret;
return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
}
static int
qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum)
{
u32 phy, val;
int ret;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
/* The caller is responsible for not passing bad ports,
 * but we still would like to make spills impossible.
 */
phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum);
ret = qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
if (ret)
return ret;
ret = qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
if (ret)
return ret;
val = qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL);
if (val < 0)
return val;
val &= QCA8K_MDIO_MASTER_DATA_MASK;
return val;
}
static int
qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
{
struct qca8k_priv *priv = ds->priv;
return qca8k_mdio_write(priv, port, regnum, data);
}
static int
qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct qca8k_priv *priv = ds->priv;
int ret;
ret = qca8k_mdio_read(priv, port, regnum);
if (ret < 0)
return 0xffff;
return ret;
}
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
struct device_node *ports, *port;
int err;
ports = of_get_child_by_name(priv->dev->of_node, "ports");
if (!ports)
ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
if (!ports)
return -EINVAL;
for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &reg);
if (err) {
of_node_put(port);
of_node_put(ports);
return err;
}
if (!dsa_is_user_port(priv->ds, reg))
continue;
if (of_property_read_bool(port, "phy-handle"))
external_mdio_mask |= BIT(reg);
else
internal_mdio_mask |= BIT(reg);
}
of_node_put(ports);
if (!external_mdio_mask && !internal_mdio_mask) {
dev_err(priv->dev, "no PHYs are defined.\n");
return -EINVAL;
}
/* The QCA8K_MDIO_MASTER_EN bit, which grants access to PHYs through
 * the MDIO_MASTER register, also _disconnects_ the external MDC
 * passthrough to the internal PHYs. It's not possible to use both
 * configurations at the same time!
 *
 * Because this came up during the review process:
 * If the external mdio-bus driver were capable of magically disabling
 * QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
 * accessors for the time being, it would be possible to pull this
 * off.
 */
if (!!external_mdio_mask && !!internal_mdio_mask) {
dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
return -EINVAL;
}
if (external_mdio_mask) {
/* Make sure to disable the internal mdio bus in cases
* a dt-overlay and driver reload changed the configuration
*/
return qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_EN);
}
priv->ops.phy_read = qca8k_phy_read;
priv->ops.phy_write = qca8k_phy_write;
return 0;
}
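/* A minimal device tree sketch (hypothetical node names) showing how the
 * scan above classifies a user port: with a phy-handle it counts toward
 * the external bus, without one it falls back to the switch's MDIO master.
 *
 *   ports {
 *           port@1 {
 *                   reg = <1>;
 *                   // no phy-handle: internal_mdio_mask |= BIT(1)
 *           };
 *   };
 *
 * Adding phy-handle = <&extphy> to the node would set BIT(1) in
 * external_mdio_mask instead; mixing internal and external PHYs in one
 * tree is rejected by the function above.
 */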
static int
qca8k_setup_of_rgmii_delay(struct qca8k_priv *priv)
{
struct device_node *port_dn;
phy_interface_t mode;
struct dsa_port *dp;
u32 val;
/* CPU port is already checked */
dp = dsa_to_port(priv->ds, 0);
port_dn = dp->dn;
/* Check if port 0 is set to the correct type */
of_get_phy_mode(port_dn, &mode);
if (mode != PHY_INTERFACE_MODE_RGMII_ID &&
mode != PHY_INTERFACE_MODE_RGMII_RXID &&
mode != PHY_INTERFACE_MODE_RGMII_TXID) {
return 0;
}
switch (mode) {
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
if (of_property_read_u32(port_dn, "rx-internal-delay-ps", &val))
val = 2;
else
/* Switch regs accept value in ns, convert ps to ns */
val = val / 1000;
if (val > QCA8K_MAX_DELAY) {
dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
val = 3;
}
priv->rgmii_rx_delay = val;
/* Stop here if we need to check only for rx delay */
if (mode != PHY_INTERFACE_MODE_RGMII_ID)
break;
fallthrough;
case PHY_INTERFACE_MODE_RGMII_TXID:
if (of_property_read_u32(port_dn, "tx-internal-delay-ps", &val))
val = 1;
else
/* Switch regs accept value in ns, convert ps to ns */
val = val / 1000;
if (val > QCA8K_MAX_DELAY) {
dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
val = 3;
}
priv->rgmii_tx_delay = val;
break;
default:
return 0;
}
return 0;
}
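/* Example CPU port node (hypothetical values) exercising the parsing
 * above: "rgmii-id" requests both delays, and the picosecond properties
 * are converted to the nanosecond values the pad control registers expect.
 *
 *   port@0 {
 *           reg = <0>;
 *           phy-mode = "rgmii-id";
 *           rx-internal-delay-ps = <2000>;  // rgmii_rx_delay = 2 ns
 *           tx-internal-delay-ps = <1000>;  // rgmii_tx_delay = 1 ns
 *   };
 */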
static int
qca8k_setup(struct dsa_switch *ds)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
int ret, i;
u32 mask;
/* Make sure that port 0 is the cpu port */
if (!dsa_is_cpu_port(ds, 0)) {
dev_err(priv->dev, "port 0 is not the CPU port");
return -EINVAL;
}
mutex_init(&priv->reg_mutex);
/* Start by setting up the register mapping */
priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
&qca8k_regmap_config);
if (IS_ERR(priv->regmap))
dev_warn(priv->dev, "regmap initialization failed");
ret = qca8k_setup_mdio_bus(priv);
if (ret)
return ret;
ret = qca8k_setup_of_rgmii_delay(priv);
if (ret)
return ret;
/* Enable CPU Port */
ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
if (ret) {
dev_err(priv->dev, "failed enabling CPU port");
return ret;
}
/* Enable MIB counters */
ret = qca8k_mib_init(priv);
if (ret)
dev_warn(priv->dev, "mib init failed");
/* Enable QCA header mode on the cpu port */
ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT),
QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
if (ret) {
dev_err(priv->dev, "failed enabling QCA header mode");
return ret;
}
/* Disable forwarding by default on all ports */
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER, 0);
if (ret)
return ret;
}
/* Disable MAC by default on all ports */
for (i = 1; i < QCA8K_NUM_PORTS; i++)
qca8k_port_set_status(priv, i, 0);
/* Forward all unknown frames to CPU port for Linux processing */
ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
if (ret)
return ret;
/* Set up the connection between the CPU port & user ports */
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
/* CPU port gets connected to all user ports of the switch */
if (dsa_is_cpu_port(ds, i)) {
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
if (ret)
return ret;
}
/* Individual user ports get connected to CPU port only */
if (dsa_is_user_port(ds, i)) {
int shift = 16 * (i % 2);
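/* Two ports share each EGRESS_VLAN register: the even port
 * uses bits 11:0 and the odd port bits 27:16, hence the
 * 16-bit shift.
 */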
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER,
BIT(QCA8K_CPU_PORT));
if (ret)
return ret;
/* Enable ARP Auto-learning by default */
ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_LEARN);
if (ret)
return ret;
/* For port based vlans to work we need to set the
* default egress vid
*/
ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
0xfff << shift,
QCA8K_PORT_VID_DEF << shift);
if (ret)
return ret;
ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
if (ret)
return ret;
}
}
/* Port 5 of the qca8337 has some problems under flood conditions. The
 * original legacy driver had some specific buffer and priority settings
 * for the different ports, suggested by the QCA switch team. Add these
 * missing settings to improve switch stability under load. This problem
 * is limited to the qca8337; other qca8k switches are not affected.
 */
if (priv->switch_id == QCA8K_ID_QCA8337) {
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
switch (i) {
/* The 2 CPU ports and port 5 require different
 * priorities than the other ports.
 */
case 0:
case 5:
case 6:
mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
break;
default:
mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
}
qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
QCA8K_PORT_HOL_CTRL1_WRED_EN;
qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
QCA8K_PORT_HOL_CTRL1_ING_BUF |
QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
QCA8K_PORT_HOL_CTRL1_WRED_EN,
mask);
}
}
/* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
if (priv->switch_id == QCA8K_ID_QCA8327) {
mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
QCA8K_GLOBAL_FC_GOL_XON_THRES_S |
QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S,
mask);
}
/* Set up our port MTUs to match the power-on defaults */
for (i = 0; i < QCA8K_NUM_PORTS; i++)
priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
if (ret)
dev_warn(priv->dev, "failed setting MTU settings");
/* Flush the FDB table */
qca8k_fdb_flush(priv);
/* We don't have interrupts for link changes, so we need to poll */
ds->pcs_poll = true;
return 0;
}
static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct qca8k_priv *priv = ds->priv;
u32 reg, val;
switch (port) {
case 0: /* 1st CPU port */
if (state->interface != PHY_INTERFACE_MODE_RGMII &&
state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
state->interface != PHY_INTERFACE_MODE_SGMII)
return;
reg = QCA8K_REG_PORT0_PAD_CTRL;
break;
case 1:
case 2:
case 3:
case 4:
case 5:
/* Internal PHY, nothing to do */
return;
case 6: /* 2nd CPU port / external PHY */
if (state->interface != PHY_INTERFACE_MODE_RGMII &&
state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
state->interface != PHY_INTERFACE_MODE_SGMII &&
state->interface != PHY_INTERFACE_MODE_1000BASEX)
return;
reg = QCA8K_REG_PORT6_PAD_CTRL;
break;
default:
dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
return;
}
if (port != 6 && phylink_autoneg_inband(mode)) {
dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
__func__);
return;
}
switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII:
/* RGMII mode means no delay so don't enable the delay */
qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
break;
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
/* RGMII_ID needs internal delay. This is enabled through
* PORT5_PAD_CTRL for all ports, rather than individual port
* registers
*/
qca8k_write(priv, reg,
QCA8K_PORT_PAD_RGMII_EN |
QCA8K_PORT_PAD_RGMII_TX_DELAY(priv->rgmii_tx_delay) |
QCA8K_PORT_PAD_RGMII_RX_DELAY(priv->rgmii_rx_delay) |
QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
/* The QCA8337 requires the rgmii rx delay to be set */
if (priv->switch_id == QCA8K_ID_QCA8337)
qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
break;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
/* Enable SGMII on the port */
qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
/* Enable/disable SerDes auto-negotiation as necessary */
val = qca8k_read(priv, QCA8K_REG_PWS);
if (phylink_autoneg_inband(mode))
val &= ~QCA8K_PWS_SERDES_AEN_DIS;
else
val |= QCA8K_PWS_SERDES_AEN_DIS;
qca8k_write(priv, QCA8K_REG_PWS, val);
/* Configure the SGMII parameters */
val = qca8k_read(priv, QCA8K_REG_SGMII_CTRL);
val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD;
if (dsa_is_cpu_port(ds, port)) {
/* CPU port, we're talking to the CPU MAC, be a PHY */
val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
val |= QCA8K_SGMII_MODE_CTRL_PHY;
} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
val |= QCA8K_SGMII_MODE_CTRL_MAC;
} else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
val |= QCA8K_SGMII_MODE_CTRL_BASEX;
}
qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
break;
default:
dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
phy_modes(state->interface), port);
return;
}
}
static void
qca8k_phylink_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
switch (port) {
case 0: /* 1st CPU port */
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != PHY_INTERFACE_MODE_RGMII &&
state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
state->interface != PHY_INTERFACE_MODE_SGMII)
goto unsupported;
break;
case 1:
case 2:
case 3:
case 4:
case 5:
/* Internal PHY */
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != PHY_INTERFACE_MODE_GMII)
goto unsupported;
break;
case 6: /* 2nd CPU port / external PHY */
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != PHY_INTERFACE_MODE_RGMII &&
state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
state->interface != PHY_INTERFACE_MODE_SGMII &&
state->interface != PHY_INTERFACE_MODE_1000BASEX)
goto unsupported;
break;
default:
unsupported:
linkmode_zero(supported);
return;
}
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
phylink_set(mask, 1000baseX_Full);
phylink_set(mask, Pause);
phylink_set(mask, Asym_Pause);
linkmode_and(supported, supported, mask);
linkmode_and(state->advertising, state->advertising, mask);
}
static int
qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
struct phylink_link_state *state)
{
struct qca8k_priv *priv = ds->priv;
u32 reg;
reg = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port));
if (reg < 0)
return reg;
state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
state->an_complete = state->link;
state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
DUPLEX_HALF;
switch (reg & QCA8K_PORT_STATUS_SPEED) {
case QCA8K_PORT_STATUS_SPEED_10:
state->speed = SPEED_10;
break;
case QCA8K_PORT_STATUS_SPEED_100:
state->speed = SPEED_100;
break;
case QCA8K_PORT_STATUS_SPEED_1000:
state->speed = SPEED_1000;
break;
default:
state->speed = SPEED_UNKNOWN;
break;
}
state->pause = MLO_PAUSE_NONE;
if (reg & QCA8K_PORT_STATUS_RXFLOW)
state->pause |= MLO_PAUSE_RX;
if (reg & QCA8K_PORT_STATUS_TXFLOW)
state->pause |= MLO_PAUSE_TX;
return 1;
}
static void
qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
struct qca8k_priv *priv = ds->priv;
qca8k_port_set_status(priv, port, 0);
}
static void
qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface, struct phy_device *phydev,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
struct qca8k_priv *priv = ds->priv;
u32 reg;
if (phylink_autoneg_inband(mode)) {
reg = QCA8K_PORT_STATUS_LINK_AUTO;
} else {
switch (speed) {
case SPEED_10:
reg = QCA8K_PORT_STATUS_SPEED_10;
break;
case SPEED_100:
reg = QCA8K_PORT_STATUS_SPEED_100;
break;
case SPEED_1000:
reg = QCA8K_PORT_STATUS_SPEED_1000;
break;
default:
reg = QCA8K_PORT_STATUS_LINK_AUTO;
break;
}
if (duplex == DUPLEX_FULL)
reg |= QCA8K_PORT_STATUS_DUPLEX;
if (rx_pause || dsa_is_cpu_port(ds, port))
reg |= QCA8K_PORT_STATUS_RXFLOW;
if (tx_pause || dsa_is_cpu_port(ds, port))
reg |= QCA8K_PORT_STATUS_TXFLOW;
}
reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
}
static void
qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
{
int i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
ETH_GSTRING_LEN);
}
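/* Counters with size == 2 (the byte counters, e.g. RxGoodByte at offset
 * 0x3c) occupy two consecutive 32-bit words; the second word is read
 * separately and merged in as bits 63:32.
 */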
static void
qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
const struct qca8k_mib_desc *mib;
u32 reg, i, val;
u64 hi;
for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
mib = &ar8327_mib[i];
reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
val = qca8k_read(priv, reg);
if (val < 0)
continue;
if (mib->size == 2) {
hi = qca8k_read(priv, reg + 4);
if (hi < 0)
continue;
}
data[i] = val;
if (mib->size == 2)
data[i] |= hi << 32;
}
}
static int
qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
if (sset != ETH_SS_STATS)
return 0;
return ARRAY_SIZE(ar8327_mib);
}
static int
qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
u32 reg;
int ret;
mutex_lock(&priv->reg_mutex);
reg = qca8k_read(priv, QCA8K_REG_EEE_CTRL);
if (reg < 0) {
ret = reg;
goto exit;
}
if (eee->eee_enabled)
reg |= lpi_en;
else
reg &= ~lpi_en;
ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
exit:
mutex_unlock(&priv->reg_mutex);
return ret;
}
static int
qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
/* Nothing to do on the port's MAC */
return 0;
}
static void
qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
u32 stp_state;
switch (state) {
case BR_STATE_DISABLED:
stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
break;
case BR_STATE_BLOCKING:
stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
break;
case BR_STATE_LISTENING:
stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
break;
case BR_STATE_LEARNING:
stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
break;
case BR_STATE_FORWARDING:
default:
stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
break;
}
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
}
static int
qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
int port_mask = BIT(QCA8K_CPU_PORT);
int i, ret;
for (i = 1; i < QCA8K_NUM_PORTS; i++) {
if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
/* Add this port to the portvlan mask of the other ports
* in the bridge
*/
ret = qca8k_reg_set(priv,
QCA8K_PORT_LOOKUP_CTRL(i),
BIT(port));
if (ret)
return ret;
if (i != port)
port_mask |= BIT(i);
}
/* Add all other ports to this ports portvlan mask */
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_MEMBER, port_mask);
return ret;
}
static void
qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
int i;
for (i = 1; i < QCA8K_NUM_PORTS; i++) {
if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
/* Remove this port from the portvlan mask of the other ports
 * in the bridge
 */
qca8k_reg_clear(priv,
QCA8K_PORT_LOOKUP_CTRL(i),
BIT(port));
}
/* Set the cpu port to be the only one in the portvlan mask of
* this port
*/
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_MEMBER, BIT(QCA8K_CPU_PORT));
}
static int
qca8k_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
qca8k_port_set_status(priv, port, 1);
priv->port_sts[port].enabled = 1;
if (dsa_is_user_port(ds, port))
phy_support_asym_pause(phy);
return 0;
}
static void
qca8k_port_disable(struct dsa_switch *ds, int port)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
qca8k_port_set_status(priv, port, 0);
priv->port_sts[port].enabled = 0;
}
static int
qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct qca8k_priv *priv = ds->priv;
int i, mtu = 0;
priv->port_mtu[port] = new_mtu;
for (i = 0; i < QCA8K_NUM_PORTS; i++)
if (priv->port_mtu[i] > mtu)
mtu = priv->port_mtu[i];
/* Include L2 header / FCS length */
return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
}
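/* Example: with every other port left at the 1500 byte default, raising
 * one port to 9000 programs QCA8K_MAX_FRAME_SIZE to 9000 + 14 (ETH_HLEN)
 * + 4 (ETH_FCS_LEN) = 9018 bytes, since the switch has a single global
 * frame size register shared by all ports.
 */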
static int
qca8k_port_max_mtu(struct dsa_switch *ds, int port)
{
return QCA8K_MAX_MTU;
}
static int
qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
u16 port_mask, u16 vid)
{
/* Set the vid to the port vlan id if no vid is set */
if (!vid)
vid = QCA8K_PORT_VID_DEF;
return qca8k_fdb_add(priv, addr, port_mask, vid,
QCA8K_ATU_STATUS_STATIC);
}
static int
qca8k_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
u16 port_mask = BIT(port);
return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
}
static int
qca8k_port_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
u16 port_mask = BIT(port);
if (!vid)
vid = QCA8K_PORT_VID_DEF;
return qca8k_fdb_del(priv, addr, port_mask, vid);
}
static int
qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
struct qca8k_fdb _fdb = { 0 };
int cnt = QCA8K_NUM_FDB_RECORDS;
bool is_static;
int ret = 0;
mutex_lock(&priv->reg_mutex);
while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
if (!_fdb.aging)
break;
is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
ret = cb(_fdb.mac, _fdb.vid, is_static, data);
if (ret)
break;
}
mutex_unlock(&priv->reg_mutex);
return 0;
}
static int
qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = ds->priv;
int ret;
if (vlan_filtering) {
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_VLAN_MODE,
QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
} else {
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_VLAN_MODE,
QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
}
return ret;
}
static int
qca8k_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct qca8k_priv *priv = ds->priv;
int ret;
ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
if (ret) {
dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
return ret;
}
if (pvid) {
int shift = 16 * (port % 2);
ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
0xfff << shift, vlan->vid << shift);
if (ret)
return ret;
ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
QCA8K_PORT_VLAN_CVID(vlan->vid) |
QCA8K_PORT_VLAN_SVID(vlan->vid));
}
return ret;
}
static int
qca8k_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct qca8k_priv *priv = ds->priv;
int ret;
ret = qca8k_vlan_del(priv, port, vlan->vid);
if (ret)
dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
return ret;
}
static enum dsa_tag_protocol
qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_QCA;
}
static const struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
.get_strings = qca8k_get_strings,
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
.get_mac_eee = qca8k_get_mac_eee,
.set_mac_eee = qca8k_set_mac_eee,
.port_enable = qca8k_port_enable,
.port_disable = qca8k_port_disable,
.port_change_mtu = qca8k_port_change_mtu,
.port_max_mtu = qca8k_port_max_mtu,
.port_stp_state_set = qca8k_port_stp_state_set,
.port_bridge_join = qca8k_port_bridge_join,
.port_bridge_leave = qca8k_port_bridge_leave,
.port_fdb_add = qca8k_port_fdb_add,
.port_fdb_del = qca8k_port_fdb_del,
.port_fdb_dump = qca8k_port_fdb_dump,
.port_vlan_filtering = qca8k_port_vlan_filtering,
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
.phylink_validate = qca8k_phylink_validate,
.phylink_mac_link_state = qca8k_phylink_mac_link_state,
.phylink_mac_config = qca8k_phylink_mac_config,
.phylink_mac_link_down = qca8k_phylink_mac_link_down,
.phylink_mac_link_up = qca8k_phylink_mac_link_up,
};
static int qca8k_read_switch_id(struct qca8k_priv *priv)
{
const struct qca8k_match_data *data;
u32 val;
u8 id;
/* get the switch's ID from the compatible */
data = of_device_get_match_data(priv->dev);
if (!data)
return -ENODEV;
val = qca8k_read(priv, QCA8K_REG_MASK_CTRL);
if (val < 0)
return -ENODEV;
id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK);
if (id != data->id) {
dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
return -ENODEV;
}
priv->switch_id = id;
/* Save revision to communicate to the internal PHY driver */
priv->switch_revision = (val & QCA8K_MASK_CTRL_REV_ID_MASK);
return 0;
}
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
struct qca8k_priv *priv;
int ret;
/* allocate the private data struct so that we can probe the switch's
 * ID register
 */
priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
GPIOD_ASIS);
if (IS_ERR(priv->reset_gpio))
return PTR_ERR(priv->reset_gpio);
if (priv->reset_gpio) {
gpiod_set_value_cansleep(priv->reset_gpio, 1);
/* The active low duration must be greater than 10 ms
* and checkpatch.pl wants 20 ms.
*/
msleep(20);
gpiod_set_value_cansleep(priv->reset_gpio, 0);
}
/* Check the detected switch id */
ret = qca8k_read_switch_id(priv);
if (ret)
return ret;
priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
priv->ops = qca8k_switch_ops;
priv->ds->ops = &priv->ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
return dsa_register_switch(priv->ds);
}
static void
qca8k_sw_remove(struct mdio_device *mdiodev)
{
struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
int i;
for (i = 0; i < QCA8K_NUM_PORTS; i++)
qca8k_port_set_status(priv, i, 0);
dsa_unregister_switch(priv->ds);
}
#ifdef CONFIG_PM_SLEEP
static void
qca8k_set_pm(struct qca8k_priv *priv, int enable)
{
int i;
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
if (!priv->port_sts[i].enabled)
continue;
qca8k_port_set_status(priv, i, enable);
}
}
static int qca8k_suspend(struct device *dev)
{
struct qca8k_priv *priv = dev_get_drvdata(dev);
qca8k_set_pm(priv, 0);
return dsa_switch_suspend(priv->ds);
}
static int qca8k_resume(struct device *dev)
{
struct qca8k_priv *priv = dev_get_drvdata(dev);
qca8k_set_pm(priv, 1);
return dsa_switch_resume(priv->ds);
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
qca8k_suspend, qca8k_resume);
static const struct qca8k_match_data qca832x = {
.id = QCA8K_ID_QCA8327,
};
static const struct qca8k_match_data qca833x = {
.id = QCA8K_ID_QCA8337,
};
static const struct of_device_id qca8k_of_match[] = {
{ .compatible = "qca,qca8327", .data = &qca832x },
{ .compatible = "qca,qca8334", .data = &qca833x },
{ .compatible = "qca,qca8337", .data = &qca833x },
{ /* sentinel */ },
};
static struct mdio_driver qca8kmdio_driver = {
.probe = qca8k_sw_probe,
.remove = qca8k_sw_remove,
.mdiodrv.driver = {
.name = "qca8k",
.of_match_table = qca8k_of_match,
.pm = &qca8k_pm_ops,
},
};
mdio_module_driver(qca8kmdio_driver);
MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qca8k");