Merge tag 'thunderbolt-for-v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:
  "thunderbolt: Changes for v6.1 merge window

   This includes the following Thunderbolt/USB4 changes for the v6.1 merge
   window:
     - Support for Intel Meteor Lake integrated Thunderbolt/USB4 controller
     - Support for ASMedia USB4 controller NVM firmware upgrade
     - Receiver lane margining support
     - A few fixes and cleanups.

   All these have been in linux-next with no reported issues."

* tag 'thunderbolt-for-v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt:
  thunderbolt: Explicitly enable lane adapter hotplug events at startup
  thunderbolt: Use dev_err_probe()
  thunderbolt: Convert to use sysfs_emit()/sysfs_emit_at() APIs
  thunderbolt: Fix spelling mistake "simultaneusly" -> "simultaneously"
  thunderbolt: debugfs: Fix spelling mistakes in seq_puts text
  thunderbolt: Add support for ASMedia NVM image format
  thunderbolt: Move vendor specific NVM handling into nvm.c
  thunderbolt: Provide tb_retimer_nvm_read() analogous to tb_switch_nvm_read()
  thunderbolt: Rename and make nvm_read() available for other files
  thunderbolt: Extend NVM version fields to 32-bits
  thunderbolt: Allow NVM upgrade of USB4 host routers
  thunderbolt: Add support for receiver lane margining
  thunderbolt: Add helper to check if CL states are enabled on port
  thunderbolt: Pass CL state bitmask to tb_port_clx_supported()
  thunderbolt: Move port CL state functions into correct place in switch.c
  thunderbolt: Move tb_xdomain_parent() to tb.h
  thunderbolt: Add support for Intel Meteor Lake
  thunderbolt: Add comment where Thunderbolt 4 PCI IDs start
  thunderbolt: Add DP OUT resource when DP tunnel is discovered
commit bffcd14fac
Author: Greg Kroah-Hartman
Date:   2022-09-30 13:44:59 +02:00

16 files changed, 1823 insertions(+), 372 deletions(-)


@@ -153,7 +153,7 @@ Date: Jan 2020
KernelVersion: 5.5
Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This attribute reports number of RX lanes the device is
using simultaneusly through its upstream port.
using simultaneously through its upstream port.
What: /sys/bus/thunderbolt/devices/.../tx_speed
Date: Jan 2020
@@ -167,7 +167,7 @@ Date: Jan 2020
KernelVersion: 5.5
Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This attribute reports number of TX lanes the device is
using simultaneusly through its upstream port.
using simultaneously through its upstream port.
What: /sys/bus/thunderbolt/devices/.../vendor
Date: Sep 2017


@@ -27,6 +27,16 @@ config USB4_DEBUGFS_WRITE
Only enable this if you know what you are doing! Never enable
this for production systems or distro kernels.
config USB4_DEBUGFS_MARGINING
bool "Expose receiver lane margining operations under USB4 ports (DANGEROUS)"
depends on DEBUG_FS
depends on USB4_DEBUGFS_WRITE
help
Enables hardware and software based receiver lane margining support
under each USB4 port. Used for electrical quality and robustness
validation during manufacturing. Should not be enabled by distro
kernels.
config USB4_KUNIT_TEST
bool "KUnit tests" if !KUNIT_ALL_TESTS
depends on USB4 && KUNIT=y
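
The receiver lane margining files enabled by USB4_DEBUGFS_MARGINING above are created per USB4 port by margining_port_init() in the debugfs.c change below. As a rough usage sketch (not part of this commit; the router/port path and debugfs mount point are assumptions), a hardware voltage margining run on lane 0 could be driven from userspace like this:

	/*
	 * Hedged sketch only: exercises the margining debugfs files added
	 * in this series. Assumes CONFIG_USB4_DEBUGFS_MARGINING=y, debugfs
	 * mounted at /sys/kernel/debug and an example router "0-1", port 1.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int write_str(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, val, strlen(val)) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		const char *dir = "/sys/kernel/debug/thunderbolt/0-1/port1/margining";
		char path[128], buf[512];
		ssize_t n;
		int fd;

		snprintf(path, sizeof(path), "%s/mode", dir);
		if (write_str(path, "hardware"))
			return 1;
		snprintf(path, sizeof(path), "%s/test", dir);
		if (write_str(path, "voltage"))
			return 1;
		snprintf(path, sizeof(path), "%s/lanes", dir);
		if (write_str(path, "0"))
			return 1;
		snprintf(path, sizeof(path), "%s/run", dir);
		if (write_str(path, "1"))
			return 1;

		/* Raw result dwords followed by the human readable summary */
		snprintf(path, sizeof(path), "%s/results", dir);
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);
		return 0;
	}

Note that run rejects the request with a warning if CL states are active on the port (see margining_run_write() below).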


@@ -12,6 +12,7 @@
#include <linux/uaccess.h>
#include "tb.h"
#include "sb_regs.h"
#define PORT_CAP_PCIE_LEN 1
#define PORT_CAP_POWER_LEN 2
@@ -187,6 +188,828 @@ static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
#define DEBUGFS_MODE 0400
#endif
#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
/**
* struct tb_margining - Lane margining support
* @caps: Port lane margining capabilities
* @results: Last lane margining results
* @lanes: %0, %1 or %7 (all)
* @min_ber_level: Minimum supported BER level contour value
* @max_ber_level: Maximum supported BER level contour value
* @ber_level: Current BER level contour value
* @voltage_steps: Number of mandatory voltage steps
* @max_voltage_offset: Maximum mandatory voltage offset (in mV)
* @time_steps: Number of time margin steps
* @max_time_offset: Maximum time margin offset (in mUI)
* @software: %true if software margining is used instead of hardware
* @time: %true if time margining is used instead of voltage
* @right_high: %false if left/low margin test is performed, %true if
* right/high
*/
struct tb_margining {
u32 caps[2];
u32 results[2];
unsigned int lanes;
unsigned int min_ber_level;
unsigned int max_ber_level;
unsigned int ber_level;
unsigned int voltage_steps;
unsigned int max_voltage_offset;
unsigned int time_steps;
unsigned int max_time_offset;
bool software;
bool time;
bool right_high;
};
static bool supports_software(const struct usb4_port *usb4)
{
return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW;
}
static bool supports_hardware(const struct usb4_port *usb4)
{
return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_HW;
}
static bool both_lanes(const struct usb4_port *usb4)
{
return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_2_LANES;
}
static unsigned int independent_voltage_margins(const struct usb4_port *usb4)
{
return (usb4->margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK) >>
USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT;
}
static bool supports_time(const struct usb4_port *usb4)
{
return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_TIME;
}
/* Only applicable if supports_time() returns true */
static unsigned int independent_time_margins(const struct usb4_port *usb4)
{
return (usb4->margining->caps[1] & USB4_MARGIN_CAP_1_TIME_INDP_MASK) >>
USB4_MARGIN_CAP_1_TIME_INDP_SHIFT;
}
static ssize_t
margining_ber_level_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
unsigned int val;
int ret = 0;
char *buf;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
if (usb4->margining->software) {
ret = -EINVAL;
goto out_unlock;
}
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_unlock;
}
buf[count - 1] = '\0';
ret = kstrtouint(buf, 10, &val);
if (ret)
goto out_free;
if (val < usb4->margining->min_ber_level ||
val > usb4->margining->max_ber_level) {
ret = -EINVAL;
goto out_free;
}
usb4->margining->ber_level = val;
out_free:
free_page((unsigned long)buf);
out_unlock:
mutex_unlock(&tb->lock);
return ret < 0 ? ret : count;
}
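/*
 * Decodes the BER level contour index below: even values print as a
 * plain power of ten, odd values with a 3x multiplier. For example,
 * 0 -> "1e-12 (0)", 1 -> "3 * 1e-11 (1)", 2 -> "1e-11 (2)" and
 * 4 -> "1e-10 (4)".
 */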
static void ber_level_show(struct seq_file *s, unsigned int val)
{
if (val % 2)
seq_printf(s, "3 * 1e%d (%u)\n", -12 + (val + 1) / 2, val);
else
seq_printf(s, "1e%d (%u)\n", -12 + val / 2, val);
}
static int margining_ber_level_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
if (usb4->margining->software)
return -EINVAL;
ber_level_show(s, usb4->margining->ber_level);
return 0;
}
DEBUGFS_ATTR_RW(margining_ber_level);
static int margining_caps_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
u32 cap0, cap1;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
/* Dump the raw caps first */
cap0 = usb4->margining->caps[0];
seq_printf(s, "0x%08x\n", cap0);
cap1 = usb4->margining->caps[1];
seq_printf(s, "0x%08x\n", cap1);
seq_printf(s, "# software margining: %s\n",
supports_software(usb4) ? "yes" : "no");
if (supports_hardware(usb4)) {
seq_puts(s, "# hardware margining: yes\n");
seq_puts(s, "# minimum BER level contour: ");
ber_level_show(s, usb4->margining->min_ber_level);
seq_puts(s, "# maximum BER level contour: ");
ber_level_show(s, usb4->margining->max_ber_level);
} else {
seq_puts(s, "# hardware margining: no\n");
}
seq_printf(s, "# both lanes simultaneously: %s\n",
both_lanes(usb4) ? "yes" : "no");
seq_printf(s, "# voltage margin steps: %u\n",
usb4->margining->voltage_steps);
seq_printf(s, "# maximum voltage offset: %u mV\n",
usb4->margining->max_voltage_offset);
switch (independent_voltage_margins(usb4)) {
case USB4_MARGIN_CAP_0_VOLTAGE_MIN:
seq_puts(s, "# returns minimum between high and low voltage margins\n");
break;
case USB4_MARGIN_CAP_0_VOLTAGE_HL:
seq_puts(s, "# returns high or low voltage margin\n");
break;
case USB4_MARGIN_CAP_0_VOLTAGE_BOTH:
seq_puts(s, "# returns both high and low margins\n");
break;
}
if (supports_time(usb4)) {
seq_puts(s, "# time margining: yes\n");
seq_printf(s, "# time margining is destructive: %s\n",
cap1 & USB4_MARGIN_CAP_1_TIME_DESTR ? "yes" : "no");
switch (independent_time_margins(usb4)) {
case USB4_MARGIN_CAP_1_TIME_MIN:
seq_puts(s, "# returns minimum between left and right time margins\n");
break;
case USB4_MARGIN_CAP_1_TIME_LR:
seq_puts(s, "# returns left or right margin\n");
break;
case USB4_MARGIN_CAP_1_TIME_BOTH:
seq_puts(s, "# returns both left and right margins\n");
break;
}
seq_printf(s, "# time margin steps: %u\n",
usb4->margining->time_steps);
seq_printf(s, "# maximum time offset: %u mUI\n",
usb4->margining->max_time_offset);
} else {
seq_puts(s, "# time margining: no\n");
}
mutex_unlock(&tb->lock);
return 0;
}
DEBUGFS_ATTR_RO(margining_caps);
static ssize_t
margining_lanes_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
int ret = 0;
char *buf;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
return PTR_ERR(buf);
buf[count - 1] = '\0';
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_free;
}
if (!strcmp(buf, "0")) {
usb4->margining->lanes = 0;
} else if (!strcmp(buf, "1")) {
usb4->margining->lanes = 1;
} else if (!strcmp(buf, "all")) {
/* Needs to be supported */
if (both_lanes(usb4))
usb4->margining->lanes = 7;
else
ret = -EINVAL;
} else {
ret = -EINVAL;
}
mutex_unlock(&tb->lock);
out_free:
free_page((unsigned long)buf);
return ret < 0 ? ret : count;
}
static int margining_lanes_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
unsigned int lanes;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
lanes = usb4->margining->lanes;
if (both_lanes(usb4)) {
if (!lanes)
seq_puts(s, "[0] 1 all\n");
else if (lanes == 1)
seq_puts(s, "0 [1] all\n");
else
seq_puts(s, "0 1 [all]\n");
} else {
if (!lanes)
seq_puts(s, "[0] 1\n");
else
seq_puts(s, "0 [1]\n");
}
mutex_unlock(&tb->lock);
return 0;
}
DEBUGFS_ATTR_RW(margining_lanes);
static ssize_t margining_mode_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
int ret = 0;
char *buf;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
return PTR_ERR(buf);
buf[count - 1] = '\0';
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_free;
}
if (!strcmp(buf, "software")) {
if (supports_software(usb4))
usb4->margining->software = true;
else
ret = -EINVAL;
} else if (!strcmp(buf, "hardware")) {
if (supports_hardware(usb4))
usb4->margining->software = false;
else
ret = -EINVAL;
} else {
ret = -EINVAL;
}
mutex_unlock(&tb->lock);
out_free:
free_page((unsigned long)buf);
return ret ? ret : count;
}
static int margining_mode_show(struct seq_file *s, void *not_used)
{
const struct tb_port *port = s->private;
const struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
const char *space = "";
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
if (supports_software(usb4)) {
if (usb4->margining->software)
seq_puts(s, "[software]");
else
seq_puts(s, "software");
space = " ";
}
if (supports_hardware(usb4)) {
if (usb4->margining->software)
seq_printf(s, "%shardware", space);
else
seq_printf(s, "%s[hardware]", space);
}
mutex_unlock(&tb->lock);
seq_puts(s, "\n");
return 0;
}
DEBUGFS_ATTR_RW(margining_mode);
static int margining_run_write(void *data, u64 val)
{
struct tb_port *port = data;
struct usb4_port *usb4 = port->usb4;
struct tb_switch *sw = port->sw;
struct tb_margining *margining;
struct tb *tb = sw->tb;
int ret;
if (val != 1)
return -EINVAL;
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_rpm_put;
}
/*
* CL states may interfere with lane margining so inform the user
* and bail out.
*/
if (tb_port_is_clx_enabled(port, TB_CL1 | TB_CL2)) {
tb_port_warn(port,
"CL states are enabled, Disable them with clx=0 and re-connect\n");
ret = -EINVAL;
goto out_unlock;
}
margining = usb4->margining;
if (margining->software) {
tb_port_dbg(port, "running software %s lane margining for lanes %u\n",
margining->time ? "time" : "voltage", margining->lanes);
ret = usb4_port_sw_margin(port, margining->lanes, margining->time,
margining->right_high,
USB4_MARGIN_SW_COUNTER_CLEAR);
if (ret)
goto out_unlock;
ret = usb4_port_sw_margin_errors(port, &margining->results[0]);
} else {
tb_port_dbg(port, "running hardware %s lane margining for lanes %u\n",
margining->time ? "time" : "voltage", margining->lanes);
/* Clear the results */
margining->results[0] = 0;
margining->results[1] = 0;
ret = usb4_port_hw_margin(port, margining->lanes,
margining->ber_level, margining->time,
margining->right_high, margining->results);
}
out_unlock:
mutex_unlock(&tb->lock);
out_rpm_put:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(margining_run_fops, NULL, margining_run_write,
"%llu\n");
static ssize_t margining_results_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
/* Just clear the results */
usb4->margining->results[0] = 0;
usb4->margining->results[1] = 0;
mutex_unlock(&tb->lock);
return count;
}
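/*
 * The hardware result dwords carry margins as step counts; the helpers
 * below scale them to physical units using the advertised capabilities.
 * As an illustration (made-up numbers, not from any particular device):
 * with max_voltage_offset of 100 mV and voltage_steps of 50, a raw
 * count of 25 prints as "50 mV (25)".
 */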
static void voltage_margin_show(struct seq_file *s,
const struct tb_margining *margining, u8 val)
{
unsigned int tmp, voltage;
tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK;
voltage = tmp * margining->max_voltage_offset / margining->voltage_steps;
seq_printf(s, "%u mV (%u)", voltage, tmp);
if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
seq_puts(s, " exceeds maximum");
seq_puts(s, "\n");
}
static void time_margin_show(struct seq_file *s,
const struct tb_margining *margining, u8 val)
{
unsigned int tmp, interval;
tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK;
interval = tmp * margining->max_time_offset / margining->time_steps;
seq_printf(s, "%u mUI (%u)", interval, tmp);
if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
seq_puts(s, " exceeds maximum");
seq_puts(s, "\n");
}
static int margining_results_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb_margining *margining;
struct tb *tb = port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
margining = usb4->margining;
/* Dump the raw results first */
seq_printf(s, "0x%08x\n", margining->results[0]);
/* Only the hardware margining has two result dwords */
if (!margining->software) {
unsigned int val;
seq_printf(s, "0x%08x\n", margining->results[1]);
if (margining->time) {
if (!margining->lanes || margining->lanes == 7) {
val = margining->results[1];
seq_puts(s, "# lane 0 right time margin: ");
time_margin_show(s, margining, val);
val = margining->results[1] >>
USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT;
seq_puts(s, "# lane 0 left time margin: ");
time_margin_show(s, margining, val);
}
if (margining->lanes == 1 || margining->lanes == 7) {
val = margining->results[1] >>
USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT;
seq_puts(s, "# lane 1 right time margin: ");
time_margin_show(s, margining, val);
val = margining->results[1] >>
USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT;
seq_puts(s, "# lane 1 left time margin: ");
time_margin_show(s, margining, val);
}
} else {
if (!margining->lanes || margining->lanes == 7) {
val = margining->results[1];
seq_puts(s, "# lane 0 high voltage margin: ");
voltage_margin_show(s, margining, val);
val = margining->results[1] >>
USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT;
seq_puts(s, "# lane 0 low voltage margin: ");
voltage_margin_show(s, margining, val);
}
if (margining->lanes == 1 || margining->lanes == 7) {
val = margining->results[1] >>
USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT;
seq_puts(s, "# lane 1 high voltage margin: ");
voltage_margin_show(s, margining, val);
val = margining->results[1] >>
USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT;
seq_puts(s, "# lane 1 low voltage margin: ");
voltage_margin_show(s, margining, val);
}
}
}
mutex_unlock(&tb->lock);
return 0;
}
DEBUGFS_ATTR_RW(margining_results);
static ssize_t margining_test_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
int ret = 0;
char *buf;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
return PTR_ERR(buf);
buf[count - 1] = '\0';
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_free;
}
if (!strcmp(buf, "time") && supports_time(usb4))
usb4->margining->time = true;
else if (!strcmp(buf, "voltage"))
usb4->margining->time = false;
else
ret = -EINVAL;
mutex_unlock(&tb->lock);
out_free:
free_page((unsigned long)buf);
return ret ? ret : count;
}
static int margining_test_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
if (supports_time(usb4)) {
if (usb4->margining->time)
seq_puts(s, "voltage [time]\n");
else
seq_puts(s, "[voltage] time\n");
} else {
seq_puts(s, "[voltage]\n");
}
mutex_unlock(&tb->lock);
return 0;
}
DEBUGFS_ATTR_RW(margining_test);
static ssize_t margining_margin_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
int ret = 0;
char *buf;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
return PTR_ERR(buf);
buf[count - 1] = '\0';
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_free;
}
if (usb4->margining->time) {
if (!strcmp(buf, "left"))
usb4->margining->right_high = false;
else if (!strcmp(buf, "right"))
usb4->margining->right_high = true;
else
ret = -EINVAL;
} else {
if (!strcmp(buf, "low"))
usb4->margining->right_high = false;
else if (!strcmp(buf, "high"))
usb4->margining->right_high = true;
else
ret = -EINVAL;
}
mutex_unlock(&tb->lock);
out_free:
free_page((unsigned long)buf);
return ret ? ret : count;
}
static int margining_margin_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
if (usb4->margining->time) {
if (usb4->margining->right_high)
seq_puts(s, "left [right]\n");
else
seq_puts(s, "[left] right\n");
} else {
if (usb4->margining->right_high)
seq_puts(s, "low [high]\n");
else
seq_puts(s, "[low] high\n");
}
mutex_unlock(&tb->lock);
return 0;
}
DEBUGFS_ATTR_RW(margining_margin);
static void margining_port_init(struct tb_port *port)
{
struct tb_margining *margining;
struct dentry *dir, *parent;
struct usb4_port *usb4;
char dir_name[10];
unsigned int val;
int ret;
usb4 = port->usb4;
if (!usb4)
return;
snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
margining = kzalloc(sizeof(*margining), GFP_KERNEL);
if (!margining)
return;
ret = usb4_port_margining_caps(port, margining->caps);
if (ret) {
kfree(margining);
return;
}
usb4->margining = margining;
/* Set the initial mode */
if (supports_software(usb4))
margining->software = true;
val = (margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK) >>
USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT;
margining->voltage_steps = val;
val = (margining->caps[0] & USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK) >>
USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT;
margining->max_voltage_offset = 74 + val * 2;
if (supports_time(usb4)) {
val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_STEPS_MASK) >>
USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT;
margining->time_steps = val;
val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_OFFSET_MASK) >>
USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT;
/*
* Store it as mUI (milli Unit Interval) because we want
* to keep it as integer.
*/
margining->max_time_offset = 200 + 10 * val;
}
dir = debugfs_create_dir("margining", parent);
if (supports_hardware(usb4)) {
val = (margining->caps[1] & USB4_MARGIN_CAP_1_MIN_BER_MASK) >>
USB4_MARGIN_CAP_1_MIN_BER_SHIFT;
margining->min_ber_level = val;
val = (margining->caps[1] & USB4_MARGIN_CAP_1_MAX_BER_MASK) >>
USB4_MARGIN_CAP_1_MAX_BER_SHIFT;
margining->max_ber_level = val;
/* Set the default to minimum */
margining->ber_level = margining->min_ber_level;
debugfs_create_file("ber_level_contour", 0400, dir, port,
&margining_ber_level_fops);
}
debugfs_create_file("caps", 0400, dir, port, &margining_caps_fops);
debugfs_create_file("lanes", 0600, dir, port, &margining_lanes_fops);
debugfs_create_file("mode", 0600, dir, port, &margining_mode_fops);
debugfs_create_file("run", 0600, dir, port, &margining_run_fops);
debugfs_create_file("results", 0600, dir, port, &margining_results_fops);
debugfs_create_file("test", 0600, dir, port, &margining_test_fops);
if (independent_voltage_margins(usb4) ||
(supports_time(usb4) && independent_time_margins(usb4)))
debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops);
}
static void margining_port_remove(struct tb_port *port)
{
struct dentry *parent;
char dir_name[10];
if (!port->usb4)
return;
snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
debugfs_remove_recursive(debugfs_lookup("margining", parent));
kfree(port->usb4->margining);
port->usb4->margining = NULL;
}
static void margining_switch_init(struct tb_switch *sw)
{
struct tb_port *upstream, *downstream;
struct tb_switch *parent_sw;
u64 route = tb_route(sw);
if (!route)
return;
upstream = tb_upstream_port(sw);
parent_sw = tb_switch_parent(sw);
downstream = tb_port_at(route, parent_sw);
margining_port_init(downstream);
margining_port_init(upstream);
}
static void margining_switch_remove(struct tb_switch *sw)
{
struct tb_switch *parent_sw;
struct tb_port *downstream;
u64 route = tb_route(sw);
if (!route)
return;
/*
* Upstream is removed with the router itself but we need to
* remove the downstream port margining directory.
*/
parent_sw = tb_switch_parent(sw);
downstream = tb_port_at(route, parent_sw);
margining_port_remove(downstream);
}
static void margining_xdomain_init(struct tb_xdomain *xd)
{
struct tb_switch *parent_sw;
struct tb_port *downstream;
parent_sw = tb_xdomain_parent(xd);
downstream = tb_port_at(xd->route, parent_sw);
margining_port_init(downstream);
}
static void margining_xdomain_remove(struct tb_xdomain *xd)
{
struct tb_switch *parent_sw;
struct tb_port *downstream;
parent_sw = tb_xdomain_parent(xd);
downstream = tb_port_at(xd->route, parent_sw);
margining_port_remove(downstream);
}
#else
static inline void margining_switch_init(struct tb_switch *sw) { }
static inline void margining_switch_remove(struct tb_switch *sw) { }
static inline void margining_xdomain_init(struct tb_xdomain *xd) { }
static inline void margining_xdomain_remove(struct tb_xdomain *xd) { }
#endif
static int port_clear_all_counters(struct tb_port *port)
{
u32 *buf;
@@ -689,6 +1512,8 @@ void tb_switch_debugfs_init(struct tb_switch *sw)
debugfs_create_file("counters", 0600, debugfs_dir, port,
&counters_fops);
}
margining_switch_init(sw);
}
/**
@@ -699,9 +1524,20 @@ void tb_switch_debugfs_init(struct tb_switch *sw)
*/
void tb_switch_debugfs_remove(struct tb_switch *sw)
{
margining_switch_remove(sw);
debugfs_remove_recursive(sw->debugfs_dir);
}
void tb_xdomain_debugfs_init(struct tb_xdomain *xd)
{
margining_xdomain_init(xd);
}
void tb_xdomain_debugfs_remove(struct tb_xdomain *xd)
{
margining_xdomain_remove(xd);
}
/**
* tb_service_debugfs_init() - Add debugfs directory for service
* @svc: Thunderbolt service pointer


@@ -144,11 +144,9 @@ static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
if (!uuid_is_null(&uuids[i]))
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
&uuids[i]);
ret += sysfs_emit_at(buf, ret, "%pUb", &uuids[i]);
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
i < tb->nboot_acl - 1 ? "," : "\n");
ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n");
}
out:
@@ -247,7 +245,7 @@ static ssize_t deauthorization_show(struct device *dev,
tb->security_level == TB_SECURITY_SECURE)
deauthorization = !!tb->cm_ops->disapprove_switch;
return sprintf(buf, "%d\n", deauthorization);
return sysfs_emit(buf, "%d\n", deauthorization);
}
static DEVICE_ATTR_RO(deauthorization);
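/*
 * sysfs_emit()/sysfs_emit_at() bound the output to PAGE_SIZE and warn
 * on misuse, which is why this series prefers them over raw sprintf()
 * in sysfs show() callbacks.
 */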
@@ -270,7 +268,7 @@ static ssize_t security_show(struct device *dev, struct device_attribute *attr,
if (tb->security_level < ARRAY_SIZE(tb_security_names))
name = tb_security_names[tb->security_level];
return sprintf(buf, "%s\n", name);
return sysfs_emit(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);


@@ -2518,6 +2518,9 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_ADL_NHI1:
case PCI_DEVICE_ID_INTEL_RPL_NHI0:
case PCI_DEVICE_ID_INTEL_RPL_NHI1:
case PCI_DEVICE_ID_INTEL_MTL_M_NHI0:
case PCI_DEVICE_ID_INTEL_MTL_P_NHI0:
case PCI_DEVICE_ID_INTEL_MTL_P_NHI1:
icm->is_supported = icm_tgl_is_supported;
icm->driver_ready = icm_icl_driver_ready;
icm->set_uuid = icm_icl_set_uuid;


@@ -1149,6 +1149,7 @@ static void nhi_check_iommu(struct tb_nhi *nhi)
static int nhi_init_msi(struct tb_nhi *nhi)
{
struct pci_dev *pdev = nhi->pdev;
struct device *dev = &pdev->dev;
int res, irq, nvec;
/* In case someone left them on. */
@@ -1179,10 +1180,8 @@ static int nhi_init_msi(struct tb_nhi *nhi)
res = devm_request_irq(&pdev->dev, irq, nhi_msi,
IRQF_NO_SUSPEND, "thunderbolt", nhi);
if (res) {
dev_err(&pdev->dev, "request_irq failed, aborting\n");
return res;
}
if (res)
return dev_err_probe(dev, res, "request_irq failed, aborting\n");
}
return 0;
@@ -1223,26 +1222,21 @@ static struct tb *nhi_select_cm(struct tb_nhi *nhi)
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
struct tb_nhi *nhi;
struct tb *tb;
int res;
if (!nhi_imr_valid(pdev)) {
dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
return -ENODEV;
}
if (!nhi_imr_valid(pdev))
return dev_err_probe(dev, -ENODEV, "firmware image not valid, aborting\n");
res = pcim_enable_device(pdev);
if (res) {
dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
return res;
}
if (res)
return dev_err_probe(dev, res, "cannot enable PCI device, aborting\n");
res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
if (res) {
dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
return res;
}
if (res)
return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n");
nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
if (!nhi)
@@ -1253,7 +1247,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* cannot fail - table is allocated in pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
dev_dbg(dev, "total paths: %d\n", nhi->hop_count);
nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
sizeof(*nhi->tx_rings), GFP_KERNEL);
@@ -1266,18 +1260,14 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_iommu(nhi);
res = nhi_init_msi(nhi);
if (res) {
dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
return res;
}
if (res)
return dev_err_probe(dev, res, "cannot enable MSI, aborting\n");
spin_lock_init(&nhi->lock);
res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (res) {
dev_err(&pdev->dev, "failed to set DMA mask\n");
return res;
}
if (res)
return dev_err_probe(dev, res, "failed to set DMA mask\n");
pci_set_master(pdev);
@@ -1288,13 +1278,11 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
tb = nhi_select_cm(nhi);
if (!tb) {
dev_err(&nhi->pdev->dev,
if (!tb)
return dev_err_probe(dev, -ENODEV,
"failed to determine connection manager, aborting\n");
return -ENODEV;
}
dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
res = tb_domain_add(tb);
if (res) {
@@ -1398,6 +1386,7 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
/* Thunderbolt 4 */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
@@ -1414,6 +1403,12 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_M_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },


@@ -75,6 +75,9 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
#define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e
#define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d
#define PCI_DEVICE_ID_INTEL_MTL_M_NHI0 0x7eb2
#define PCI_DEVICE_ID_INTEL_MTL_P_NHI0 0x7ec2
#define PCI_DEVICE_ID_INTEL_MTL_P_NHI1 0x7ec3
#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d
#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
#define PCI_DEVICE_ID_INTEL_TGL_NHI0 0x9a1b


@@ -12,19 +12,315 @@
#include "tb.h"
/* Intel specific NVM offsets */
#define INTEL_NVM_DEVID 0x05
#define INTEL_NVM_VERSION 0x08
#define INTEL_NVM_CSS 0x10
#define INTEL_NVM_FLASH_SIZE 0x45
/* ASMedia specific NVM offsets */
#define ASMEDIA_NVM_DATE 0x1c
#define ASMEDIA_NVM_VERSION 0x28
static DEFINE_IDA(nvm_ida);
/**
* struct tb_nvm_vendor_ops - Vendor specific NVM operations
* @read_version: Reads out NVM version from the flash
* @validate: Validates the NVM image before update (optional)
* @write_headers: Writes headers before the rest of the image (optional)
*/
struct tb_nvm_vendor_ops {
int (*read_version)(struct tb_nvm *nvm);
int (*validate)(struct tb_nvm *nvm);
int (*write_headers)(struct tb_nvm *nvm);
};
/**
* struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping
* @vendor: Vendor ID
* @vops: Vendor specific NVM operations
*
* Maps vendor ID to NVM vendor operations. If there is no mapping then
* NVM firmware upgrade is disabled for the device.
*/
struct tb_nvm_vendor {
u16 vendor;
const struct tb_nvm_vendor_ops *vops;
};
static int intel_switch_nvm_version(struct tb_nvm *nvm)
{
struct tb_switch *sw = tb_to_switch(nvm->dev);
u32 val, nvm_size, hdr_size;
int ret;
/*
* If the switch is in safe-mode the only accessible portion of
* the NVM is the non-active one where userspace is expected to
* write new functional NVM.
*/
if (sw->safe_mode)
return 0;
ret = tb_switch_nvm_read(sw, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
if (ret)
return ret;
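/*
 * The low three bits of the flash size field encode the total flash
 * size as (1 << (val & 7)) Mbit, converted to bytes below. What is
 * left after the header is split in half, giving the size of one
 * (the active) NVM region.
 */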
hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
nvm_size = (SZ_1M << (val & 7)) / 8;
nvm_size = (nvm_size - hdr_size) / 2;
ret = tb_switch_nvm_read(sw, INTEL_NVM_VERSION, &val, sizeof(val));
if (ret)
return ret;
nvm->major = (val >> 16) & 0xff;
nvm->minor = (val >> 8) & 0xff;
nvm->active_size = nvm_size;
return 0;
}
static int intel_switch_nvm_validate(struct tb_nvm *nvm)
{
struct tb_switch *sw = tb_to_switch(nvm->dev);
unsigned int image_size, hdr_size;
u16 ds_size, device_id;
u8 *buf = nvm->buf;
image_size = nvm->buf_data_size;
/*
* FARB pointer must point inside the image and must at least
* contain parts of the digital section we will be reading here.
*/
hdr_size = (*(u32 *)buf) & 0xffffff;
if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
return -EINVAL;
/* Digital section start should be aligned to 4k page */
if (!IS_ALIGNED(hdr_size, SZ_4K))
return -EINVAL;
/*
* Read digital section size and check that it also fits inside
* the image.
*/
ds_size = *(u16 *)(buf + hdr_size);
if (ds_size >= image_size)
return -EINVAL;
if (sw->safe_mode)
return 0;
/*
* Make sure the device ID in the image matches the one
* we read from the switch config space.
*/
device_id = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
if (device_id != sw->config.device_id)
return -EINVAL;
/* Skip headers in the image */
nvm->buf_data_start = buf + hdr_size;
nvm->buf_data_size = image_size - hdr_size;
return 0;
}
static int intel_switch_nvm_write_headers(struct tb_nvm *nvm)
{
struct tb_switch *sw = tb_to_switch(nvm->dev);
if (sw->generation < 3) {
int ret;
/* Write CSS headers first */
ret = dma_port_flash_write(sw->dma_port,
DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS,
DMA_PORT_CSS_MAX_SIZE);
if (ret)
return ret;
}
return 0;
}
static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = {
.read_version = intel_switch_nvm_version,
.validate = intel_switch_nvm_validate,
.write_headers = intel_switch_nvm_write_headers,
};
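/*
 * The ASMedia version and date fields are stored with their bytes in
 * the opposite order, so the shifts below swap the low 24 bits around:
 * a raw value of 0x00aabbcc becomes 0xccbbaa.
 */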
static int asmedia_switch_nvm_version(struct tb_nvm *nvm)
{
struct tb_switch *sw = tb_to_switch(nvm->dev);
u32 val;
int ret;
ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_VERSION, &val, sizeof(val));
if (ret)
return ret;
nvm->major = (val << 16) & 0xff0000;
nvm->major |= val & 0x00ff00;
nvm->major |= (val >> 16) & 0x0000ff;
ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_DATE, &val, sizeof(val));
if (ret)
return ret;
nvm->minor = (val << 16) & 0xff0000;
nvm->minor |= val & 0x00ff00;
nvm->minor |= (val >> 16) & 0x0000ff;
/* ASMedia NVM size is fixed to 512k */
nvm->active_size = SZ_512K;
return 0;
}
static const struct tb_nvm_vendor_ops asmedia_switch_nvm_ops = {
.read_version = asmedia_switch_nvm_version,
};
/* Router vendor NVM support table */
static const struct tb_nvm_vendor switch_nvm_vendors[] = {
{ 0x174c, &asmedia_switch_nvm_ops },
{ PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops },
{ 0x8087, &intel_switch_nvm_ops },
};
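/*
 * Supporting another vendor's on-flash format amounts to a new ops
 * structure plus a table entry here, e.g. with a hypothetical vendor
 * ID and callback:
 *
 *	static const struct tb_nvm_vendor_ops foo_switch_nvm_ops = {
 *		.read_version = foo_switch_nvm_version,
 *	};
 *
 * and { 0x1234, &foo_switch_nvm_ops } added to the table above.
 */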
static int intel_retimer_nvm_version(struct tb_nvm *nvm)
{
struct tb_retimer *rt = tb_to_retimer(nvm->dev);
u32 val, nvm_size;
int ret;
ret = tb_retimer_nvm_read(rt, INTEL_NVM_VERSION, &val, sizeof(val));
if (ret)
return ret;
nvm->major = (val >> 16) & 0xff;
nvm->minor = (val >> 8) & 0xff;
ret = tb_retimer_nvm_read(rt, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
if (ret)
return ret;
nvm_size = (SZ_1M << (val & 7)) / 8;
nvm_size = (nvm_size - SZ_16K) / 2;
nvm->active_size = nvm_size;
return 0;
}
static int intel_retimer_nvm_validate(struct tb_nvm *nvm)
{
struct tb_retimer *rt = tb_to_retimer(nvm->dev);
unsigned int image_size, hdr_size;
u8 *buf = nvm->buf;
u16 ds_size, device;
image_size = nvm->buf_data_size;
/*
* FARB pointer must point inside the image and must at least
* contain parts of the digital section we will be reading here.
*/
hdr_size = (*(u32 *)buf) & 0xffffff;
if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
return -EINVAL;
/* Digital section start should be aligned to 4k page */
if (!IS_ALIGNED(hdr_size, SZ_4K))
return -EINVAL;
/*
* Read digital section size and check that it also fits inside
* the image.
*/
ds_size = *(u16 *)(buf + hdr_size);
if (ds_size >= image_size)
return -EINVAL;
/*
* Make sure the device ID in the image matches the retimer
* hardware.
*/
device = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
if (device != rt->device)
return -EINVAL;
/* Skip headers in the image */
nvm->buf_data_start = buf + hdr_size;
nvm->buf_data_size = image_size - hdr_size;
return 0;
}
static const struct tb_nvm_vendor_ops intel_retimer_nvm_ops = {
.read_version = intel_retimer_nvm_version,
.validate = intel_retimer_nvm_validate,
};
/* Retimer vendor NVM support table */
static const struct tb_nvm_vendor retimer_nvm_vendors[] = {
{ 0x8087, &intel_retimer_nvm_ops },
};
/**
* tb_nvm_alloc() - Allocate new NVM structure
* @dev: Device owning the NVM
*
* Allocates new NVM structure with unique @id and returns it. In case
* of error returns ERR_PTR().
* of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the
* NVM format of the @dev is not known by the kernel.
*/
struct tb_nvm *tb_nvm_alloc(struct device *dev)
{
const struct tb_nvm_vendor_ops *vops = NULL;
struct tb_nvm *nvm;
int ret;
int ret, i;
if (tb_is_switch(dev)) {
const struct tb_switch *sw = tb_to_switch(dev);
for (i = 0; i < ARRAY_SIZE(switch_nvm_vendors); i++) {
const struct tb_nvm_vendor *v = &switch_nvm_vendors[i];
if (v->vendor == sw->config.vendor_id) {
vops = v->vops;
break;
}
}
if (!vops) {
tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n",
sw->config.vendor_id);
return ERR_PTR(-EOPNOTSUPP);
}
} else if (tb_is_retimer(dev)) {
const struct tb_retimer *rt = tb_to_retimer(dev);
for (i = 0; i < ARRAY_SIZE(retimer_nvm_vendors); i++) {
const struct tb_nvm_vendor *v = &retimer_nvm_vendors[i];
if (v->vendor == rt->vendor) {
vops = v->vops;
break;
}
}
if (!vops) {
dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n",
rt->vendor);
return ERR_PTR(-EOPNOTSUPP);
}
} else {
return ERR_PTR(-EOPNOTSUPP);
}
nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
if (!nvm)
@@ -38,14 +334,85 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
nvm->id = ret;
nvm->dev = dev;
nvm->vops = vops;
return nvm;
}
/**
* tb_nvm_read_version() - Read and populate NVM version
* @nvm: NVM structure
*
* Uses vendor specific means to read out and fill in the existing
* active NVM version. Returns %0 in case of success and negative errno
* otherwise.
*/
int tb_nvm_read_version(struct tb_nvm *nvm)
{
const struct tb_nvm_vendor_ops *vops = nvm->vops;
if (vops && vops->read_version)
return vops->read_version(nvm);
return -EOPNOTSUPP;
}
/**
* tb_nvm_validate() - Validate new NVM image
* @nvm: NVM structure
*
* Runs vendor specific validation over the new NVM image and if all
* checks pass returns %0. As side effect updates @nvm->buf_data_start
* and @nvm->buf_data_size fields to match the actual data to be written
* to the NVM.
*
* If the validation does not pass then returns negative errno.
*/
int tb_nvm_validate(struct tb_nvm *nvm)
{
const struct tb_nvm_vendor_ops *vops = nvm->vops;
unsigned int image_size;
u8 *buf = nvm->buf;
if (!buf)
return -EINVAL;
if (!vops)
return -EOPNOTSUPP;
/* Just do basic image size checks */
image_size = nvm->buf_data_size;
if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
return -EINVAL;
/*
* Set the default data start in the buffer. The validate method
* below can change this if needed.
*/
nvm->buf_data_start = buf;
return vops->validate ? vops->validate(nvm) : 0;
}
/**
* tb_nvm_write_headers() - Write headers before the rest of the image
* @nvm: NVM structure
*
* If the vendor NVM format requires writing headers before the rest of
* the image, this function does that. Can be called even if the device
* does not need this.
*
* Returns %0 in case of success and negative errno otherwise.
*/
int tb_nvm_write_headers(struct tb_nvm *nvm)
{
const struct tb_nvm_vendor_ops *vops = nvm->vops;
return vops->write_headers ? vops->write_headers(nvm) : 0;
}
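/*
 * Expected calling order (see the switch.c and retimer.c changes in
 * this series): tb_nvm_alloc(), tb_nvm_read_version() and
 * tb_nvm_add_active()/tb_nvm_add_non_active() at probe time, while
 * tb_nvm_validate() and tb_nvm_write_headers() run just before the new
 * image is written to the hardware.
 */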
/**
* tb_nvm_add_active() - Adds active NVMem device to NVM
* @nvm: NVM structure
* @size: Size of the active NVM in bytes
* @reg_read: Pointer to the function to read the NVM (passed directly to the
* NVMem device)
*
@@ -54,7 +421,7 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
* needed. The first parameter passed to @reg_read is @nvm structure.
* Returns %0 in success and negative errno otherwise.
*/
int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read)
int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
{
struct nvmem_config config;
struct nvmem_device *nvmem;
@@ -67,7 +434,7 @@ int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read
config.id = nvm->id;
config.stride = 4;
config.word_size = 4;
config.size = size;
config.size = nvm->active_size;
config.dev = nvm->dev;
config.owner = THIS_MODULE;
config.priv = nvm;
@@ -109,17 +476,17 @@ int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
/**
* tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
* @nvm: NVM structure
* @size: Size of the non-active NVM in bytes
* @reg_write: Pointer to the function to write the NVM (passed directly
* to the NVMem device)
*
* Registers new non-active NVmem device for @nvm. The @reg_write is called
* directly from NVMem so it must handle possible concurrent access if
* needed. The first parameter passed to @reg_write is @nvm structure.
* The size of the NVMem device is set to %NVM_MAX_SIZE.
*
* Returns %0 in success and negative errno otherwise.
*/
int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
nvmem_reg_write_t reg_write)
int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write)
{
struct nvmem_config config;
struct nvmem_device *nvmem;
@@ -132,7 +499,7 @@ int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
config.id = nvm->id;
config.stride = 4;
config.word_size = 4;
config.size = size;
config.size = NVM_MAX_SIZE;
config.dev = nvm->dev;
config.owner = THIS_MODULE;
config.priv = nvm;


@@ -16,8 +16,23 @@
#define TB_MAX_RETIMER_INDEX 6
static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
size_t bytes)
/**
* tb_retimer_nvm_read() - Read contents of retimer NVM
* @rt: Retimer device
* @address: NVM address (in bytes) to start reading
* @buf: Data read from NVM is stored here
* @size: Number of bytes to read
*
* Reads retimer NVM and copies the contents to @buf. Returns %0 if the
* read was successful and negative errno in case of failure.
*/
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
size_t size)
{
return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_retimer *rt = tb_to_retimer(nvm->dev);
@@ -30,7 +45,7 @@ static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
goto out;
}
ret = usb4_port_retimer_nvm_read(rt->port, rt->index, offset, val, bytes);
ret = tb_retimer_nvm_read(rt, offset, val, bytes);
mutex_unlock(&rt->tb->lock);
out:
@@ -40,8 +55,7 @@ static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
return ret;
}
static int tb_retimer_nvm_write(void *priv, unsigned int offset, void *val,
size_t bytes)
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_retimer *rt = tb_to_retimer(nvm->dev);
@@ -59,34 +73,23 @@ static int tb_retimer_nvm_write(struct tb_retimer *rt)
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
struct tb_nvm *nvm;
u32 val, nvm_size;
int ret;
nvm = tb_nvm_alloc(&rt->dev);
if (IS_ERR(nvm))
return PTR_ERR(nvm);
if (IS_ERR(nvm)) {
ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
goto err_nvm;
}
ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_VERSION, &val,
sizeof(val));
ret = tb_nvm_read_version(nvm);
if (ret)
goto err_nvm;
nvm->major = val >> 16;
nvm->minor = val >> 8;
ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_FLASH_SIZE,
&val, sizeof(val));
ret = tb_nvm_add_active(nvm, nvm_read);
if (ret)
goto err_nvm;
nvm_size = (SZ_1M << (val & 7)) / 8;
nvm_size = (nvm_size - SZ_16K) / 2;
ret = tb_nvm_add_active(nvm, nvm_size, tb_retimer_nvm_read);
if (ret)
goto err_nvm;
ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, tb_retimer_nvm_write);
ret = tb_nvm_add_non_active(nvm, nvm_write);
if (ret)
goto err_nvm;
@@ -94,59 +97,33 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
return 0;
err_nvm:
tb_nvm_free(nvm);
dev_dbg(&rt->dev, "NVM upgrade disabled\n");
if (!IS_ERR(nvm))
tb_nvm_free(nvm);
return ret;
}
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
unsigned int image_size, hdr_size;
const u8 *buf = rt->nvm->buf;
u16 ds_size, device;
unsigned int image_size;
const u8 *buf;
int ret;
ret = tb_nvm_validate(rt->nvm);
if (ret)
return ret;
buf = rt->nvm->buf_data_start;
image_size = rt->nvm->buf_data_size;
if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
return -EINVAL;
/*
* FARB pointer must point inside the image and must at least
* contain parts of the digital section we will be reading here.
*/
hdr_size = (*(u32 *)buf) & 0xffffff;
if (hdr_size + NVM_DEVID + 2 >= image_size)
return -EINVAL;
/* Digital section start should be aligned to 4k page */
if (!IS_ALIGNED(hdr_size, SZ_4K))
return -EINVAL;
/*
* Read digital section size and check that it also fits inside
* the image.
*/
ds_size = *(u16 *)(buf + hdr_size);
if (ds_size >= image_size)
return -EINVAL;
/*
* Make sure the device ID in the image matches the retimer
* hardware.
*/
device = *(u16 *)(buf + hdr_size + NVM_DEVID);
if (device != rt->device)
return -EINVAL;
/* Skip headers in the image */
buf += hdr_size;
image_size -= hdr_size;
ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
image_size);
if (!ret)
rt->nvm->flushed = true;
if (ret)
return ret;
return ret;
rt->nvm->flushed = true;
return 0;
}
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
@@ -185,7 +162,7 @@ static ssize_t device_show(struct device *dev, struct device_attribute *attr,
{
struct tb_retimer *rt = tb_to_retimer(dev);
return sprintf(buf, "%#x\n", rt->device);
return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);
@@ -200,8 +177,10 @@ static ssize_t nvm_authenticate_show(struct device *dev,
if (!rt->nvm)
ret = -EAGAIN;
else if (rt->no_nvm_upgrade)
ret = -EOPNOTSUPP;
else
ret = sprintf(buf, "%#x\n", rt->auth_status);
ret = sysfs_emit(buf, "%#x\n", rt->auth_status);
mutex_unlock(&rt->tb->lock);
@@ -276,7 +255,7 @@ static ssize_t nvm_version_show(struct device *dev,
if (!rt->nvm)
ret = -EAGAIN;
else
ret = sprintf(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);
ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);
mutex_unlock(&rt->tb->lock);
return ret;
@@ -288,7 +267,7 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
{
struct tb_retimer *rt = tb_to_retimer(dev);
return sprintf(buf, "%#x\n", rt->vendor);
return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);


@@ -26,10 +26,68 @@ enum usb4_sb_opcode {
USB4_SB_OPCODE_NVM_BLOCK_WRITE = 0x574b4c42, /* "BLKW" */
USB4_SB_OPCODE_NVM_AUTH_WRITE = 0x48545541, /* "AUTH" */
USB4_SB_OPCODE_NVM_READ = 0x52524641, /* "AFRR" */
USB4_SB_OPCODE_READ_LANE_MARGINING_CAP = 0x50434452, /* "RDCP" */
USB4_SB_OPCODE_RUN_HW_LANE_MARGINING = 0x474d4852, /* "RHMG" */
USB4_SB_OPCODE_RUN_SW_LANE_MARGINING = 0x474d5352, /* "RSMG" */
USB4_SB_OPCODE_READ_SW_MARGIN_ERR = 0x57534452, /* "RDSW" */
};
#define USB4_SB_METADATA 0x09
#define USB4_SB_METADATA_NVM_AUTH_WRITE_MASK GENMASK(5, 0)
#define USB4_SB_DATA 0x12
/* USB4_SB_OPCODE_READ_LANE_MARGINING_CAP */
#define USB4_MARGIN_CAP_0_MODES_HW BIT(0)
#define USB4_MARGIN_CAP_0_MODES_SW BIT(1)
#define USB4_MARGIN_CAP_0_2_LANES BIT(2)
#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK GENMASK(4, 3)
#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT 3
#define USB4_MARGIN_CAP_0_VOLTAGE_MIN 0x0
#define USB4_MARGIN_CAP_0_VOLTAGE_HL 0x1
#define USB4_MARGIN_CAP_0_VOLTAGE_BOTH 0x2
#define USB4_MARGIN_CAP_0_TIME BIT(5)
#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK GENMASK(12, 6)
#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT 6
#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK GENMASK(18, 13)
#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT 13
#define USB4_MARGIN_CAP_1_TIME_DESTR BIT(8)
#define USB4_MARGIN_CAP_1_TIME_INDP_MASK GENMASK(10, 9)
#define USB4_MARGIN_CAP_1_TIME_INDP_SHIFT 9
#define USB4_MARGIN_CAP_1_TIME_MIN 0x0
#define USB4_MARGIN_CAP_1_TIME_LR 0x1
#define USB4_MARGIN_CAP_1_TIME_BOTH 0x2
#define USB4_MARGIN_CAP_1_TIME_STEPS_MASK GENMASK(15, 11)
#define USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT 11
#define USB4_MARGIN_CAP_1_TIME_OFFSET_MASK GENMASK(20, 16)
#define USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT 16
#define USB4_MARGIN_CAP_1_MIN_BER_MASK GENMASK(25, 21)
#define USB4_MARGIN_CAP_1_MIN_BER_SHIFT 21
#define USB4_MARGIN_CAP_1_MAX_BER_MASK GENMASK(30, 26)
#define USB4_MARGIN_CAP_1_MAX_BER_SHIFT 26
/* USB4_SB_OPCODE_RUN_HW_LANE_MARGINING */
#define USB4_MARGIN_HW_TIME BIT(3)
#define USB4_MARGIN_HW_RH BIT(4)
#define USB4_MARGIN_HW_BER_MASK GENMASK(9, 5)
#define USB4_MARGIN_HW_BER_SHIFT 5
/* Applicable to all margin values */
#define USB4_MARGIN_HW_RES_1_MARGIN_MASK GENMASK(6, 0)
#define USB4_MARGIN_HW_RES_1_EXCEEDS BIT(7)
/* Different lane margin shifts */
#define USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT 8
#define USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT 16
#define USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT 24
/* USB4_SB_OPCODE_RUN_SW_LANE_MARGINING */
#define USB4_MARGIN_SW_TIME BIT(3)
#define USB4_MARGIN_SW_RH BIT(4)
#define USB4_MARGIN_SW_COUNTER_MASK GENMASK(14, 13)
#define USB4_MARGIN_SW_COUNTER_SHIFT 13
#define USB4_MARGIN_SW_COUNTER_NOP 0x0
#define USB4_MARGIN_SW_COUNTER_CLEAR 0x1
#define USB4_MARGIN_SW_COUNTER_START 0x2
#define USB4_MARGIN_SW_COUNTER_STOP 0x3
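/*
 * In the software margining path the driver starts a run with
 * USB4_MARGIN_SW_COUNTER_CLEAR and afterwards collects the accumulated
 * error counters using USB4_SB_OPCODE_READ_SW_MARGIN_ERR (see
 * margining_run_write() in debugfs.c).
 */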
#endif


@@ -19,8 +19,6 @@
/* Switch NVM support */
#define NVM_CSS 0x10
struct nvm_auth_status {
struct list_head list;
uuid_t uuid;
@@ -102,70 +100,30 @@ static void nvm_clear_auth_status(const struct tb_switch *sw)
static int nvm_validate_and_write(struct tb_switch *sw)
{
unsigned int image_size, hdr_size;
const u8 *buf = sw->nvm->buf;
u16 ds_size;
unsigned int image_size;
const u8 *buf;
int ret;
if (!buf)
return -EINVAL;
ret = tb_nvm_validate(sw->nvm);
if (ret)
return ret;
ret = tb_nvm_write_headers(sw->nvm);
if (ret)
return ret;
buf = sw->nvm->buf_data_start;
image_size = sw->nvm->buf_data_size;
if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
return -EINVAL;
/*
* FARB pointer must point inside the image and must at least
* contain parts of the digital section we will be reading here.
*/
hdr_size = (*(u32 *)buf) & 0xffffff;
if (hdr_size + NVM_DEVID + 2 >= image_size)
return -EINVAL;
/* Digital section start should be aligned to 4k page */
if (!IS_ALIGNED(hdr_size, SZ_4K))
return -EINVAL;
/*
* Read digital section size and check that it also fits inside
* the image.
*/
ds_size = *(u16 *)(buf + hdr_size);
if (ds_size >= image_size)
return -EINVAL;
if (!sw->safe_mode) {
u16 device_id;
/*
* Make sure the device ID in the image matches the one
* we read from the switch config space.
*/
device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
if (device_id != sw->config.device_id)
return -EINVAL;
if (sw->generation < 3) {
/* Write CSS headers first */
ret = dma_port_flash_write(sw->dma_port,
DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
DMA_PORT_CSS_MAX_SIZE);
if (ret)
return ret;
}
/* Skip headers in the image */
buf += hdr_size;
image_size -= hdr_size;
}
if (tb_switch_is_usb4(sw))
ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
else
ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
if (!ret)
sw->nvm->flushed = true;
return ret;
if (ret)
return ret;
sw->nvm->flushed = true;
return 0;
}
static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
@@ -300,14 +258,6 @@ static inline bool nvm_upgradeable(struct tb_switch *sw)
return nvm_readable(sw);
}
static inline int nvm_read(struct tb_switch *sw, unsigned int address,
void *buf, size_t size)
{
if (tb_switch_is_usb4(sw))
return usb4_switch_nvm_read(sw, address, buf, size);
return dma_port_flash_read(sw->dma_port, address, buf, size);
}
static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
int ret;
@@ -335,8 +285,26 @@ static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
return ret;
}
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
size_t bytes)
/**
* tb_switch_nvm_read() - Read router NVM
* @sw: Router whose NVM to read
* @address: Start address on the NVM
* @buf: Buffer where the read data is copied
* @size: Size of the buffer in bytes
*
* Reads from router NVM and returns the requested data in @buf. Locking
* is up to the caller. Returns %0 in success and negative errno in case
* of failure.
*/
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size)
{
if (tb_switch_is_usb4(sw))
return usb4_switch_nvm_read(sw, address, buf, size);
return dma_port_flash_read(sw->dma_port, address, buf, size);
}
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_switch *sw = tb_to_switch(nvm->dev);
@@ -349,7 +317,7 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
goto out;
}
ret = nvm_read(sw, offset, val, bytes);
ret = tb_switch_nvm_read(sw, offset, val, bytes);
mutex_unlock(&sw->tb->lock);
out:
@@ -359,8 +327,7 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
return ret;
}
static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
size_t bytes)
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_switch *sw = tb_to_switch(nvm->dev);
@@ -384,28 +351,20 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
static int tb_switch_nvm_add(struct tb_switch *sw)
{
struct tb_nvm *nvm;
u32 val;
int ret;
if (!nvm_readable(sw))
return 0;
/*
* The NVM format of non-Intel hardware is not known so
* currently restrict NVM upgrade for Intel hardware. We may
* relax this in the future when we learn other NVM formats.
*/
if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
sw->config.vendor_id != 0x8087) {
dev_info(&sw->dev,
"NVM format of vendor %#x is not known, disabling NVM upgrade\n",
sw->config.vendor_id);
return 0;
nvm = tb_nvm_alloc(&sw->dev);
if (IS_ERR(nvm)) {
ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
goto err_nvm;
}
nvm = tb_nvm_alloc(&sw->dev);
if (IS_ERR(nvm))
return PTR_ERR(nvm);
ret = tb_nvm_read_version(nvm);
if (ret)
goto err_nvm;
/*
* If the switch is in safe-mode the only accessible portion of
@@ -413,31 +372,13 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
* write new functional NVM.
*/
if (!sw->safe_mode) {
u32 nvm_size, hdr_size;
ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
if (ret)
goto err_nvm;
hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
nvm_size = (SZ_1M << (val & 7)) / 8;
nvm_size = (nvm_size - hdr_size) / 2;
ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
if (ret)
goto err_nvm;
nvm->major = val >> 16;
nvm->minor = val >> 8;
ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
ret = tb_nvm_add_active(nvm, nvm_read);
if (ret)
goto err_nvm;
}
if (!sw->no_nvm_upgrade) {
ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
tb_switch_nvm_write);
ret = tb_nvm_add_non_active(nvm, nvm_write);
if (ret)
goto err_nvm;
}
@@ -446,7 +387,11 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
return 0;
err_nvm:
tb_nvm_free(nvm);
tb_sw_dbg(sw, "NVM upgrade disabled\n");
sw->no_nvm_upgrade = true;
if (!IS_ERR(nvm))
tb_nvm_free(nvm);
return ret;
}
@@ -1229,6 +1174,135 @@ int tb_port_update_credits(struct tb_port *port)
return tb_port_do_update_credits(port->dual_link_port);
}
static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
u32 phy;
int ret;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (secondary)
phy |= LANE_ADP_CS_1_PMS;
else
phy &= ~LANE_ADP_CS_1_PMS;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_pm_secondary_enable(struct tb_port *port)
{
return __tb_port_pm_secondary_set(port, true);
}
static int tb_port_pm_secondary_disable(struct tb_port *port)
{
return __tb_port_pm_secondary_set(port, false);
}
/* Called for USB4 or Titan Ridge routers only */
static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx_mask)
{
u32 val, mask = 0;
bool ret;
/* Don't enable CLx in case of two single-lane links */
if (!port->bonded && port->dual_link_port)
return false;
/* Don't enable CLx in case of inter-domain link */
if (port->xdomain)
return false;
if (tb_switch_is_usb4(port->sw)) {
if (!usb4_port_clx_supported(port))
return false;
} else if (!tb_lc_is_clx_supported(port)) {
return false;
}
if (clx_mask & TB_CL1) {
/* CL0s and CL1 are enabled and supported together */
mask |= LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
}
if (clx_mask & TB_CL2)
mask |= LANE_ADP_CS_0_CL2_SUPPORT;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_0, 1);
if (ret)
return false;
return !!(val & mask);
}
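
Since @clx_mask is now a bitmask, one call can probe several CL states at once; a hypothetical caller (not from this series):

static void example_report_clx_support(struct tb_port *port)
{
	/* True if CL0s/CL1 or CL2 is supported on this port */
	if (tb_port_clx_supported(port, TB_CL1 | TB_CL2))
		tb_port_dbg(port, "CL states supported\n");
}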
static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
{
u32 phy, mask;
int ret;
/* CL0s and CL1 are enabled and supported together */
if (clx == TB_CL1)
mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
else
/* For now we support only CL0s and CL1, not CL2 */
return -EOPNOTSUPP;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (enable)
phy |= mask;
else
phy &= ~mask;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
{
return __tb_port_clx_set(port, clx, false);
}
static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
{
return __tb_port_clx_set(port, clx, true);
}
/**
* tb_port_is_clx_enabled() - Is the given CL state enabled
* @port: USB4 port to check
* @clx_mask: Mask of CL states to check
*
* Returns true if any of the given CL states is enabled for @port.
*/
bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask)
{
u32 val, mask = 0;
int ret;
if (!tb_port_clx_supported(port, clx_mask))
return false;
if (clx_mask & TB_CL1)
mask |= LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
if (clx_mask & TB_CL2)
mask |= LANE_ADP_CS_1_CL2_ENABLE;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return false;
return !!(val & mask);
}
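
A typical use for the new helper is refusing an operation while the link may be in a low-power CL state; a sketch with an illustrative -EBUSY policy:

static int example_require_clx_off(struct tb_port *port)
{
	/* Bail out if any CL state is currently enabled on the port */
	if (tb_port_is_clx_enabled(port, TB_CL1 | TB_CL2))
		return -EBUSY;
	return 0;
}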
static int tb_port_start_lane_initialization(struct tb_port *port)
{
int ret;
@@ -1620,7 +1694,7 @@ static ssize_t authorized_show(struct device *dev,
{
struct tb_switch *sw = tb_to_switch(dev);
return sprintf(buf, "%u\n", sw->authorized);
return sysfs_emit(buf, "%u\n", sw->authorized);
}
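
The sprintf() conversions below all follow the same pattern: sysfs_emit() knows the output goes to a PAGE_SIZE sysfs buffer and refuses to overrun it. A generic sketch of the pattern (attribute name and value are made up):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() bounds the write to the PAGE_SIZE sysfs buffer */
	return sysfs_emit(buf, "%u\n", 0);
}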
static int disapprove_switch(struct device *dev, void *not_used)
@@ -1730,7 +1804,7 @@ static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
return sprintf(buf, "%u\n", sw->boot);
return sysfs_emit(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);
@@ -1739,7 +1813,7 @@ static ssize_t device_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
return sprintf(buf, "%#x\n", sw->device);
return sysfs_emit(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);
@@ -1748,7 +1822,7 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
}
static DEVICE_ATTR_RO(device_name);
@@ -1757,7 +1831,7 @@ generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
return sprintf(buf, "%u\n", sw->generation);
return sysfs_emit(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);
@@ -1771,9 +1845,9 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
return restart_syscall();
if (sw->key)
ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
else
ret = sprintf(buf, "\n");
ret = sysfs_emit(buf, "\n");
mutex_unlock(&sw->tb->lock);
return ret;
@@ -1818,7 +1892,7 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
}
/*
@@ -1833,7 +1907,7 @@ static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
return sprintf(buf, "%u\n", sw->link_width);
return sysfs_emit(buf, "%u\n", sw->link_width);
}
/*
@@ -1850,7 +1924,7 @@ static ssize_t nvm_authenticate_show(struct device *dev,
u32 status;
nvm_get_auth_status(sw, &status);
return sprintf(buf, "%#x\n", status);
return sysfs_emit(buf, "%#x\n", status);
}
static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
@@ -1866,6 +1940,11 @@ static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
goto exit_rpm;
}
if (sw->no_nvm_upgrade) {
ret = -EOPNOTSUPP;
goto exit_unlock;
}
/* If NVMem devices are not yet added */
if (!sw->nvm) {
ret = -EAGAIN;
@@ -1954,7 +2033,7 @@ static ssize_t nvm_version_show(struct device *dev,
else if (!sw->nvm)
ret = -EAGAIN;
else
ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
mutex_unlock(&sw->tb->lock);
@@ -1967,7 +2046,7 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
return sprintf(buf, "%#x\n", sw->vendor);
return sysfs_emit(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);
@@ -1976,7 +2055,7 @@ vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
}
static DEVICE_ATTR_RO(vendor_name);
@@ -1985,7 +2064,7 @@ static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
return sprintf(buf, "%pUb\n", sw->uuid);
return sysfs_emit(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);
@@ -2821,6 +2900,26 @@ static void tb_switch_credits_init(struct tb_switch *sw)
tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
}
static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
{
struct tb_port *port;
if (tb_switch_is_icm(sw))
return 0;
tb_switch_for_each_port(sw, port) {
int res;
if (!port->cap_usb4)
continue;
res = usb4_port_hotplug_enable(port);
if (res)
return res;
}
return 0;
}
/**
* tb_switch_add() - Add a switch to the domain
* @sw: Switch to add
@@ -2890,6 +2989,10 @@ int tb_switch_add(struct tb_switch *sw)
return ret;
}
ret = tb_switch_port_hotplug_enable(sw);
if (ret)
return ret;
ret = device_add(&sw->dev);
if (ret) {
dev_err(&sw->dev, "failed to add device: %d\n", ret);
@@ -3361,35 +3464,6 @@ struct tb_port *tb_switch_find_port(struct tb_switch *sw,
return NULL;
}
static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
u32 phy;
int ret;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (secondary)
phy |= LANE_ADP_CS_1_PMS;
else
phy &= ~LANE_ADP_CS_1_PMS;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_pm_secondary_enable(struct tb_port *port)
{
return __tb_port_pm_secondary_set(port, true);
}
static int tb_port_pm_secondary_disable(struct tb_port *port)
{
return __tb_port_pm_secondary_set(port, false);
}
static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
struct tb_switch *parent = tb_switch_parent(sw);
@@ -3408,83 +3482,6 @@ static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
return tb_port_pm_secondary_disable(down);
}
/* Called for USB4 or Titan Ridge routers only */
static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
{
u32 mask, val;
bool ret;
/* Don't enable CLx in case of two single-lane links */
if (!port->bonded && port->dual_link_port)
return false;
/* Don't enable CLx in case of inter-domain link */
if (port->xdomain)
return false;
if (tb_switch_is_usb4(port->sw)) {
if (!usb4_port_clx_supported(port))
return false;
} else if (!tb_lc_is_clx_supported(port)) {
return false;
}
switch (clx) {
case TB_CL1:
/* CL0s and CL1 are enabled and supported together */
mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
break;
/* For now we support only CL0s and CL1. Not CL2 */
case TB_CL2:
default:
return false;
}
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_0, 1);
if (ret)
return false;
return !!(val & mask);
}
static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
{
u32 phy, mask;
int ret;
/* CL0s and CL1 are enabled and supported together */
if (clx == TB_CL1)
mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
else
/* For now we support only CL0s and CL1. Not CL2 */
return -EOPNOTSUPP;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (enable)
phy |= mask;
else
phy &= ~mask;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
{
return __tb_port_clx_set(port, clx, false);
}
static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
{
return __tb_port_clx_set(port, clx, true);
}
static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
struct tb_switch *parent = tb_switch_parent(sw);

drivers/thunderbolt/tb.c

@@ -105,6 +105,32 @@ static void tb_remove_dp_resources(struct tb_switch *sw)
}
}
static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *p;
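	/* Nothing to do if the port is already tracked as a DP resource */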
list_for_each_entry(p, &tcm->dp_resources, list) {
if (p == port)
return;
}
tb_port_dbg(port, "discovered available DP %s resource\n",
tb_port_is_dpin(port) ? "IN" : "OUT");
list_add_tail(&port->list, &tcm->dp_resources);
}
static void tb_discover_dp_resources(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
if (tb_tunnel_is_dp(tunnel))
tb_discover_dp_resource(tb, tunnel->dst_port);
}
}
static void tb_switch_discover_tunnels(struct tb_switch *sw,
struct list_head *list,
bool alloc_hopids)
@@ -1416,8 +1442,11 @@ static int tb_start(struct tb *tb)
* ICM firmware upgrade needs running firmware and in native
* mode that is not available so disable firmware upgrade of the
* root switch.
*
* However, USB4 routers support NVM firmware upgrade if they
* implement the necessary router operations.
*/
tb->root_switch->no_nvm_upgrade = true;
tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
/* All USB4 routers support runtime PM */
tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
@@ -1446,6 +1475,8 @@ static int tb_start(struct tb *tb)
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
tb_discover_tunnels(tb);
/* Add DP resources from the DP tunnels created by the boot firmware */
tb_discover_dp_resources(tb);
/*
* If the boot firmware did not create USB 3.x tunnels create them
* now for the whole topology.

drivers/thunderbolt/tb.h

@@ -23,11 +23,6 @@
#define NVM_MAX_SIZE SZ_512K
#define NVM_DATA_DWORDS 16
/* Intel specific NVM offsets */
#define NVM_DEVID 0x05
#define NVM_VERSION 0x08
#define NVM_FLASH_SIZE 0x45
/**
* struct tb_nvm - Structure holding NVM information
* @dev: Owner of the NVM
@@ -35,28 +30,35 @@
* @minor: Minor version number of the active NVM portion
* @id: Identifier used with both NVM portions
* @active: Active portion NVMem device
* @active_size: Size in bytes of the active NVM
* @non_active: Non-active portion NVMem device
* @buf: Buffer where the NVM image is stored before it is written to
* the actual NVM flash device
* @buf_data_start: Where the actual image starts after skipping
* possible headers
* @buf_data_size: Number of bytes actually consumed by the new NVM
* image
* @authenticating: The device is authenticating the new NVM
* @flushed: The image has been flushed to the storage area
* @vops: Router vendor specific NVM operations (optional)
*
* The user of this structure needs to handle serialization of possible
* concurrent access.
*/
struct tb_nvm {
struct device *dev;
u8 major;
u8 minor;
u32 major;
u32 minor;
int id;
struct nvmem_device *active;
size_t active_size;
struct nvmem_device *non_active;
void *buf;
void *buf_data_start;
size_t buf_data_size;
bool authenticating;
bool flushed;
const struct tb_nvm_vendor_ops *vops;
};
enum tb_nvm_write_ops {
@@ -113,8 +115,8 @@ struct tb_switch_tmu {
enum tb_clx {
TB_CLX_DISABLE,
/* CL0s and CL1 are enabled and supported together */
TB_CL1,
TB_CL2,
TB_CL1 = BIT(0),
TB_CL2 = BIT(1),
};
/**
@@ -279,12 +281,16 @@ struct tb_port {
* @can_offline: Does the port have the necessary platform support to move
* it into offline mode and back
* @offline: The port is currently in offline mode
* @margining: Pointer to margining structure if enabled
*/
struct usb4_port {
struct device dev;
struct tb_port *port;
bool can_offline;
bool offline;
#ifdef CONFIG_USB4_DEBUGFS_MARGINING
struct tb_margining *margining;
#endif
};
/**
@@ -296,6 +302,7 @@ struct usb4_port {
* @device: Device ID of the retimer
* @port: Pointer to the lane 0 adapter
* @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise)
* @no_nvm_upgrade: Prevent NVM upgrade of this retimer
* @auth_status: Status of last NVM authentication
*/
struct tb_retimer {
@@ -306,6 +313,7 @@ struct tb_retimer {
u32 device;
struct tb_port *port;
struct tb_nvm *nvm;
bool no_nvm_upgrade;
u32 auth_status;
};
@@ -737,11 +745,13 @@ static inline void tb_domain_put(struct tb *tb)
}
struct tb_nvm *tb_nvm_alloc(struct device *dev);
int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read);
int tb_nvm_read_version(struct tb_nvm *nvm);
int tb_nvm_validate(struct tb_nvm *nvm);
int tb_nvm_write_headers(struct tb_nvm *nvm);
int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read);
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
size_t bytes);
int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
nvmem_reg_write_t reg_write);
int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write);
void tb_nvm_free(struct tb_nvm *nvm);
void tb_nvm_exit(void);
@@ -755,6 +765,8 @@ int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
unsigned int retries, write_block_fn write_next_block,
void *write_block_data);
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size);
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
u64 route);
struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
@@ -1035,6 +1047,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_port_wait_for_link_width(struct tb_port *port, int width,
int timeout_msec);
int tb_port_update_credits(struct tb_port *port);
bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
@@ -1132,6 +1145,13 @@ void tb_xdomain_remove(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{
return tb_to_switch(xd->dev.parent);
}
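
Moving tb_xdomain_parent() into tb.h lets files other than xdomain.c resolve the router an XDomain hangs off; a trivial usage sketch (the helper name is hypothetical):

static inline struct tb *example_xdomain_tb(struct tb_xdomain *xd)
{
	/* The parent device of an XDomain is always a router */
	return tb_xdomain_parent(xd)->tb;
}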
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
size_t size);
int tb_retimer_scan(struct tb_port *port, bool add);
void tb_retimer_remove_all(struct tb_port *port);
@@ -1174,6 +1194,7 @@ int usb4_switch_add_ports(struct tb_switch *sw);
void usb4_switch_remove_ports(struct tb_switch *sw);
int usb4_port_unlock(struct tb_port *port);
int usb4_port_hotplug_enable(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port);
@@ -1182,6 +1203,13 @@ int usb4_port_router_offline(struct tb_port *port);
int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
bool usb4_port_clx_supported(struct tb_port *port);
int usb4_port_margining_caps(struct tb_port *port, u32 *caps);
int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
unsigned int ber_level, bool timing, bool right_high,
u32 *results);
int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
bool right_high, u32 counter);
int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
@@ -1264,6 +1292,8 @@ void tb_debugfs_init(void);
void tb_debugfs_exit(void);
void tb_switch_debugfs_init(struct tb_switch *sw);
void tb_switch_debugfs_remove(struct tb_switch *sw);
void tb_xdomain_debugfs_init(struct tb_xdomain *xd);
void tb_xdomain_debugfs_remove(struct tb_xdomain *xd);
void tb_service_debugfs_init(struct tb_service *svc);
void tb_service_debugfs_remove(struct tb_service *svc);
#else
@@ -1271,6 +1301,8 @@ static inline void tb_debugfs_init(void) { }
static inline void tb_debugfs_exit(void) { }
static inline void tb_switch_debugfs_init(struct tb_switch *sw) { }
static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { }
static inline void tb_xdomain_debugfs_init(struct tb_xdomain *xd) { }
static inline void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) { }
static inline void tb_service_debugfs_init(struct tb_service *svc) { }
static inline void tb_service_debugfs_remove(struct tb_service *svc) { }
#endif

drivers/thunderbolt/tb_regs.h

@@ -308,6 +308,7 @@ struct tb_regs_port_header {
#define ADP_CS_5 0x05
#define ADP_CS_5_LCA_MASK GENMASK(28, 22)
#define ADP_CS_5_LCA_SHIFT 22
#define ADP_CS_5_DHP BIT(31)
/* TMU adapter registers */
#define TMU_ADP_CS_3 0x03
@@ -324,6 +325,7 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL 0x2
#define LANE_ADP_CS_0_CL0S_SUPPORT BIT(26)
#define LANE_ADP_CS_0_CL1_SUPPORT BIT(27)
#define LANE_ADP_CS_0_CL2_SUPPORT BIT(28)
#define LANE_ADP_CS_1 0x01
#define LANE_ADP_CS_1_TARGET_SPEED_MASK GENMASK(3, 0)
#define LANE_ADP_CS_1_TARGET_SPEED_GEN3 0xc
@@ -333,6 +335,7 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
#define LANE_ADP_CS_1_CL0S_ENABLE BIT(10)
#define LANE_ADP_CS_1_CL1_ENABLE BIT(11)
#define LANE_ADP_CS_1_CL2_ENABLE BIT(12)
#define LANE_ADP_CS_1_LD BIT(14)
#define LANE_ADP_CS_1_LB BIT(15)
#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16)

drivers/thunderbolt/usb4.c

@@ -1046,6 +1046,26 @@ int usb4_port_unlock(struct tb_port *port)
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}
/**
* usb4_port_hotplug_enable() - Enables hotplug for a port
* @port: USB4 port to operate on
*
* Enables hot plug events on a given port. This is only intended
* to be used on lane, DP-IN, and DP-OUT adapters.
*/
int usb4_port_hotplug_enable(struct tb_port *port)
{
int ret;
u32 val;
ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
if (ret)
return ret;
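	/* DHP is the "disable hotplug events" bit; clearing it enables them */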
val &= ~ADP_CS_5_DHP;
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
int ret;
@@ -1384,6 +1404,126 @@ bool usb4_port_clx_supported(struct tb_port *port)
return !!(val & PORT_CS_18_CPS);
}
/**
* usb4_port_margining_caps() - Read USB4 port margining capabilities
* @port: USB4 port
* @caps: Array with at least two elements to hold the results
*
* Reads the USB4 port lane margining capabilities into @caps.
*/
int usb4_port_margining_caps(struct tb_port *port, u32 *caps)
{
int ret;
ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
if (ret)
return ret;
return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_DATA, caps, sizeof(*caps) * 2);
}
/**
* usb4_port_hw_margin() - Run hardware lane margining on port
* @port: USB4 port
* @lanes: Which lanes to run (must match the port capabilities). Can be
* %0, %1 or %7.
* @ber_level: BER level contour value
* @timing: Perform timing margining instead of voltage
* @right_high: Use right/high margin instead of left/low
* @results: Array with at least two elements to hold the results
*
* Runs hardware lane margining on USB4 port and returns the result in
* @results.
*/
int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
unsigned int ber_level, bool timing, bool right_high,
u32 *results)
{
u32 val;
int ret;
val = lanes;
if (timing)
val |= USB4_MARGIN_HW_TIME;
if (right_high)
val |= USB4_MARGIN_HW_RH;
if (ber_level)
val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
USB4_MARGIN_HW_BER_MASK;
ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_METADATA, &val, sizeof(val));
if (ret)
return ret;
ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
if (ret)
return ret;
return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_DATA, results, sizeof(*results) * 2);
}
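
The expected call flow, sketched under the assumption that a real caller validates the returned caps before choosing parameters (the lane, BER level, and margin side below are made-up values):

static int example_hw_margining(struct tb_port *port)
{
	u32 caps[2], results[2];
	int ret;

	ret = usb4_port_margining_caps(port, caps);
	if (ret)
		return ret;

	/* Lane 0, timing margining, left/low side, illustrative BER level */
	ret = usb4_port_hw_margin(port, 0, 3, true, false, results);
	if (ret)
		return ret;

	tb_port_dbg(port, "HW margining results %#x %#x\n",
		    results[0], results[1]);
	return 0;
}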
/**
* usb4_port_sw_margin() - Run software lane margining on port
* @port: USB4 port
* @lanes: Which lanes to run (must match the port capabilities). Can be
* %0, %1 or %7.
* @timing: Perform timing margining instead of voltage
* @right_high: Use right/high margin instead of left/low
* @counter: What to do with the error counter
*
* Runs software lane margining on USB4 port. Read back the error
* counters by calling usb4_port_sw_margin_errors(). Returns %0 on
* success and negative errno otherwise.
*/
int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
bool right_high, u32 counter)
{
u32 val;
int ret;
val = lanes;
if (timing)
val |= USB4_MARGIN_SW_TIME;
if (right_high)
val |= USB4_MARGIN_SW_RH;
val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
USB4_MARGIN_SW_COUNTER_MASK;
ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_METADATA, &val, sizeof(val));
if (ret)
return ret;
return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
}
/**
* usb4_port_sw_margin_errors() - Read the software margining error counters
* @port: USB4 port
* @errors: Error metadata is copied here.
*
* This reads back the software margining error counters from the port.
* Returns %0 on success and negative errno otherwise.
*/
int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors)
{
int ret;
ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
if (ret)
return ret;
return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_METADATA, errors, sizeof(*errors));
}
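
And the software margining counterpart, again as a hedged sketch; the counter value 0 is illustrative, real callers pass one of the error-counter control values defined by the USB4 spec:

static int example_sw_margining(struct tb_port *port)
{
	u32 errors;
	int ret;

	/* Lane 0, timing margining, left/low side, illustrative counter */
	ret = usb4_port_sw_margin(port, 0, true, false, 0);
	if (ret)
		return ret;

	return usb4_port_sw_margin_errors(port, &errors);
}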
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
enum usb4_sb_opcode opcode,
int timeout_msec)

drivers/thunderbolt/xdomain.c

@@ -877,7 +877,7 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
* It should be null terminated but anything else is pretty much
* allowed.
*/
return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
return sysfs_emit(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);
@@ -903,7 +903,7 @@ static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
return sprintf(buf, "%u\n", svc->prtcid);
return sysfs_emit(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);
@@ -912,7 +912,7 @@ static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
return sprintf(buf, "%u\n", svc->prtcvers);
return sysfs_emit(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);
@@ -921,7 +921,7 @@ static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
return sprintf(buf, "%u\n", svc->prtcrevs);
return sysfs_emit(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);
@@ -930,7 +930,7 @@ static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
return sprintf(buf, "0x%08x\n", svc->prtcstns);
return sysfs_emit(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);
@@ -1131,11 +1131,6 @@ static int populate_properties(struct tb_xdomain *xd,
return 0;
}
static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{
return tb_to_switch(xd->dev.parent);
}
static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
bool change = false;
@@ -1440,6 +1435,8 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
if (xd->vendor_name && xd->device_name)
dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
xd->device_name);
tb_xdomain_debugfs_init(xd);
} else {
kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
}
@@ -1664,7 +1661,7 @@ static ssize_t device_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
return sprintf(buf, "%#x\n", xd->device);
return sysfs_emit(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);
@@ -1676,7 +1673,7 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
if (mutex_lock_interruptible(&xd->lock))
return -ERESTARTSYS;
ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
ret = sysfs_emit(buf, "%s\n", xd->device_name ?: "");
mutex_unlock(&xd->lock);
return ret;
@@ -1688,7 +1685,7 @@ static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
return sprintf(buf, "%d\n", xd->remote_max_hopid);
return sysfs_emit(buf, "%d\n", xd->remote_max_hopid);
}
static DEVICE_ATTR_RO(maxhopid);
@@ -1697,7 +1694,7 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
return sprintf(buf, "%#x\n", xd->vendor);
return sysfs_emit(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);
@@ -1709,7 +1706,7 @@ vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
if (mutex_lock_interruptible(&xd->lock))
return -ERESTARTSYS;
ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
ret = sysfs_emit(buf, "%s\n", xd->vendor_name ?: "");
mutex_unlock(&xd->lock);
return ret;
@@ -1721,7 +1718,7 @@ static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
return sprintf(buf, "%pUb\n", xd->remote_uuid);
return sysfs_emit(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);
@@ -1730,7 +1727,7 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
return sprintf(buf, "%u.0 Gb/s\n", xd->link_speed);
return sysfs_emit(buf, "%u.0 Gb/s\n", xd->link_speed);
}
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
@@ -1741,7 +1738,7 @@ static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
return sprintf(buf, "%u\n", xd->link_width);
return sysfs_emit(buf, "%u\n", xd->link_width);
}
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
@@ -1940,6 +1937,8 @@ static int unregister_service(struct device *dev, void *data)
*/
void tb_xdomain_remove(struct tb_xdomain *xd)
{
tb_xdomain_debugfs_remove(xd);
stop_handshake(xd);
device_for_each_child_reverse(&xd->dev, xd, unregister_service);