Staging: remove wusbcore and UWB from the kernel tree.

It's been over 6 months, and no one has noticed that these drivers are
slated for deletion, probably because no one actually has this hardware.
As no one has volunteered to maintain the code, let's drop it for good.

Link: https://lore.kernel.org/r/20200210231417.GA1736729@kroah.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2020-02-10 15:14:17 -08:00
parent 4fb8b5aa2a
commit caa6772db4
85 changed files with 0 additions and 28753 deletions

MAINTAINERS View File

@@ -3919,11 +3919,6 @@ F: certs/
F: scripts/sign-file.c
F: scripts/extract-cert.c
CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
L: devel@driverdev.osuosl.org
S: Obsolete
F: drivers/staging/wusbcore/
CFAG12864B LCD DRIVER
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
S: Maintained
@@ -17094,11 +17089,6 @@ S: Maintained
F: drivers/usb/common/ulpi.c
F: include/linux/ulpi/
ULTRA-WIDEBAND (UWB) SUBSYSTEM:
L: devel@driverdev.osuosl.org
S: Obsolete
F: drivers/staging/uwb/
UNICODE SUBSYSTEM:
M: Gabriel Krisman Bertazi <krisman@collabora.com>
L: linux-fsdevel@vger.kernel.org

drivers/staging/Kconfig View File

@@ -112,9 +112,6 @@ source "drivers/staging/fieldbus/Kconfig"
source "drivers/staging/kpc2000/Kconfig"
source "drivers/staging/wusbcore/Kconfig"
source "drivers/staging/uwb/Kconfig"
source "drivers/staging/exfat/Kconfig"
source "drivers/staging/qlge/Kconfig"

drivers/staging/Makefile View File

@@ -46,8 +46,6 @@ obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_WUSB) += wusbcore/
obj-$(CONFIG_STAGING_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_NET_VENDOR_HP) += hp/

drivers/staging/uwb/Kconfig View File

@@ -1,72 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# UWB device configuration
#
menuconfig UWB
tristate "Ultra Wideband devices"
default n
select GENERIC_NET_UTILS
help
UWB is a high-bandwidth, low-power, point-to-point radio
technology using a wide spectrum (3.1-10.6GHz). It is
optimized for in-room use (480Mbps at 2 meters, 110Mbps at
10m). It serves as the transport layer for other protocols,
such as Wireless USB (WUSB).
The topology is peer to peer; however, higher level
protocols (such as WUSB) might impose a master/slave
relationship.
Say Y here if your computer has UWB radio controllers (USB or PCI
based). You will need to enable the radio controllers
below. It is OK to select all of them; no harm done.
For more help check the UWB and WUSB related files in
<file:Documentation/usb/>.
To compile the UWB stack as a module, choose M here.
if UWB
config UWB_HWA
tristate "UWB Radio Control driver for WUSB-compliant USB dongles (HWA)"
depends on USB
help
This driver enables the radio controller for HWA USB
devices. HWA stands for Host Wire Adapter, and it is a UWB
Radio Controller connected to your system via USB. Most of
them also come with a Wireless USB host controller.
To compile this driver select Y (built in) or M (module). It
is safe to select any even if you do not have the hardware.
config UWB_WHCI
tristate "UWB Radio Control driver for WHCI-compliant cards"
depends on PCI
help
This driver enables the radio controller for WHCI cards.
WHCI is a specification developed by Intel
(http://www.intel.com/technology/comms/wusb/whci.htm) much
in the spirit of USB's EHCI, but for UWB and Wireless USB
radio/host controllers connected via memory mapping (e.g.
PCI). Most of these cards also come with a Wireless USB host
controller.
To compile this driver select Y (built in) or M (module). It
is safe to select any even if you do not have the hardware.
config UWB_I1480U
tristate "Support for Intel Wireless UWB Link 1480 HWA"
depends on UWB_HWA
select FW_LOADER
help
This driver enables support for the i1480 when connected via
USB. It consists of a firmware uploader that will enable it
to behave as an HWA device.
To compile this driver select Y (built in) or M (module). It
is safe to select any even if you do not have the hardware.
endif # UWB

drivers/staging/uwb/Makefile View File

@@ -1,32 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_UWB) += uwb.o
obj-$(CONFIG_UWB_WHCI) += umc.o whci.o whc-rc.o
obj-$(CONFIG_UWB_HWA) += hwa-rc.o
obj-$(CONFIG_UWB_I1480U) += i1480/
uwb-objs := \
address.o \
allocator.o \
beacon.o \
driver.o \
drp.o \
drp-avail.o \
drp-ie.o \
est.o \
ie.o \
ie-rcv.o \
lc-dev.o \
lc-rc.o \
neh.o \
pal.o \
radio.o \
reset.o \
rsv.o \
scan.o \
uwb-debug.o \
uwbd.o
umc-objs := \
umc-bus.o \
umc-dev.o \
umc-drv.o

drivers/staging/uwb/TODO View File

@@ -1,8 +0,0 @@
TODO: Remove in late 2019 unless there are users
There do not seem to be any real wireless USB devices anywhere in the wild
anymore. It turned out to be a failed technology :(
This will be removed from the tree if no one objects.
Greg Kroah-Hartman <gregkh@linuxfoundation.org>

drivers/staging/uwb/address.c View File

@@ -1,352 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band
* Address management
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* FIXME: docs
*/
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/random.h>
#include <linux/etherdevice.h>
#include "uwb-internal.h"
/** Device Address Management command */
struct uwb_rc_cmd_dev_addr_mgmt {
struct uwb_rccb rccb;
u8 bmOperationType;
u8 baAddr[6];
} __attribute__((packed));
/**
* Low level command for setting/getting UWB radio's addresses
*
* @rc: UWB Radio Controller instance
* @bmOperationType:
* Set/get, MAC/DEV (see WUSB1.0[8.6.2.2])
* @baAddr: address buffer--assumed to have enough data to hold
* the address type requested.
* @reply: Pointer to reply buffer (can be stack allocated)
* @returns: 0 if ok, < 0 errno code on error.
*
* @cmd has to be dynamically allocated because USB cannot grok stack or
* vmalloc buffers, depending on your combination of host architecture.
*/
static
int uwb_rc_dev_addr_mgmt(struct uwb_rc *rc,
u8 bmOperationType, const u8 *baAddr,
struct uwb_rc_evt_dev_addr_mgmt *reply)
{
int result;
struct uwb_rc_cmd_dev_addr_mgmt *cmd;
result = -ENOMEM;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
goto error_kzalloc;
cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_DEV_ADDR_MGMT);
cmd->bmOperationType = bmOperationType;
if (baAddr) {
size_t size = 0;
switch (bmOperationType >> 1) {
case 0: size = 2; break;
case 1: size = 6; break;
default: BUG();
}
memcpy(cmd->baAddr, baAddr, size);
}
reply->rceb.bEventType = UWB_RC_CET_GENERAL;
reply->rceb.wEvent = UWB_RC_CMD_DEV_ADDR_MGMT;
result = uwb_rc_cmd(rc, "DEV-ADDR-MGMT",
&cmd->rccb, sizeof(*cmd),
&reply->rceb, sizeof(*reply));
if (result < 0)
goto error_cmd;
if (result < sizeof(*reply)) {
dev_err(&rc->uwb_dev.dev,
"DEV-ADDR-MGMT: not enough data replied: "
"%d vs %zu bytes needed\n", result, sizeof(*reply));
result = -ENOMSG;
} else if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
dev_err(&rc->uwb_dev.dev,
"DEV-ADDR-MGMT: command execution failed: %s (%d)\n",
uwb_rc_strerror(reply->bResultCode),
reply->bResultCode);
result = -EIO;
} else
result = 0;
error_cmd:
kfree(cmd);
error_kzalloc:
return result;
}
/**
* Set the UWB RC MAC or device address.
*
* @rc: UWB Radio Controller
* @_addr: Pointer to address to write [assumed to be either a
* 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *'].
* @type: Type of address to set (UWB_ADDR_DEV or UWB_ADDR_MAC).
* @returns: 0 if ok, < 0 errno code on error.
*
* Some anal retentivity here: even if both 'struct
* uwb_{dev,mac}_addr' have the actual byte array in the same offset
* and I could just pass _addr to hwarc_cmd_dev_addr_mgmt(), I prefer
* to use some syntactic sugar in case someday we decide to change the
* format of the structs. The compiler will optimize it out anyway.
*/
static int uwb_rc_addr_set(struct uwb_rc *rc,
const void *_addr, enum uwb_addr_type type)
{
int result;
u8 bmOperationType = 0x1; /* Set address */
const struct uwb_dev_addr *dev_addr = _addr;
const struct uwb_mac_addr *mac_addr = _addr;
struct uwb_rc_evt_dev_addr_mgmt reply;
const u8 *baAddr;
result = -EINVAL;
switch (type) {
case UWB_ADDR_DEV:
baAddr = dev_addr->data;
break;
case UWB_ADDR_MAC:
baAddr = mac_addr->data;
bmOperationType |= 0x2;
break;
default:
return result;
}
return uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &reply);
}
/**
* Get the UWB radio's MAC or device address.
*
* @rc: UWB Radio Controller
* @_addr: Where to write the address data [assumed to be either a
* 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *'].
* @type: Type of address to get (UWB_ADDR_DEV or UWB_ADDR_MAC).
* @returns: 0 if ok (and *_addr set), < 0 errno code on error.
*
* See comment in uwb_rc_addr_set() about anal retentivity in the
* type handling of the address variables.
*/
static int uwb_rc_addr_get(struct uwb_rc *rc,
void *_addr, enum uwb_addr_type type)
{
int result;
u8 bmOperationType = 0x0; /* Get address */
struct uwb_rc_evt_dev_addr_mgmt evt;
struct uwb_dev_addr *dev_addr = _addr;
struct uwb_mac_addr *mac_addr = _addr;
u8 *baAddr;
result = -EINVAL;
switch (type) {
case UWB_ADDR_DEV:
baAddr = dev_addr->data;
break;
case UWB_ADDR_MAC:
bmOperationType |= 0x2;
baAddr = mac_addr->data;
break;
default:
return result;
}
result = uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &evt);
if (result == 0)
switch (type) {
case UWB_ADDR_DEV:
memcpy(&dev_addr->data, evt.baAddr,
sizeof(dev_addr->data));
break;
case UWB_ADDR_MAC:
memcpy(&mac_addr->data, evt.baAddr,
sizeof(mac_addr->data));
break;
default: /* shut gcc up */
BUG();
}
return result;
}
/** Get @rc's MAC address to @addr */
int uwb_rc_mac_addr_get(struct uwb_rc *rc,
struct uwb_mac_addr *addr) {
return uwb_rc_addr_get(rc, addr, UWB_ADDR_MAC);
}
EXPORT_SYMBOL_GPL(uwb_rc_mac_addr_get);
/** Get @rc's device address to @addr */
int uwb_rc_dev_addr_get(struct uwb_rc *rc,
struct uwb_dev_addr *addr) {
return uwb_rc_addr_get(rc, addr, UWB_ADDR_DEV);
}
EXPORT_SYMBOL_GPL(uwb_rc_dev_addr_get);
/** Set @rc's address to @addr */
int uwb_rc_mac_addr_set(struct uwb_rc *rc,
const struct uwb_mac_addr *addr)
{
int result = -EINVAL;
mutex_lock(&rc->uwb_dev.mutex);
result = uwb_rc_addr_set(rc, addr, UWB_ADDR_MAC);
mutex_unlock(&rc->uwb_dev.mutex);
return result;
}
/** Set @rc's address to @addr */
int uwb_rc_dev_addr_set(struct uwb_rc *rc,
const struct uwb_dev_addr *addr)
{
int result = -EINVAL;
mutex_lock(&rc->uwb_dev.mutex);
result = uwb_rc_addr_set(rc, addr, UWB_ADDR_DEV);
rc->uwb_dev.dev_addr = *addr;
mutex_unlock(&rc->uwb_dev.mutex);
return result;
}
/* Returns !0 if given address is already assigned to device. */
int __uwb_mac_addr_assigned_check(struct device *dev, void *_addr)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_mac_addr *addr = _addr;
if (!uwb_mac_addr_cmp(addr, &uwb_dev->mac_addr))
return !0;
return 0;
}
/* Returns !0 if given address is already assigned to device. */
int __uwb_dev_addr_assigned_check(struct device *dev, void *_addr)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_dev_addr *addr = _addr;
if (!uwb_dev_addr_cmp(addr, &uwb_dev->dev_addr))
return !0;
return 0;
}
/**
* uwb_rc_dev_addr_assign - assign a generated DevAddr to a radio controller
* @rc: the (local) radio controller device requiring a new DevAddr
*
* A new DevAddr is required when:
* - first setting up a radio controller
* - if the hardware reports a DevAddr conflict
*
* The DevAddr is randomly generated in the generated DevAddr range
* [0x100, 0xfeff]. The number of devices in a beacon group is limited
* by mMaxBPLength (96) so this address space will never be exhausted.
*
* [ECMA-368] 17.1.1, 17.16.
*/
int uwb_rc_dev_addr_assign(struct uwb_rc *rc)
{
struct uwb_dev_addr new_addr;
do {
get_random_bytes(new_addr.data, sizeof(new_addr.data));
} while (new_addr.data[0] == 0x00 || new_addr.data[0] == 0xff
|| __uwb_dev_addr_assigned(rc, &new_addr));
return uwb_rc_dev_addr_set(rc, &new_addr);
}
/**
* uwbd_evt_handle_rc_dev_addr_conflict - handle a DEV_ADDR_CONFLICT event
* @evt: the DEV_ADDR_CONFLICT notification from the radio controller
*
* A new (non-conflicting) DevAddr is assigned to the radio controller.
*
* [ECMA-368] 17.1.1.1.
*/
int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt)
{
struct uwb_rc *rc = evt->rc;
return uwb_rc_dev_addr_assign(rc);
}
/*
* Print the 48-bit EUI MAC address of the radio controller when
* reading /sys/class/uwb_rc/XX/mac_address
*/
static ssize_t uwb_rc_mac_addr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_rc *rc = uwb_dev->rc;
struct uwb_mac_addr addr;
ssize_t result;
mutex_lock(&rc->uwb_dev.mutex);
result = uwb_rc_addr_get(rc, &addr, UWB_ADDR_MAC);
mutex_unlock(&rc->uwb_dev.mutex);
if (result >= 0) {
result = uwb_mac_addr_print(buf, UWB_ADDR_STRSIZE, &addr);
buf[result++] = '\n';
}
return result;
}
/*
* Parse a 48 bit address written to /sys/class/uwb_rc/XX/mac_address
* and if correct, set it.
*/
static ssize_t uwb_rc_mac_addr_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_rc *rc = uwb_dev->rc;
struct uwb_mac_addr addr;
ssize_t result;
if (!mac_pton(buf, addr.data))
return -EINVAL;
if (is_multicast_ether_addr(addr.data)) {
dev_err(&rc->uwb_dev.dev, "refusing to set multicast "
"MAC address %s\n", buf);
return -EINVAL;
}
result = uwb_rc_mac_addr_set(rc, &addr);
if (result == 0)
rc->uwb_dev.mac_addr = addr;
return result < 0 ? result : size;
}
DEVICE_ATTR(mac_address, S_IRUGO | S_IWUSR, uwb_rc_mac_addr_show, uwb_rc_mac_addr_store);
/** Print @addr to @buf, @return bytes written */
size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr,
int type)
{
size_t result;
if (type)
result = scnprintf(buf, buf_size, "%pM", addr);
else
result = scnprintf(buf, buf_size, "%02x:%02x",
addr[1], addr[0]);
return result;
}
EXPORT_SYMBOL_GPL(__uwb_addr_print);
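
The address helpers exported above were meant to be called from the rest of the (now removed) UWB/WUSB stack. The following is a minimal, illustrative sketch of such a caller, assuming only the signatures and struct fields visible in this file; a valid, referenced struct uwb_rc is taken for granted and example_show_addresses() is a hypothetical name:

static void example_show_addresses(struct uwb_rc *rc)
{
        struct uwb_mac_addr mac;
        struct uwb_dev_addr dev;

        /* EUI-48 of the radio controller, printed the way __uwb_addr_print() does */
        if (uwb_rc_mac_addr_get(rc, &mac) == 0)
                pr_info("uwb_rc MAC address: %pM\n", mac.data);

        /* 16-bit DevAddr; the two-byte print order matches the helper above */
        if (uwb_rc_dev_addr_get(rc, &dev) == 0)
                pr_info("uwb_rc DevAddr: %02x:%02x\n", dev.data[1], dev.data[0]);
}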

drivers/staging/uwb/allocator.c View File

@@ -1,374 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* UWB reservation management.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include "uwb.h"
#include "uwb-internal.h"
static void uwb_rsv_fill_column_alloc(struct uwb_rsv_alloc_info *ai)
{
int col, mas, safe_mas, unsafe_mas;
unsigned char *bm = ai->bm;
struct uwb_rsv_col_info *ci = ai->ci;
unsigned char c;
for (col = ci->csi.start_col; col < UWB_NUM_ZONES; col += ci->csi.interval) {
safe_mas = ci->csi.safe_mas_per_col;
unsafe_mas = ci->csi.unsafe_mas_per_col;
for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++ ) {
if (bm[col * UWB_MAS_PER_ZONE + mas] == 0) {
if (safe_mas > 0) {
safe_mas--;
c = UWB_RSV_MAS_SAFE;
} else if (unsafe_mas > 0) {
unsafe_mas--;
c = UWB_RSV_MAS_UNSAFE;
} else {
break;
}
bm[col * UWB_MAS_PER_ZONE + mas] = c;
}
}
}
}
static void uwb_rsv_fill_row_alloc(struct uwb_rsv_alloc_info *ai)
{
int mas, col, rows;
unsigned char *bm = ai->bm;
struct uwb_rsv_row_info *ri = &ai->ri;
unsigned char c;
rows = 1;
c = UWB_RSV_MAS_SAFE;
for (mas = UWB_MAS_PER_ZONE - 1; mas >= 0; mas--) {
if (ri->avail[mas] == 1) {
if (rows > ri->used_rows) {
break;
} else if (rows > 7) {
c = UWB_RSV_MAS_UNSAFE;
}
for (col = 0; col < UWB_NUM_ZONES; col++) {
if (bm[col * UWB_NUM_ZONES + mas] != UWB_RSV_MAS_NOT_AVAIL) {
bm[col * UWB_NUM_ZONES + mas] = c;
if(c == UWB_RSV_MAS_SAFE)
ai->safe_allocated_mases++;
else
ai->unsafe_allocated_mases++;
}
}
rows++;
}
}
ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases;
}
/*
* Find the best column set for a given availability, interval, num safe mas and
* num unsafe mas.
*
* The different sets are tried in order as shown below, depending on the interval.
*
* interval = 16
* deep = 0
* set 1 -> { 8 }
* deep = 1
* set 1 -> { 4 }
* set 2 -> { 12 }
* deep = 2
* set 1 -> { 2 }
* set 2 -> { 6 }
* set 3 -> { 10 }
* set 4 -> { 14 }
* deep = 3
* set 1 -> { 1 }
* set 2 -> { 3 }
* set 3 -> { 5 }
* set 4 -> { 7 }
* set 5 -> { 9 }
* set 6 -> { 11 }
* set 7 -> { 13 }
* set 8 -> { 15 }
*
* interval = 8
* deep = 0
* set 1 -> { 4 12 }
* deep = 1
* set 1 -> { 2 10 }
* set 2 -> { 6 14 }
* deep = 2
* set 1 -> { 1 9 }
* set 2 -> { 3 11 }
* set 3 -> { 5 13 }
* set 4 -> { 7 15 }
*
* interval = 4
* deep = 0
* set 1 -> { 2 6 10 14 }
* deep = 1
* set 1 -> { 1 5 9 13 }
* set 2 -> { 3 7 11 15 }
*
* interval = 2
* deep = 0
* set 1 -> { 1 3 5 7 9 11 13 15 }
*/
static int uwb_rsv_find_best_column_set(struct uwb_rsv_alloc_info *ai, int interval,
int num_safe_mas, int num_unsafe_mas)
{
struct uwb_rsv_col_info *ci = ai->ci;
struct uwb_rsv_col_set_info *csi = &ci->csi;
struct uwb_rsv_col_set_info tmp_csi;
int deep, set, col, start_col_deep, col_start_set;
int start_col, max_mas_in_set, lowest_max_mas_in_deep;
int n_mas;
int found = UWB_RSV_ALLOC_NOT_FOUND;
tmp_csi.start_col = 0;
start_col_deep = interval;
n_mas = num_unsafe_mas + num_safe_mas;
for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) {
start_col_deep /= 2;
col_start_set = 0;
lowest_max_mas_in_deep = UWB_MAS_PER_ZONE;
for (set = 1; set <= (1 << deep); set++) {
max_mas_in_set = 0;
start_col = start_col_deep + col_start_set;
for (col = start_col; col < UWB_NUM_ZONES; col += interval) {
if (ci[col].max_avail_safe >= num_safe_mas &&
ci[col].max_avail_unsafe >= n_mas) {
if (ci[col].highest_mas[n_mas] > max_mas_in_set)
max_mas_in_set = ci[col].highest_mas[n_mas];
} else {
max_mas_in_set = 0;
break;
}
}
if ((lowest_max_mas_in_deep > max_mas_in_set) && max_mas_in_set) {
lowest_max_mas_in_deep = max_mas_in_set;
tmp_csi.start_col = start_col;
}
col_start_set += (interval >> deep);
}
if (lowest_max_mas_in_deep < 8) {
csi->start_col = tmp_csi.start_col;
found = UWB_RSV_ALLOC_FOUND;
break;
} else if ((lowest_max_mas_in_deep > 8) &&
(lowest_max_mas_in_deep != UWB_MAS_PER_ZONE) &&
(found == UWB_RSV_ALLOC_NOT_FOUND)) {
csi->start_col = tmp_csi.start_col;
found = UWB_RSV_ALLOC_FOUND;
}
}
if (found == UWB_RSV_ALLOC_FOUND) {
csi->interval = interval;
csi->safe_mas_per_col = num_safe_mas;
csi->unsafe_mas_per_col = num_unsafe_mas;
ai->safe_allocated_mases = (UWB_NUM_ZONES / interval) * num_safe_mas;
ai->unsafe_allocated_mases = (UWB_NUM_ZONES / interval) * num_unsafe_mas;
ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases;
ai->interval = interval;
}
return found;
}
static void get_row_descriptors(struct uwb_rsv_alloc_info *ai)
{
unsigned char *bm = ai->bm;
struct uwb_rsv_row_info *ri = &ai->ri;
int col, mas;
ri->free_rows = 16;
for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) {
ri->avail[mas] = 1;
for (col = 1; col < UWB_NUM_ZONES; col++) {
if (bm[col * UWB_NUM_ZONES + mas] == UWB_RSV_MAS_NOT_AVAIL) {
ri->free_rows--;
ri->avail[mas]=0;
break;
}
}
}
}
static void uwb_rsv_fill_column_info(unsigned char *bm, int column, struct uwb_rsv_col_info *rci)
{
int mas;
int block_count = 0, start_block = 0;
int previous_avail = 0;
int available = 0;
int safe_mas_in_row[UWB_MAS_PER_ZONE] = {
8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1,
};
rci->max_avail_safe = 0;
for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) {
if (!bm[column * UWB_NUM_ZONES + mas]) {
available++;
rci->max_avail_unsafe = available;
rci->highest_mas[available] = mas;
if (previous_avail) {
block_count++;
if ((block_count > safe_mas_in_row[start_block]) &&
(!rci->max_avail_safe))
rci->max_avail_safe = available - 1;
} else {
previous_avail = 1;
start_block = mas;
block_count = 1;
}
} else {
previous_avail = 0;
}
}
if (!rci->max_avail_safe)
rci->max_avail_safe = rci->max_avail_unsafe;
}
static void get_column_descriptors(struct uwb_rsv_alloc_info *ai)
{
unsigned char *bm = ai->bm;
struct uwb_rsv_col_info *ci = ai->ci;
int col;
for (col = 1; col < UWB_NUM_ZONES; col++) {
uwb_rsv_fill_column_info(bm, col, &ci[col]);
}
}
static int uwb_rsv_find_best_row_alloc(struct uwb_rsv_alloc_info *ai)
{
int n_rows;
int max_rows = ai->max_mas / UWB_USABLE_MAS_PER_ROW;
int min_rows = ai->min_mas / UWB_USABLE_MAS_PER_ROW;
if (ai->min_mas % UWB_USABLE_MAS_PER_ROW)
min_rows++;
for (n_rows = max_rows; n_rows >= min_rows; n_rows--) {
if (n_rows <= ai->ri.free_rows) {
ai->ri.used_rows = n_rows;
ai->interval = 1; /* row reservation */
uwb_rsv_fill_row_alloc(ai);
return UWB_RSV_ALLOC_FOUND;
}
}
return UWB_RSV_ALLOC_NOT_FOUND;
}
static int uwb_rsv_find_best_col_alloc(struct uwb_rsv_alloc_info *ai, int interval)
{
int n_safe, n_unsafe, n_mas;
int n_column = UWB_NUM_ZONES / interval;
int max_per_zone = ai->max_mas / n_column;
int min_per_zone = ai->min_mas / n_column;
if (ai->min_mas % n_column)
min_per_zone++;
if (min_per_zone > UWB_MAS_PER_ZONE) {
return UWB_RSV_ALLOC_NOT_FOUND;
}
if (max_per_zone > UWB_MAS_PER_ZONE) {
max_per_zone = UWB_MAS_PER_ZONE;
}
for (n_mas = max_per_zone; n_mas >= min_per_zone; n_mas--) {
if (uwb_rsv_find_best_column_set(ai, interval, 0, n_mas) == UWB_RSV_ALLOC_NOT_FOUND)
continue;
for (n_safe = n_mas; n_safe >= 0; n_safe--) {
n_unsafe = n_mas - n_safe;
if (uwb_rsv_find_best_column_set(ai, interval, n_safe, n_unsafe) == UWB_RSV_ALLOC_FOUND) {
uwb_rsv_fill_column_alloc(ai);
return UWB_RSV_ALLOC_FOUND;
}
}
}
return UWB_RSV_ALLOC_NOT_FOUND;
}
int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available,
struct uwb_mas_bm *result)
{
struct uwb_rsv_alloc_info *ai;
int interval;
int bit_index;
ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL);
if (!ai)
return UWB_RSV_ALLOC_NOT_FOUND;
ai->min_mas = rsv->min_mas;
ai->max_mas = rsv->max_mas;
ai->max_interval = rsv->max_interval;
/* fill the not available vector from the available bm */
for_each_clear_bit(bit_index, available->bm, UWB_NUM_MAS)
ai->bm[bit_index] = UWB_RSV_MAS_NOT_AVAIL;
if (ai->max_interval == 1) {
get_row_descriptors(ai);
if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND)
goto alloc_found;
else
goto alloc_not_found;
}
get_column_descriptors(ai);
for (interval = 16; interval >= 2; interval>>=1) {
if (interval > ai->max_interval)
continue;
if (uwb_rsv_find_best_col_alloc(ai, interval) == UWB_RSV_ALLOC_FOUND)
goto alloc_found;
}
/* try row reservation if no column is found */
get_row_descriptors(ai);
if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND)
goto alloc_found;
else
goto alloc_not_found;
alloc_found:
bitmap_zero(result->bm, UWB_NUM_MAS);
bitmap_zero(result->unsafe_bm, UWB_NUM_MAS);
/* fill the safe and unsafe bitmaps */
for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) {
if (ai->bm[bit_index] == UWB_RSV_MAS_SAFE)
set_bit(bit_index, result->bm);
else if (ai->bm[bit_index] == UWB_RSV_MAS_UNSAFE)
set_bit(bit_index, result->unsafe_bm);
}
bitmap_or(result->bm, result->bm, result->unsafe_bm, UWB_NUM_MAS);
result->safe = ai->safe_allocated_mases;
result->unsafe = ai->unsafe_allocated_mases;
kfree(ai);
return UWB_RSV_ALLOC_FOUND;
alloc_not_found:
kfree(ai);
return UWB_RSV_ALLOC_NOT_FOUND;
}
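
To make the allocator entry point above a little more concrete, here is an illustrative sketch (not part of the original file) of how a caller might fill in the reservation parameters that uwb_rsv_find_best_allocation() consumes. The struct fields and return codes are the ones used in this file; example_try_alloc() and the specific numbers are assumptions:

static int example_try_alloc(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
{
        struct uwb_mas_bm result;

        rsv->min_mas = 16;      /* smallest acceptable number of MAS per superframe */
        rsv->max_mas = 64;      /* largest number of MAS worth asking for */
        rsv->max_interval = 4;  /* do not accept zones sparser than every 4th zone */

        if (uwb_rsv_find_best_allocation(rsv, available, &result) != UWB_RSV_ALLOC_FOUND)
                return -EBUSY;

        /* result.bm holds safe | unsafe MAS; result.safe / result.unsafe count them */
        return 0;
}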

drivers/staging/uwb/beacon.c View File

@@ -1,595 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band
* Beacon management
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* FIXME: docs
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include "uwb-internal.h"
/* Start Beaconing command structure */
struct uwb_rc_cmd_start_beacon {
struct uwb_rccb rccb;
__le16 wBPSTOffset;
u8 bChannelNumber;
} __attribute__((packed));
static int uwb_rc_start_beacon(struct uwb_rc *rc, u16 bpst_offset, u8 channel)
{
int result;
struct uwb_rc_cmd_start_beacon *cmd;
struct uwb_rc_evt_confirm reply;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_START_BEACON);
cmd->wBPSTOffset = cpu_to_le16(bpst_offset);
cmd->bChannelNumber = channel;
reply.rceb.bEventType = UWB_RC_CET_GENERAL;
reply.rceb.wEvent = UWB_RC_CMD_START_BEACON;
result = uwb_rc_cmd(rc, "START-BEACON", &cmd->rccb, sizeof(*cmd),
&reply.rceb, sizeof(reply));
if (result < 0)
goto error_cmd;
if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
dev_err(&rc->uwb_dev.dev,
"START-BEACON: command execution failed: %s (%d)\n",
uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
result = -EIO;
}
error_cmd:
kfree(cmd);
return result;
}
static int uwb_rc_stop_beacon(struct uwb_rc *rc)
{
int result;
struct uwb_rccb *cmd;
struct uwb_rc_evt_confirm reply;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->bCommandType = UWB_RC_CET_GENERAL;
cmd->wCommand = cpu_to_le16(UWB_RC_CMD_STOP_BEACON);
reply.rceb.bEventType = UWB_RC_CET_GENERAL;
reply.rceb.wEvent = UWB_RC_CMD_STOP_BEACON;
result = uwb_rc_cmd(rc, "STOP-BEACON", cmd, sizeof(*cmd),
&reply.rceb, sizeof(reply));
if (result < 0)
goto error_cmd;
if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
dev_err(&rc->uwb_dev.dev,
"STOP-BEACON: command execution failed: %s (%d)\n",
uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
result = -EIO;
}
error_cmd:
kfree(cmd);
return result;
}
/*
* Start/stop beacons
*
* @rc: UWB Radio Controller to operate on
* @channel: UWB channel on which to beacon (WUSB[table
* 5-12]). If -1, stop beaconing.
* @bpst_offset: Beacon Period Start Time offset; FIXME-do zero
*
* According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
* of a SET IE command after the device sent the first beacon that includes
* the IEs specified in the SET IE command. So, after we start beaconing we
* check if there is anything in the IE cache and call the SET IE command
* if needed.
*/
int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset)
{
int result;
struct device *dev = &rc->uwb_dev.dev;
dev_dbg(dev, "%s: channel = %d\n", __func__, channel);
if (channel < 0)
channel = -1;
if (channel == -1)
result = uwb_rc_stop_beacon(rc);
else {
/* channel >= 0...dah */
result = uwb_rc_start_beacon(rc, bpst_offset, channel);
if (result < 0) {
dev_err(dev, "Cannot start beaconing: %d\n", result);
return result;
}
if (le16_to_cpu(rc->ies->wIELength) > 0) {
result = uwb_rc_set_ie(rc, rc->ies);
if (result < 0) {
dev_err(dev, "Cannot set new IE on device: "
"%d\n", result);
result = uwb_rc_stop_beacon(rc);
channel = -1;
bpst_offset = 0;
}
}
}
if (result >= 0)
rc->beaconing = channel;
return result;
}
/*
* Beacon cache
*
* The purpose of this is to speed up the lookup of beacon information
* when a new beacon arrives. The UWB Daemon also uses it to keep tabs
* on which devices are within radio range and which are not. When a
* device's beacon stays present for more than a certain amount of
* time, it is considered a new, usable device. When a beacon ceases
* to be received for a certain amount of time, it is considered that
* the device is gone.
*
* FIXME: use an allocator for the entries
* FIXME: use something faster for search than a list
*/
void uwb_bce_kfree(struct kref *_bce)
{
struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt);
kfree(bce->be);
kfree(bce);
}
/* Find a beacon by dev addr in the cache */
static
struct uwb_beca_e *__uwb_beca_find_bydev(struct uwb_rc *rc,
const struct uwb_dev_addr *dev_addr)
{
struct uwb_beca_e *bce, *next;
list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr)))
goto out;
}
bce = NULL;
out:
return bce;
}
/* Find a beacon by dev addr in the cache */
static
struct uwb_beca_e *__uwb_beca_find_bymac(struct uwb_rc *rc,
const struct uwb_mac_addr *mac_addr)
{
struct uwb_beca_e *bce, *next;
list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
if (!memcmp(bce->mac_addr, mac_addr->data,
sizeof(struct uwb_mac_addr)))
goto out;
}
bce = NULL;
out:
return bce;
}
/**
* uwb_dev_get_by_devaddr - get a UWB device with a specific DevAddr
* @rc: the radio controller that saw the device
* @devaddr: DevAddr of the UWB device to find
*
* There may be more than one matching device (in the case of a
* DevAddr conflict), but only the first one is returned.
*/
struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
const struct uwb_dev_addr *devaddr)
{
struct uwb_dev *found = NULL;
struct uwb_beca_e *bce;
mutex_lock(&rc->uwb_beca.mutex);
bce = __uwb_beca_find_bydev(rc, devaddr);
if (bce)
found = uwb_dev_try_get(rc, bce->uwb_dev);
mutex_unlock(&rc->uwb_beca.mutex);
return found;
}
/**
* uwb_dev_get_by_macaddr - get a UWB device with a specific EUI-48
* @rc: the radio controller that saw the device
* @devaddr: EUI-48 of the UWB device to find
*/
struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
const struct uwb_mac_addr *macaddr)
{
struct uwb_dev *found = NULL;
struct uwb_beca_e *bce;
mutex_lock(&rc->uwb_beca.mutex);
bce = __uwb_beca_find_bymac(rc, macaddr);
if (bce)
found = uwb_dev_try_get(rc, bce->uwb_dev);
mutex_unlock(&rc->uwb_beca.mutex);
return found;
}
/* Initialize a beacon cache entry */
static void uwb_beca_e_init(struct uwb_beca_e *bce)
{
mutex_init(&bce->mutex);
kref_init(&bce->refcnt);
stats_init(&bce->lqe_stats);
stats_init(&bce->rssi_stats);
}
/*
* Add a beacon to the cache
*
* @be: Beacon event information
* @bf: Beacon frame (part of b, really)
* @ts_jiffies: Timestamp (in jiffies) when the beacon was received
*/
static
struct uwb_beca_e *__uwb_beca_add(struct uwb_rc *rc,
struct uwb_rc_evt_beacon *be,
struct uwb_beacon_frame *bf,
unsigned long ts_jiffies)
{
struct uwb_beca_e *bce;
bce = kzalloc(sizeof(*bce), GFP_KERNEL);
if (bce == NULL)
return NULL;
uwb_beca_e_init(bce);
bce->ts_jiffies = ts_jiffies;
bce->uwb_dev = NULL;
list_add(&bce->node, &rc->uwb_beca.list);
return bce;
}
/*
* Wipe out beacon entries that became stale
*
* Remove associated devices too.
*/
void uwb_beca_purge(struct uwb_rc *rc)
{
struct uwb_beca_e *bce, *next;
unsigned long expires;
mutex_lock(&rc->uwb_beca.mutex);
list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms);
if (time_after(jiffies, expires)) {
uwbd_dev_offair(bce);
}
}
mutex_unlock(&rc->uwb_beca.mutex);
}
/* Clean up the whole beacon cache. Called on shutdown */
void uwb_beca_release(struct uwb_rc *rc)
{
struct uwb_beca_e *bce, *next;
mutex_lock(&rc->uwb_beca.mutex);
list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
list_del(&bce->node);
uwb_bce_put(bce);
}
mutex_unlock(&rc->uwb_beca.mutex);
}
static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be,
struct uwb_beacon_frame *bf)
{
char macbuf[UWB_ADDR_STRSIZE];
char devbuf[UWB_ADDR_STRSIZE];
char dstbuf[UWB_ADDR_STRSIZE];
uwb_mac_addr_print(macbuf, sizeof(macbuf), &bf->Device_Identifier);
uwb_dev_addr_print(devbuf, sizeof(devbuf), &bf->hdr.SrcAddr);
uwb_dev_addr_print(dstbuf, sizeof(dstbuf), &bf->hdr.DestAddr);
dev_info(&rc->uwb_dev.dev,
"BEACON from %s to %s (ch%u offset %u slot %u MAC %s)\n",
devbuf, dstbuf, be->bChannelNumber, be->wBPSTOffset,
bf->Beacon_Slot_Number, macbuf);
}
/*
* @bce: beacon cache entry, referenced
*/
ssize_t uwb_bce_print_IEs(struct uwb_dev *uwb_dev, struct uwb_beca_e *bce,
char *buf, size_t size)
{
ssize_t result = 0;
struct uwb_rc_evt_beacon *be;
struct uwb_beacon_frame *bf;
int ies_len;
struct uwb_ie_hdr *ies;
mutex_lock(&bce->mutex);
be = bce->be;
if (be) {
bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo;
ies_len = be->wBeaconInfoLength - sizeof(struct uwb_beacon_frame);
ies = (struct uwb_ie_hdr *)bf->IEData;
result = uwb_ie_dump_hex(ies, ies_len, buf, size);
}
mutex_unlock(&bce->mutex);
return result;
}
/*
* Verify that the beacon event, frame and IEs are ok
*/
static int uwb_verify_beacon(struct uwb_rc *rc, struct uwb_event *evt,
struct uwb_rc_evt_beacon *be)
{
int result = -EINVAL;
struct uwb_beacon_frame *bf;
struct device *dev = &rc->uwb_dev.dev;
/* Is there enough data to decode a beacon frame? */
if (evt->notif.size < sizeof(*be) + sizeof(*bf)) {
dev_err(dev, "BEACON event: Not enough data to decode "
"(%zu vs %zu bytes needed)\n", evt->notif.size,
sizeof(*be) + sizeof(*bf));
goto error;
}
/* FIXME: make sure beacon frame IEs are fine and that the whole thing
* is consistent */
result = 0;
error:
return result;
}
/*
* Handle UWB_RC_EVT_BEACON events
*
* We check the beacon cache to see how the received beacon fares. If
* it is already there we refresh the timestamp. If not, we create a new
* entry.
*
* According to the WHCI and WUSB specs, only one beacon frame is
* allowed per notification block, so we don't bother about scanning
* for more.
*/
int uwbd_evt_handle_rc_beacon(struct uwb_event *evt)
{
int result = -EINVAL;
struct uwb_rc *rc;
struct uwb_rc_evt_beacon *be;
struct uwb_beacon_frame *bf;
struct uwb_beca_e *bce;
rc = evt->rc;
be = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon, rceb);
result = uwb_verify_beacon(rc, evt, be);
if (result < 0)
return result;
/* FIXME: handle alien beacons. */
if (be->bBeaconType == UWB_RC_BEACON_TYPE_OL_ALIEN ||
be->bBeaconType == UWB_RC_BEACON_TYPE_NOL_ALIEN) {
return -ENOSYS;
}
bf = (struct uwb_beacon_frame *) be->BeaconInfo;
/*
* Drop beacons from devices with a NULL EUI-48 -- they cannot
* be uniquely identified.
*
* It's expected that these will all be WUSB devices and they
* have a WUSB specific connection method so ignoring them
* here shouldn't be a problem.
*/
if (uwb_mac_addr_bcast(&bf->Device_Identifier))
return 0;
mutex_lock(&rc->uwb_beca.mutex);
bce = __uwb_beca_find_bymac(rc, &bf->Device_Identifier);
if (bce == NULL) {
/* Not in there, a new device is pinging */
uwb_beacon_print(evt->rc, be, bf);
bce = __uwb_beca_add(rc, be, bf, evt->ts_jiffies);
if (bce == NULL) {
mutex_unlock(&rc->uwb_beca.mutex);
return -ENOMEM;
}
}
mutex_unlock(&rc->uwb_beca.mutex);
mutex_lock(&bce->mutex);
/* purge old beacon data */
kfree(bce->be);
/* Update commonly used fields */
bce->ts_jiffies = evt->ts_jiffies;
bce->be = be;
bce->dev_addr = bf->hdr.SrcAddr;
bce->mac_addr = &bf->Device_Identifier;
be->wBPSTOffset = le16_to_cpu(be->wBPSTOffset);
be->wBeaconInfoLength = le16_to_cpu(be->wBeaconInfoLength);
stats_add_sample(&bce->lqe_stats, be->bLQI - 7);
stats_add_sample(&bce->rssi_stats, be->bRSSI + 18);
/*
* This might be a beacon from a new device.
*/
if (bce->uwb_dev == NULL)
uwbd_dev_onair(evt->rc, bce);
mutex_unlock(&bce->mutex);
return 1; /* we keep the event data */
}
/*
* Handle UWB_RC_EVT_BEACON_SIZE events
*
* XXXXX
*/
int uwbd_evt_handle_rc_beacon_size(struct uwb_event *evt)
{
int result = -EINVAL;
struct device *dev = &evt->rc->uwb_dev.dev;
struct uwb_rc_evt_beacon_size *bs;
/* Is there enough data to decode the event? */
if (evt->notif.size < sizeof(*bs)) {
dev_err(dev, "BEACON SIZE notification: Not enough data to "
"decode (%zu vs %zu bytes needed)\n",
evt->notif.size, sizeof(*bs));
goto error;
}
bs = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon_size, rceb);
if (0)
dev_info(dev, "Beacon size changed to %u bytes "
"(FIXME: action?)\n", le16_to_cpu(bs->wNewBeaconSize));
else {
/* temporary hack until we do something with this message... */
static unsigned count;
if (++count % 1000 == 0)
dev_info(dev, "Beacon size changed %u times "
"(FIXME: action?)\n", count);
}
result = 0;
error:
return result;
}
/**
* uwbd_evt_handle_rc_bp_slot_change - handle a BP_SLOT_CHANGE event
* @evt: the BP_SLOT_CHANGE notification from the radio controller
*
* If the event indicates that no beacon period slots were available
* then radio controller has transitioned to a non-beaconing state.
* Otherwise, simply save the current beacon slot.
*/
int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *evt)
{
struct uwb_rc *rc = evt->rc;
struct device *dev = &rc->uwb_dev.dev;
struct uwb_rc_evt_bp_slot_change *bpsc;
if (evt->notif.size < sizeof(*bpsc)) {
dev_err(dev, "BP SLOT CHANGE event: Not enough data\n");
return -EINVAL;
}
bpsc = container_of(evt->notif.rceb, struct uwb_rc_evt_bp_slot_change, rceb);
if (uwb_rc_evt_bp_slot_change_no_slot(bpsc)) {
dev_err(dev, "stopped beaconing: No free slots in BP\n");
mutex_lock(&rc->uwb_dev.mutex);
rc->beaconing = -1;
mutex_unlock(&rc->uwb_dev.mutex);
} else
rc->uwb_dev.beacon_slot = uwb_rc_evt_bp_slot_change_slot_num(bpsc);
return 0;
}
/**
* Handle UWB_RC_EVT_BPOIE_CHANGE events
*
* XXXXX
*/
struct uwb_ie_bpo {
struct uwb_ie_hdr hdr;
u8 bp_length;
u8 data[];
} __attribute__((packed));
int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *evt)
{
int result = -EINVAL;
struct device *dev = &evt->rc->uwb_dev.dev;
struct uwb_rc_evt_bpoie_change *bpoiec;
struct uwb_ie_bpo *bpoie;
static unsigned count; /* FIXME: this is a temp hack */
size_t iesize;
/* Is there enough data to decode it? */
if (evt->notif.size < sizeof(*bpoiec)) {
dev_err(dev, "BPOIEC notification: Not enough data to "
"decode (%zu vs %zu bytes needed)\n",
evt->notif.size, sizeof(*bpoiec));
goto error;
}
bpoiec = container_of(evt->notif.rceb, struct uwb_rc_evt_bpoie_change, rceb);
iesize = le16_to_cpu(bpoiec->wBPOIELength);
if (iesize < sizeof(*bpoie)) {
dev_err(dev, "BPOIEC notification: Not enough IE data to "
"decode (%zu vs %zu bytes needed)\n",
iesize, sizeof(*bpoie));
goto error;
}
if (++count % 1000 == 0) /* Lame placeholder */
dev_info(dev, "BPOIE: %u changes received\n", count);
/*
* FIXME: At this point we should go over all the IEs in the
* bpoiec->BPOIE array and act on each.
*/
result = 0;
error:
return result;
}
/*
* Print beaconing state.
*/
static ssize_t uwb_rc_beacon_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_rc *rc = uwb_dev->rc;
ssize_t result;
mutex_lock(&rc->uwb_dev.mutex);
result = sprintf(buf, "%d\n", rc->beaconing);
mutex_unlock(&rc->uwb_dev.mutex);
return result;
}
/*
* Start beaconing on the specified channel, or stop beaconing.
*/
static ssize_t uwb_rc_beacon_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_rc *rc = uwb_dev->rc;
int channel;
ssize_t result = -EINVAL;
result = sscanf(buf, "%d", &channel);
if (result >= 1)
result = uwb_radio_force_channel(rc, channel);
return result < 0 ? result : size;
}
DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, uwb_rc_beacon_show, uwb_rc_beacon_store);
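
As an illustration of the channel convention implemented by uwb_rc_beacon() above (a non-negative channel starts beaconing, -1 stops it), a hypothetical caller might look like the sketch below; only the uwb_rc_beacon() signature comes from this file, while the function name and the channel number are assumptions:

static int example_toggle_beaconing(struct uwb_rc *rc, bool start)
{
        if (start)
                /* start beaconing on channel 9 with a zero BPST offset */
                return uwb_rc_beacon(rc, 9, 0);

        /* channel == -1 means "stop beaconing" */
        return uwb_rc_beacon(rc, -1, 0);
}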

drivers/staging/uwb/driver.c View File

@@ -1,143 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band
* Driver initialization, etc
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* FIXME: docs
*
* Life cycle: FIXME: explain
*
* UWB radio controller:
*
* 1. alloc a uwb_rc, zero it
* 2. call uwb_rc_init() on it to set it up + ops (won't do any
* kind of allocation)
* 3. register (now it is owned by the UWB stack--deregister before
* freeing/destroying).
* 4. It lives on its own now (the UWB stack handles it)--when it
* disconnects, call unregister()
* 5. free it.
*
* Make sure you have a reference to the uwb_rc before calling
* any of the UWB API functions.
*
* TODO:
*
* 1. Locking and life cycle management is crappy still. All entry
* points to the UWB HCD API assume you have a reference on the
* uwb_rc structure and that it won't go away. They mutex lock it
* before doing anything.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/random.h>
#include "uwb-internal.h"
/* UWB stack attributes (or 'global' constants) */
/**
* If a beacon disappears for longer than this, then we consider the
* device that was represented by that beacon to be gone.
*
* ECMA-368[17.2.3, last para] establishes that a device must not
* consider a device to be its neighbour if it doesn't receive a beacon
* for more than mMaxLostBeacons. mMaxLostBeacons is defined in
* ECMA-368[17.16] as 3; because we can get only one beacon per
* superframe, that'd be 3 * 65ms = 195 ~ 200 ms. Let's give it time
* for jitter and stuff and make it 500 ms.
*/
unsigned long beacon_timeout_ms = 500;
static
ssize_t beacon_timeout_ms_show(struct class *class,
struct class_attribute *attr,
char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%lu\n", beacon_timeout_ms);
}
static
ssize_t beacon_timeout_ms_store(struct class *class,
struct class_attribute *attr,
const char *buf, size_t size)
{
unsigned long bt;
ssize_t result;
result = sscanf(buf, "%lu", &bt);
if (result != 1)
return -EINVAL;
beacon_timeout_ms = bt;
return size;
}
static CLASS_ATTR_RW(beacon_timeout_ms);
static struct attribute *uwb_class_attrs[] = {
&class_attr_beacon_timeout_ms.attr,
NULL,
};
ATTRIBUTE_GROUPS(uwb_class);
/** Device model classes */
struct class uwb_rc_class = {
.name = "uwb_rc",
.class_groups = uwb_class_groups,
};
static int __init uwb_subsys_init(void)
{
int result = 0;
result = uwb_est_create();
if (result < 0) {
printk(KERN_ERR "uwb: Can't initialize EST subsystem\n");
goto error_est_init;
}
result = class_register(&uwb_rc_class);
if (result < 0)
goto error_uwb_rc_class_register;
/* Register the UWB bus */
result = bus_register(&uwb_bus_type);
if (result) {
pr_err("%s - registering bus driver failed\n", __func__);
goto exit_bus;
}
uwb_dbg_init();
return 0;
exit_bus:
class_unregister(&uwb_rc_class);
error_uwb_rc_class_register:
uwb_est_destroy();
error_est_init:
return result;
}
module_init(uwb_subsys_init);
static void __exit uwb_subsys_exit(void)
{
uwb_dbg_exit();
bus_unregister(&uwb_bus_type);
class_unregister(&uwb_rc_class);
uwb_est_destroy();
return;
}
module_exit(uwb_subsys_exit);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Ultra Wide Band core");
MODULE_LICENSE("GPL");
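
The header comment of this file outlines the radio controller life cycle (allocate, uwb_rc_init(), register, deregister, free). The sketch below walks through that sequence; the uwb_rc_alloc()/uwb_rc_add()/uwb_rc_rm()/uwb_rc_put() helpers are assumed to come from the rest of the removed UWB stack and do not appear in this hunk, so treat this purely as an illustration:

static int example_rc_lifecycle(struct device *parent, void *priv)
{
        struct uwb_rc *rc;
        int ret;

        /* steps 1-2: allocate a zeroed uwb_rc and set it up */
        rc = uwb_rc_alloc();
        if (!rc)
                return -ENOMEM;

        /* ... fill in the radio controller's ops callbacks here ... */

        /* step 3: register with the UWB stack */
        ret = uwb_rc_add(rc, parent, priv);
        if (ret) {
                uwb_rc_put(rc);
                return ret;
        }

        /* ... radio controller is live and owned by the stack ... */

        /* steps 4-5: deregister on disconnect, then drop the reference */
        uwb_rc_rm(rc);
        uwb_rc_put(rc);
        return 0;
}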

drivers/staging/uwb/drp-avail.c View File

@@ -1,278 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band
* DRP availability management
*
* Copyright (C) 2005-2006 Intel Corporation
* Reinette Chatre <reinette.chatre@intel.com>
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*
* Manage DRP Availability (the MAS available for DRP
* reservations). Thus:
*
* - Handle DRP Availability Change notifications
*
* - Allow the reservation manager to indicate MAS reserved/released
* by local (owned by/targeted at the radio controller)
* reservations.
*
* - Based on the two sources above, generate a DRP Availability IE to
* be included in the beacon.
*
* See also the documentation for struct uwb_drp_avail.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitmap.h>
#include "uwb-internal.h"
/**
* uwb_drp_avail_init - initialize an RC's MAS availability
*
* All MAS are available initially. The RC will inform us which
* slots are used for the BP (it may change in size).
*/
void uwb_drp_avail_init(struct uwb_rc *rc)
{
bitmap_fill(rc->drp_avail.global, UWB_NUM_MAS);
bitmap_fill(rc->drp_avail.local, UWB_NUM_MAS);
bitmap_fill(rc->drp_avail.pending, UWB_NUM_MAS);
}
/*
* Determine MAS available for new local reservations.
*
* avail = global & local & pending
*/
void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail)
{
bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS);
}
/**
* uwb_drp_avail_reserve_pending - reserve MAS for a new reservation
* @rc: the radio controller
* @mas: the MAS to reserve
*
* Returns 0 on success, or -EBUSY if the MAS requested aren't available.
*/
int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas)
{
struct uwb_mas_bm avail;
uwb_drp_available(rc, &avail);
if (!bitmap_subset(mas->bm, avail.bm, UWB_NUM_MAS))
return -EBUSY;
bitmap_andnot(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
return 0;
}
/**
* uwb_drp_avail_reserve - reserve MAS for an established reservation
* @rc: the radio controller
* @mas: the MAS to reserve
*/
void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas)
{
bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
bitmap_andnot(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
rc->drp_avail.ie_valid = false;
}
/**
* uwb_drp_avail_release - release MAS from a pending or established reservation
* @rc: the radio controller
* @mas: the MAS to release
*/
void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas)
{
bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
rc->drp_avail.ie_valid = false;
uwb_rsv_handle_drp_avail_change(rc);
}
/**
* uwb_drp_avail_ie_update - update the DRP Availability IE
* @rc: the radio controller
*
* avail = global & local
*/
void uwb_drp_avail_ie_update(struct uwb_rc *rc)
{
struct uwb_mas_bm avail;
bitmap_and(avail.bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
rc->drp_avail.ie.hdr.element_id = UWB_IE_DRP_AVAILABILITY;
rc->drp_avail.ie.hdr.length = UWB_NUM_MAS / 8;
uwb_mas_bm_copy_le(rc->drp_avail.ie.bmp, &avail);
rc->drp_avail.ie_valid = true;
}
/**
* Create an unsigned long from a buffer containing a byte stream.
*
* @array: pointer to buffer
* @itr: index of buffer from where we start
* @len: the buffer's remaining size may not be an exact multiple of
* sizeof(unsigned long); @len is the length of the buffer that needs
* to be converted. This will be sizeof(unsigned long) or smaller
* (BUG if not). If it is smaller, then we will pad the remaining
* space of the result with zeroes.
*/
static
unsigned long get_val(u8 *array, size_t itr, size_t len)
{
unsigned long val = 0;
size_t top = itr + len;
BUG_ON(len > sizeof(val));
while (itr < top) {
val <<= 8;
val |= array[top - 1];
top--;
}
val <<= 8 * (sizeof(val) - len); /* padding */
return val;
}
/**
* Initialize bitmap from data buffer.
*
* The bitmap to be converted could come from an IE, for example a
* DRP Availability IE.
* From ECMA-368 1.0 [16.8.7]: "
* octets: 1 1 N * (0 to 32)
* Element ID Length (=N) DRP Availability Bitmap
*
* The DRP Availability Bitmap field is up to 256 bits long, one
* bit for each MAS in the superframe, where the least-significant
* bit of the field corresponds to the first MAS in the superframe
* and successive bits correspond to successive MASs."
*
* The DRP Availability bitmap is in octets from 0 to 32, so octet
* 32 contains bits for MAS 1-8, etc. If the bitmap is smaller than 32
* octets, the bits in octets not included at the end of the bitmap are
* treated as zero. In this case (when the bitmap is smaller than 32
* octets) the MAS represented range from MAS 1 to MAS (size of bitmap)
* with the last octet still containing bits for MAS 1-8, etc.
*
* For example:
* F00F0102 03040506 0708090A 0B0C0D0E 0F010203
* ^^^^
* ||||
* ||||
* |||\LSB of byte is MAS 9
* ||\MSB of byte is MAS 16
* |\LSB of first byte is MAS 1
* \ MSB of byte is MAS 8
*
* An example of this encoding can be found in ECMA-368 Annex-D [Table D.11]
*
* The resulting bitmap will have the following mapping:
* bit position 0 == MAS 1
* bit position 1 == MAS 2
* ...
* bit position (UWB_NUM_MAS - 1) == MAS UWB_NUM_MAS
*
* @bmp_itr: pointer to bitmap (can be declared with DECLARE_BITMAP)
* @buffer: pointer to buffer containing bitmap data in big endian
* format (MSB first)
* @buffer_size:number of bytes with which bitmap should be initialized
*/
static
void buffer_to_bmp(unsigned long *bmp_itr, void *_buffer,
size_t buffer_size)
{
u8 *buffer = _buffer;
size_t itr, len;
unsigned long val;
itr = 0;
while (itr < buffer_size) {
len = buffer_size - itr >= sizeof(val) ?
sizeof(val) : buffer_size - itr;
val = get_val(buffer, itr, len);
bmp_itr[itr / sizeof(val)] = val;
itr += sizeof(val);
}
}
/**
* Extract DRP Availability bitmap from the notification.
*
* The notification that comes in contains a bitmap of (UWB_NUM_MAS / 8) bytes
* We convert that to our internal representation.
*/
static
int uwbd_evt_get_drp_avail(struct uwb_event *evt, unsigned long *bmp)
{
struct device *dev = &evt->rc->uwb_dev.dev;
struct uwb_rc_evt_drp_avail *drp_evt;
int result = -EINVAL;
/* Is there enough data to decode the event? */
if (evt->notif.size < sizeof(*drp_evt)) {
dev_err(dev, "DRP Availability Change: Not enough "
"data to decode event [%zu bytes, %zu "
"needed]\n", evt->notif.size, sizeof(*drp_evt));
goto error;
}
drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp_avail, rceb);
buffer_to_bmp(bmp, drp_evt->bmp, UWB_NUM_MAS/8);
result = 0;
error:
return result;
}
/**
* Process an incoming DRP Availability notification.
*
* @evt: Event information (packs the actual event data, which
* radio controller it came to, etc).
*
* @returns: 0 on success (so uwbd() frees the event buffer), < 0
* on error.
*
* According to ECMA-368 1.0 [16.8.7], bits set to ONE indicate that
* the MAS slot is available, bits set to ZERO indicate that the slot
* is busy.
*
* So we clear available slots, we set used slots :)
*
* The notification only marks non-availability based on the BP and
* received DRP IEs that are not for this radio controller. A copy of
* this bitmap is needed to generate the real availability (which
* includes local and pending reservations).
*
* The DRP Availability IE that this radio controller emits will need
* to be updated.
*/
int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt)
{
int result;
struct uwb_rc *rc = evt->rc;
DECLARE_BITMAP(bmp, UWB_NUM_MAS);
result = uwbd_evt_get_drp_avail(evt, bmp);
if (result < 0)
return result;
mutex_lock(&rc->rsvs_mutex);
bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS);
rc->drp_avail.ie_valid = false;
uwb_rsv_handle_drp_avail_change(rc);
mutex_unlock(&rc->rsvs_mutex);
uwb_rsv_sched_update(rc);
return 0;
}

drivers/staging/uwb/drp-ie.c View File

@@ -1,305 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* UWB DRP IE management.
*
* Copyright (C) 2005-2006 Intel Corporation
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include "uwb.h"
#include "uwb-internal.h"
/*
* Return the reason code for a reservation's DRP IE.
*/
static int uwb_rsv_reason_code(struct uwb_rsv *rsv)
{
static const int reason_codes[] = {
[UWB_RSV_STATE_O_INITIATED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_O_PENDING] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_O_MODIFIED] = UWB_DRP_REASON_MODIFIED,
[UWB_RSV_STATE_O_ESTABLISHED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_O_TO_BE_MOVED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_O_MOVE_COMBINING] = UWB_DRP_REASON_MODIFIED,
[UWB_RSV_STATE_O_MOVE_REDUCING] = UWB_DRP_REASON_MODIFIED,
[UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_T_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_T_CONFLICT] = UWB_DRP_REASON_CONFLICT,
[UWB_RSV_STATE_T_PENDING] = UWB_DRP_REASON_PENDING,
[UWB_RSV_STATE_T_DENIED] = UWB_DRP_REASON_DENIED,
[UWB_RSV_STATE_T_RESIZED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
[UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING,
[UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED,
};
return reason_codes[rsv->state];
}
/*
* Return the reason code for a reservation's companion DRP IE.
*/
static int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv)
{
static const int companion_reason_codes[] = {
[UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
[UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING,
[UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED,
};
return companion_reason_codes[rsv->state];
}
/*
* Return the status bit for a reservation's DRP IE.
*/
int uwb_rsv_status(struct uwb_rsv *rsv)
{
static const int statuses[] = {
[UWB_RSV_STATE_O_INITIATED] = 0,
[UWB_RSV_STATE_O_PENDING] = 0,
[UWB_RSV_STATE_O_MODIFIED] = 1,
[UWB_RSV_STATE_O_ESTABLISHED] = 1,
[UWB_RSV_STATE_O_TO_BE_MOVED] = 0,
[UWB_RSV_STATE_O_MOVE_COMBINING] = 1,
[UWB_RSV_STATE_O_MOVE_REDUCING] = 1,
[UWB_RSV_STATE_O_MOVE_EXPANDING] = 1,
[UWB_RSV_STATE_T_ACCEPTED] = 1,
[UWB_RSV_STATE_T_CONFLICT] = 0,
[UWB_RSV_STATE_T_PENDING] = 0,
[UWB_RSV_STATE_T_DENIED] = 0,
[UWB_RSV_STATE_T_RESIZED] = 1,
[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1,
[UWB_RSV_STATE_T_EXPANDING_PENDING] = 1,
[UWB_RSV_STATE_T_EXPANDING_DENIED] = 1,
};
return statuses[rsv->state];
}
/*
* Return the status bit for a reservation's companion DRP IE.
*/
int uwb_rsv_companion_status(struct uwb_rsv *rsv)
{
static const int companion_statuses[] = {
[UWB_RSV_STATE_O_MOVE_EXPANDING] = 0,
[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0,
[UWB_RSV_STATE_T_EXPANDING_PENDING] = 0,
[UWB_RSV_STATE_T_EXPANDING_DENIED] = 0,
};
return companion_statuses[rsv->state];
}
/*
* Allocate a DRP IE.
*
* To save having to free/allocate a DRP IE when its MAS changes,
* enough memory is allocated for the maximum number of DRP
* allocation fields. This gives an overhead per reservation of up to
* (UWB_NUM_ZONES - 1) * 4 = 60 octets.
*/
static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
{
struct uwb_ie_drp *drp_ie;
drp_ie = kzalloc(struct_size(drp_ie, allocs, UWB_NUM_ZONES),
GFP_KERNEL);
if (drp_ie)
drp_ie->hdr.element_id = UWB_IE_DRP;
return drp_ie;
}
/*
* Fill a DRP IE's allocation fields from a MAS bitmap.
*/
static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
struct uwb_mas_bm *mas)
{
int z, i, num_fields = 0, next = 0;
struct uwb_drp_alloc *zones;
__le16 current_bmp;
DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);
zones = drp_ie->allocs;
bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);
/* Determine unique MAS bitmaps in zones from bitmap. */
for (z = 0; z < UWB_NUM_ZONES; z++) {
bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
bool found = false;
current_bmp = (__le16) *tmp_mas_bm;
for (i = 0; i < next; i++) {
if (current_bmp == zones[i].mas_bm) {
zones[i].zone_bm |= 1 << z;
found = true;
break;
}
}
if (!found) {
num_fields++;
zones[next].zone_bm = 1 << z;
zones[next].mas_bm = current_bmp;
next++;
}
}
bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
}
/* Store in format ready for transmission (le16). */
for (i = 0; i < num_fields; i++) {
drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
}
drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
+ num_fields * sizeof(struct uwb_drp_alloc);
}
/**
* uwb_drp_ie_update - update a reservation's DRP IE
* @rsv: the reservation
*/
int uwb_drp_ie_update(struct uwb_rsv *rsv)
{
struct uwb_ie_drp *drp_ie;
struct uwb_rsv_move *mv;
int unsafe;
if (rsv->state == UWB_RSV_STATE_NONE) {
kfree(rsv->drp_ie);
rsv->drp_ie = NULL;
return 0;
}
unsafe = rsv->mas.unsafe ? 1 : 0;
if (rsv->drp_ie == NULL) {
rsv->drp_ie = uwb_drp_ie_alloc();
if (rsv->drp_ie == NULL)
return -ENOMEM;
}
drp_ie = rsv->drp_ie;
uwb_ie_drp_set_unsafe(drp_ie, unsafe);
uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker);
uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv));
uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv));
uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv));
uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
uwb_ie_drp_set_type(drp_ie, rsv->type);
if (uwb_rsv_is_owner(rsv)) {
switch (rsv->target.type) {
case UWB_RSV_TARGET_DEV:
drp_ie->dev_addr = rsv->target.dev->dev_addr;
break;
case UWB_RSV_TARGET_DEVADDR:
drp_ie->dev_addr = rsv->target.devaddr;
break;
}
} else
drp_ie->dev_addr = rsv->owner->dev_addr;
uwb_drp_ie_from_bm(drp_ie, &rsv->mas);
if (uwb_rsv_has_two_drp_ies(rsv)) {
mv = &rsv->mv;
if (mv->companion_drp_ie == NULL) {
mv->companion_drp_ie = uwb_drp_ie_alloc();
if (mv->companion_drp_ie == NULL)
return -ENOMEM;
}
drp_ie = mv->companion_drp_ie;
/* keep all the same configuration of the main drp_ie */
memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp));
/* FIXME: handle properly the unsafe bit */
uwb_ie_drp_set_unsafe(drp_ie, 1);
uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv));
uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv));
uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas);
}
rsv->ie_valid = true;
return 0;
}
/*
* Set MAS bits from given MAS bitmap in a single zone of large bitmap.
*
* We are given a zone id and the MAS bitmap of bits that need to be set in
* this zone. Note that this zone may already have bits set and this only
* adds settings - we cannot simply assign the MAS bitmap contents to the
* zone contents. We iterate over the bits (MAS) in the zone and set the
* bits that are set in the given MAS bitmap.
*/
static
void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm)
{
int mas;
u16 mas_mask;
for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
mas_mask = 1 << mas;
if (mas_bm & mas_mask)
set_bit(zone * UWB_NUM_ZONES + mas, bm->bm);
}
}
/**
* uwb_drp_ie_zones_to_bm - convert DRP allocation fields to a bitmap
* @mas: MAS bitmap that will be populated to correspond to the
* allocation fields in the DRP IE
* @drp_ie: the DRP IE that contains the allocation fields.
*
* The input format is an array of MAS allocation fields (16 bit Zone
* bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section
* 16.8.6. The output is a full 256 bit MAS bitmap.
*
* We go over all the allocation fields, for each allocation field we
* know which zones are impacted. We iterate over all the zones
* impacted and call a function that will set the correct MAS bits in
* each zone.
*/
void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
{
int numallocs = (drp_ie->hdr.length - 4) / 4;
const struct uwb_drp_alloc *alloc;
int cnt;
u16 zone_bm, mas_bm;
u8 zone;
u16 zone_mask;
bitmap_zero(bm->bm, UWB_NUM_MAS);
for (cnt = 0; cnt < numallocs; cnt++) {
alloc = &drp_ie->allocs[cnt];
zone_bm = le16_to_cpu(alloc->zone_bm);
mas_bm = le16_to_cpu(alloc->mas_bm);
for (zone = 0; zone < UWB_NUM_ZONES; zone++) {
zone_mask = 1 << zone;
if (zone_bm & zone_mask)
uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm);
}
}
}
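/*
 * Illustrative sketch (not part of the original driver): expanding one DRP
 * allocation field by hand, mirroring uwb_drp_ie_to_bm() above.  Assuming
 * the ECMA-368 layout of 16 zones x 16 MAS each (256 MAS per superframe),
 * a field with zone_bm = 0x0005 and mas_bm = 0x00ff covers MAS 0-7 (zone 0)
 * and MAS 32-39 (zone 2).  Each allocation field is 4 bytes (two __le16
 * bitmaps); the 4 bytes subtracted in the decode above are the fixed part
 * of the DRP IE body, hence numallocs = (hdr.length - 4) / 4.
 */
static void ex_expand_alloc(u16 zone_bm, u16 mas_bm, bool mas[256])
{
	int zone, m;

	for (zone = 0; zone < 16; zone++) {
		if (!(zone_bm & (1 << zone)))
			continue;
		for (m = 0; m < 16; m++)
			if (mas_bm & (1 << m))
				mas[zone * 16 + m] = true;
	}
}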


@ -1,842 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band
* Dynamic Reservation Protocol handling
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*/
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "uwb-internal.h"
/* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */
enum uwb_drp_conflict_action {
/* Reservation is maintained, no action needed */
UWB_DRP_CONFLICT_MANTAIN = 0,
/* the device shall not transmit frames in conflicting MASs in
* the following superframe. If the device is the reservation
* target, it shall also set the Reason Code in its DRP IE to
* Conflict in its beacon in the following superframe.
*/
UWB_DRP_CONFLICT_ACT1,
/* the device shall not set the Reservation Status bit to ONE
* and shall not transmit frames in conflicting MASs. If the
* device is the reservation target, it shall also set the
* Reason Code in its DRP IE to Conflict.
*/
UWB_DRP_CONFLICT_ACT2,
/* the device shall not transmit frames in conflicting MASs in
* the following superframe. It shall remove the conflicting
* MASs from the reservation or set the Reservation Status to
* ZERO in its beacon in the following superframe. If the
* device is the reservation target, it shall also set the
* Reason Code in its DRP IE to Conflict.
*/
UWB_DRP_CONFLICT_ACT3,
};
static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
struct uwb_rceb *reply, ssize_t reply_size)
{
struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
unsigned long flags;
if (r != NULL) {
if (r->bResultCode != UWB_RC_RES_SUCCESS)
dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
uwb_rc_strerror(r->bResultCode), r->bResultCode);
} else
dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");
spin_lock_irqsave(&rc->rsvs_lock, flags);
if (rc->set_drp_ie_pending > 1) {
rc->set_drp_ie_pending = 0;
uwb_rsv_queue_update(rc);
} else {
rc->set_drp_ie_pending = 0;
}
spin_unlock_irqrestore(&rc->rsvs_lock, flags);
}
/**
* Construct and send the SET DRP IE
*
* @rc: UWB Host controller
* @returns: >= 0 number of bytes still available in the beacon
* < 0 errno code on error.
*
* See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
* device to include in its beacon at the same time. We thus have to
* traverse all reservations and include the DRP IEs of all PENDING
* and NEGOTIATED reservations in a SET DRP command for transmission.
*
* A DRP Availability IE is appended.
*
* rc->rsvs_mutex is held
*
* FIXME We currently ignore the returned value indicating the remaining space
* in beacon. This could be used to deny reservation requests earlier if
* determined that they would cause the beacon space to be exceeded.
*/
int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
{
int result;
struct uwb_rc_cmd_set_drp_ie *cmd;
struct uwb_rsv *rsv;
struct uwb_rsv_move *mv;
int num_bytes = 0;
u8 *IEDataptr;
result = -ENOMEM;
/* First traverse all reservations to determine memory needed. */
list_for_each_entry(rsv, &rc->reservations, rc_node) {
if (rsv->drp_ie != NULL) {
num_bytes += rsv->drp_ie->hdr.length + 2;
if (uwb_rsv_has_two_drp_ies(rsv) &&
(rsv->mv.companion_drp_ie != NULL)) {
mv = &rsv->mv;
num_bytes +=
mv->companion_drp_ie->hdr.length + 2;
}
}
}
num_bytes += sizeof(rc->drp_avail.ie);
cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
if (cmd == NULL)
goto error;
cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
cmd->wIELength = num_bytes;
IEDataptr = (u8 *)&cmd->IEData[0];
/* FIXME: DRP avail IE is not always needed */
/* put DRP avail IE first */
memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
IEDataptr += sizeof(struct uwb_ie_drp_avail);
/* Next traverse all reservations to place IEs in allocated memory. */
list_for_each_entry(rsv, &rc->reservations, rc_node) {
if (rsv->drp_ie != NULL) {
memcpy(IEDataptr, rsv->drp_ie,
rsv->drp_ie->hdr.length + 2);
IEDataptr += rsv->drp_ie->hdr.length + 2;
if (uwb_rsv_has_two_drp_ies(rsv) &&
(rsv->mv.companion_drp_ie != NULL)) {
mv = &rsv->mv;
memcpy(IEDataptr, mv->companion_drp_ie,
mv->companion_drp_ie->hdr.length + 2);
IEDataptr +=
mv->companion_drp_ie->hdr.length + 2;
}
}
}
result = uwb_rc_cmd_async(rc, "SET-DRP-IE",
&cmd->rccb, sizeof(*cmd) + num_bytes,
UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
uwb_rc_set_drp_cmd_done, NULL);
rc->set_drp_ie_pending = 1;
kfree(cmd);
error:
return result;
}
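/*
 * Illustrative sketch (not part of the original driver): the buffer sizing
 * used above.  Each IE occupies hdr.length + 2 bytes in the command payload
 * (the 2 extra bytes are the IE's element-ID and length octets, which
 * hdr.length does not count), and the DRP Availability IE always goes first.
 */
static size_t ex_set_drp_ie_bytes(size_t avail_ie_bytes,
				  const u8 *ie_body_lengths, size_t n_ies)
{
	size_t total = avail_ie_bytes;	/* DRP Availability IE, placed first */
	size_t i;

	for (i = 0; i < n_ies; i++)
		total += ie_body_lengths[i] + 2; /* IE body + 2-byte header */
	return total;
}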
/*
* Evaluate the action to perform using conflict resolution rules
*
* Return a uwb_drp_conflict_action.
*/
static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
struct uwb_rsv *rsv, int our_status)
{
int our_tie_breaker = rsv->tiebreaker;
int our_type = rsv->type;
int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;
int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
int ext_status = uwb_ie_drp_status(ext_drp_ie);
int ext_type = uwb_ie_drp_type(ext_drp_ie);
/* [ECMA-368 2nd Edition] 17.4.6 */
if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
return UWB_DRP_CONFLICT_MANTAIN;
}
/* [ECMA-368 2nd Edition] 17.4.6-1 */
if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
return UWB_DRP_CONFLICT_MANTAIN;
}
/* [ECMA-368 2nd Edition] 17.4.6-2 */
if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
/* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */
return UWB_DRP_CONFLICT_ACT1;
}
/* [ECMA-368 2nd Edition] 17.4.6-3 */
if (our_status == 0 && ext_status == 1) {
return UWB_DRP_CONFLICT_ACT2;
}
/* [ECMA-368 2nd Edition] 17.4.6-4 */
if (our_status == 1 && ext_status == 0) {
return UWB_DRP_CONFLICT_MANTAIN;
}
/* [ECMA-368 2nd Edition] 17.4.6-5a */
if (our_tie_breaker == ext_tie_breaker &&
our_beacon_slot < ext_beacon_slot) {
return UWB_DRP_CONFLICT_MANTAIN;
}
/* [ECMA-368 2nd Edition] 17.4.6-5b */
if (our_tie_breaker != ext_tie_breaker &&
our_beacon_slot > ext_beacon_slot) {
return UWB_DRP_CONFLICT_MANTAIN;
}
if (our_status == 0) {
if (our_tie_breaker == ext_tie_breaker) {
/* [ECMA-368 2nd Edition] 17.4.6-6a */
if (our_beacon_slot > ext_beacon_slot) {
return UWB_DRP_CONFLICT_ACT2;
}
} else {
/* [ECMA-368 2nd Edition] 17.4.6-6b */
if (our_beacon_slot < ext_beacon_slot) {
return UWB_DRP_CONFLICT_ACT2;
}
}
} else {
if (our_tie_breaker == ext_tie_breaker) {
/* [ECMA-368 2nd Edition] 17.4.6-7a */
if (our_beacon_slot > ext_beacon_slot) {
return UWB_DRP_CONFLICT_ACT3;
}
} else {
/* [ECMA-368 2nd Edition] 17.4.6-7b */
if (our_beacon_slot < ext_beacon_slot) {
return UWB_DRP_CONFLICT_ACT3;
}
}
}
return UWB_DRP_CONFLICT_MANTAIN;
}
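/*
 * Illustrative sketch (not part of the original driver): the "maintain"
 * outcomes of rules 17.4.6-5a/5b handled above reduce to: with equal
 * tie-breaker bits the device in the earlier beacon slot keeps its
 * reservation, with different tie-breaker bits the later slot keeps it.
 */
static bool ex_tiebreak_keeps_rsv(int our_tiebreaker, int ext_tiebreaker,
				  int our_slot, int ext_slot)
{
	if (our_tiebreaker == ext_tiebreaker)
		return our_slot < ext_slot;	/* [ECMA-368] 17.4.6-5a */
	return our_slot > ext_slot;		/* [ECMA-368] 17.4.6-5b */
}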
static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
int ext_beacon_slot,
struct uwb_rsv *rsv,
struct uwb_mas_bm *conflicting_mas)
{
struct uwb_rc *rc = rsv->rc;
struct uwb_rsv_move *mv = &rsv->mv;
struct uwb_drp_backoff_win *bow = &rc->bow;
int action;
action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));
if (uwb_rsv_is_owner(rsv)) {
switch(action) {
case UWB_DRP_CONFLICT_ACT2:
/* try move */
uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
if (bow->can_reserve_extra_mases == false)
uwb_rsv_backoff_win_increment(rc);
break;
case UWB_DRP_CONFLICT_ACT3:
uwb_rsv_backoff_win_increment(rc);
/* drop some mases with reason modified */
/* put in the companion the mases to be dropped */
bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
default:
break;
}
} else {
switch(action) {
case UWB_DRP_CONFLICT_ACT2:
case UWB_DRP_CONFLICT_ACT3:
uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
default:
break;
}
}
}
static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
struct uwb_rsv *rsv, bool companion_only,
struct uwb_mas_bm *conflicting_mas)
{
struct uwb_rc *rc = rsv->rc;
struct uwb_drp_backoff_win *bow = &rc->bow;
struct uwb_rsv_move *mv = &rsv->mv;
int action;
if (companion_only) {
/* status of companion is 0 at this point */
action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
if (uwb_rsv_is_owner(rsv)) {
switch(action) {
case UWB_DRP_CONFLICT_ACT2:
case UWB_DRP_CONFLICT_ACT3:
uwb_rsv_set_state(rsv,
UWB_RSV_STATE_O_ESTABLISHED);
rsv->needs_release_companion_mas = false;
if (bow->can_reserve_extra_mases == false)
uwb_rsv_backoff_win_increment(rc);
uwb_drp_avail_release(rsv->rc,
&rsv->mv.companion_mas);
}
} else { /* rsv is target */
switch(action) {
case UWB_DRP_CONFLICT_ACT2:
case UWB_DRP_CONFLICT_ACT3:
uwb_rsv_set_state(rsv,
UWB_RSV_STATE_T_EXPANDING_CONFLICT);
/* send_drp_avail_ie = true; */
}
}
} else { /* also base part of the reservation is conflicting */
if (uwb_rsv_is_owner(rsv)) {
uwb_rsv_backoff_win_increment(rc);
/* remove companion part */
uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
/* drop some mases with reason modified */
/* put in the companion the mases to be dropped */
bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm,
conflicting_mas->bm, UWB_NUM_MAS);
uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
} else { /* it is a target rsv */
uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
/* send_drp_avail_ie = true; */
}
}
}
static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
struct uwb_rc_evt_drp *drp_evt,
struct uwb_ie_drp *drp_ie,
struct uwb_mas_bm *conflicting_mas)
{
struct uwb_rsv_move *mv;
/* check if the conflicting reservation has two drp_ies */
if (uwb_rsv_has_two_drp_ies(rsv)) {
mv = &rsv->mv;
if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
UWB_NUM_MAS)) {
handle_conflict_expanding(drp_ie,
drp_evt->beacon_slot_number,
rsv, false, conflicting_mas);
} else {
if (bitmap_intersects(mv->companion_mas.bm,
conflicting_mas->bm, UWB_NUM_MAS)) {
handle_conflict_expanding(
drp_ie, drp_evt->beacon_slot_number,
rsv, true, conflicting_mas);
}
}
} else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
UWB_NUM_MAS)) {
handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number,
rsv, conflicting_mas);
}
}
static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
struct uwb_rc_evt_drp *drp_evt,
struct uwb_ie_drp *drp_ie,
struct uwb_mas_bm *conflicting_mas)
{
struct uwb_rsv *rsv;
list_for_each_entry(rsv, &rc->reservations, rc_node) {
uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie,
conflicting_mas);
}
}
static void uwb_drp_process_target_accepted(struct uwb_rc *rc,
struct uwb_rsv *rsv, struct uwb_rc_evt_drp *drp_evt,
struct uwb_ie_drp *drp_ie, struct uwb_mas_bm *mas)
{
struct uwb_rsv_move *mv = &rsv->mv;
int status;
status = uwb_ie_drp_status(drp_ie);
if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
return;
}
if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
/* drp_ie is companion */
if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
/* stroke companion */
uwb_rsv_set_state(rsv,
UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
}
} else {
if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
if (uwb_drp_avail_reserve_pending(rc, mas) == -EBUSY) {
/* FIXME: there is a conflict, find
* the conflicting reservations and
* take a sensible action. Consider
* that in drp_ie there is the
* "neighbour" */
uwb_drp_handle_all_conflict_rsv(rc, drp_evt,
drp_ie, mas);
} else {
/* accept the extra reservation */
bitmap_copy(mv->companion_mas.bm, mas->bm,
UWB_NUM_MAS);
uwb_rsv_set_state(rsv,
UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
}
} else {
if (status) {
uwb_rsv_set_state(rsv,
UWB_RSV_STATE_T_ACCEPTED);
}
}
}
}
/*
* Based on the DRP IE, transition a target reservation to a new
* state.
*/
static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
{
struct device *dev = &rc->uwb_dev.dev;
struct uwb_rsv_move *mv = &rsv->mv;
int status;
enum uwb_drp_reason reason_code;
struct uwb_mas_bm mas;
status = uwb_ie_drp_status(drp_ie);
reason_code = uwb_ie_drp_reason_code(drp_ie);
uwb_drp_ie_to_bm(&mas, drp_ie);
switch (reason_code) {
case UWB_DRP_REASON_ACCEPTED:
uwb_drp_process_target_accepted(rc, rsv, drp_evt, drp_ie, &mas);
break;
case UWB_DRP_REASON_MODIFIED:
/* check to see if we have already modified the reservation */
if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
break;
}
/* find if the owner wants to expand or reduce */
if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
/* owner is reducing */
bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm,
UWB_NUM_MAS);
uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
}
bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
break;
default:
dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
reason_code, status);
}
}
static void uwb_drp_process_owner_accepted(struct uwb_rsv *rsv,
struct uwb_mas_bm *mas)
{
struct uwb_rsv_move *mv = &rsv->mv;
switch (rsv->state) {
case UWB_RSV_STATE_O_PENDING:
case UWB_RSV_STATE_O_INITIATED:
case UWB_RSV_STATE_O_ESTABLISHED:
uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
break;
case UWB_RSV_STATE_O_MODIFIED:
if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
else
uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
break;
case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn't be a problem */
if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
else
uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
break;
case UWB_RSV_STATE_O_MOVE_EXPANDING:
if (bitmap_equal(mas->bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
/* Companion reservation accepted */
uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
} else {
uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
}
break;
case UWB_RSV_STATE_O_MOVE_COMBINING:
if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
else
uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
break;
default:
break;
}
}
/*
* Based on the DRP IE, transition an owner reservation to a new
* state.
*/
static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
struct uwb_rc_evt_drp *drp_evt)
{
struct device *dev = &rc->uwb_dev.dev;
int status;
enum uwb_drp_reason reason_code;
struct uwb_mas_bm mas;
status = uwb_ie_drp_status(drp_ie);
reason_code = uwb_ie_drp_reason_code(drp_ie);
uwb_drp_ie_to_bm(&mas, drp_ie);
if (status) {
switch (reason_code) {
case UWB_DRP_REASON_ACCEPTED:
uwb_drp_process_owner_accepted(rsv, &mas);
break;
default:
dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
reason_code, status);
}
} else {
switch (reason_code) {
case UWB_DRP_REASON_PENDING:
uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
break;
case UWB_DRP_REASON_DENIED:
uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
break;
case UWB_DRP_REASON_CONFLICT:
/* resolve the conflict */
bitmap_complement(mas.bm, src->last_availability_bm,
UWB_NUM_MAS);
uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
break;
default:
dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
reason_code, status);
}
}
}
static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
{
unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;
mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
}
static void uwb_cnflt_update_work(struct work_struct *work)
{
struct uwb_cnflt_alien *cnflt = container_of(work,
struct uwb_cnflt_alien,
cnflt_update_work);
struct uwb_cnflt_alien *c;
struct uwb_rc *rc = cnflt->rc;
unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
mutex_lock(&rc->rsvs_mutex);
list_del(&cnflt->rc_node);
/* update rc global conflicting alien bitmap */
bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);
list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm,
c->mas.bm, UWB_NUM_MAS);
}
queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work,
usecs_to_jiffies(delay_us));
kfree(cnflt);
mutex_unlock(&rc->rsvs_mutex);
}
static void uwb_cnflt_timer(struct timer_list *t)
{
struct uwb_cnflt_alien *cnflt = from_timer(cnflt, t, timer);
queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
}
/*
* We have received a DRP_IE of type Alien BP and we need to make
* sure we do not transmit in conflicting MASs.
*/
static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
struct device *dev = &rc->uwb_dev.dev;
struct uwb_mas_bm mas;
struct uwb_cnflt_alien *cnflt;
unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
uwb_drp_ie_to_bm(&mas, drp_ie);
list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
/* Existing alien BP reservation conflicting
* bitmap, just reset the timer */
uwb_cnflt_alien_stroke_timer(cnflt);
return;
}
}
/* New alien BP reservation conflicting bitmap */
/* alloc and initialize new uwb_cnflt_alien */
cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
if (!cnflt) {
dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
return;
}
INIT_LIST_HEAD(&cnflt->rc_node);
timer_setup(&cnflt->timer, uwb_cnflt_timer, 0);
cnflt->rc = rc;
INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);
bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);
list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);
/* update rc global conflicting alien bitmap */
bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);
queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));
/* start the timer */
uwb_cnflt_alien_stroke_timer(cnflt);
}
static void uwb_drp_process_not_involved(struct uwb_rc *rc,
struct uwb_rc_evt_drp *drp_evt,
struct uwb_ie_drp *drp_ie)
{
struct uwb_mas_bm mas;
uwb_drp_ie_to_bm(&mas, drp_ie);
uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
}
static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
struct uwb_rc_evt_drp *drp_evt,
struct uwb_ie_drp *drp_ie)
{
struct uwb_rsv *rsv;
rsv = uwb_rsv_find(rc, src, drp_ie);
if (!rsv) {
/*
* No reservation? Either it's for a recently
* terminated reservation, or the DRP IE couldn't be
* processed (e.g., an invalid IE or out of memory).
*/
return;
}
/*
* Do nothing with DRP IEs for reservations that have been
* terminated.
*/
if (rsv->state == UWB_RSV_STATE_NONE) {
uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
return;
}
if (uwb_ie_drp_owner(drp_ie))
uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
else
uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);
}
static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
}
/*
* Process a received DRP IE.
*/
static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
{
if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
uwb_drp_handle_alien_drp(rc, drp_ie);
else if (uwb_drp_involves_us(rc, drp_ie))
uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
else
uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
}
/*
* Process a received DRP Availability IE
*/
static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
struct uwb_ie_drp_avail *drp_availability_ie)
{
bitmap_copy(src->last_availability_bm,
drp_availability_ie->bmp, UWB_NUM_MAS);
}
/*
* Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
* from a device.
*/
static
void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
size_t ielen, struct uwb_dev *src_dev)
{
struct device *dev = &rc->uwb_dev.dev;
struct uwb_ie_hdr *ie_hdr;
void *ptr;
ptr = drp_evt->ie_data;
for (;;) {
ie_hdr = uwb_ie_next(&ptr, &ielen);
if (!ie_hdr)
break;
switch (ie_hdr->element_id) {
case UWB_IE_DRP_AVAILABILITY:
uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
break;
case UWB_IE_DRP:
uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
break;
default:
dev_warn(dev, "unexpected IE in DRP notification\n");
break;
}
}
if (ielen > 0)
dev_warn(dev, "%d octets remaining in DRP notification\n",
(int)ielen);
}
/**
* uwbd_evt_handle_rc_drp - handle a DRP_IE event
* @evt: the DRP_IE event from the radio controller
*
* This processes DRP notifications from the radio controller, either
* initiating a new reservation or transitioning an existing
* reservation into a different state.
*
* DRP notifications can occur for three different reasons:
*
* - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
* the target or source have been received.
*
* These DRP IEs could be new or for an existing reservation.
*
* If the DRP IE for an existing reservation ceases to be
* received for at least mMaxLostBeacons, the reservation should be
* considered to be terminated. Note that the TERMINATE reason (see
* below) may not always be signalled (e.g., the remote device has
* two or more reservations established with the RC).
*
* - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
* group conflict with the RC's reservations.
*
* - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
* from a device (i.e., it's terminated all reservations).
*
* Only the software state of the reservations is changed; the setting
* of the radio controller's DRP IEs is done after all the events in
* an event buffer are processed. This saves waiting multiple times
* for the SET_DRP_IE command to complete.
*/
int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
{
struct device *dev = &evt->rc->uwb_dev.dev;
struct uwb_rc *rc = evt->rc;
struct uwb_rc_evt_drp *drp_evt;
size_t ielength, bytes_left;
struct uwb_dev_addr src_addr;
struct uwb_dev *src_dev;
/* Is there enough data to decode the event (and any IEs in
its payload)? */
if (evt->notif.size < sizeof(*drp_evt)) {
dev_err(dev, "DRP event: Not enough data to decode event "
"[%zu bytes left, %zu needed]\n",
evt->notif.size, sizeof(*drp_evt));
return 0;
}
bytes_left = evt->notif.size - sizeof(*drp_evt);
drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
ielength = le16_to_cpu(drp_evt->ie_length);
if (bytes_left != ielength) {
dev_err(dev, "DRP event: Not enough data in payload [%zu"
"bytes left, %zu declared in the event]\n",
bytes_left, ielength);
return 0;
}
memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
if (!src_dev) {
/*
* A DRP notification from an unrecognized device.
*
* This is probably from a WUSB device that doesn't
* have an EUI-48 and therefore doesn't show up in the
* UWB device database. It's safe to simply ignore
* these.
*/
return 0;
}
mutex_lock(&rc->rsvs_mutex);
/* We do not distinguish based on the reason */
uwb_drp_process_all(rc, drp_evt, ielength, src_dev);
mutex_unlock(&rc->rsvs_mutex);
uwb_dev_put(src_dev);
return 0;
}
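/*
 * Illustrative sketch (not part of the original driver): the two length
 * checks performed above on a DRP notification.  The fixed event structure
 * must fit in the notification buffer, and the IE length declared by the
 * event must account for exactly the bytes that follow it.
 */
static bool ex_drp_evt_lengths_ok(size_t notif_size, size_t evt_size,
				  size_t declared_ie_len)
{
	if (notif_size < evt_size)
		return false;			/* truncated event header */
	return notif_size - evt_size == declared_ie_len;
}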


@ -1,450 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band Radio Control
* Event Size Tables management
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* FIXME: docs
*
* Infrastructure, code and data tables for guessing the size of
* events received on the notification endpoints of UWB radio
* controllers.
*
* You define a table of events and for each, its size and how to get
* the extra size.
*
* ENTRY POINTS:
*
* uwb_est_{init/destroy}(): To initialize/release the EST subsystem.
*
* uwb_est_[u]register(): To un/register event size tables
* uwb_est_grow()
*
* uwb_est_find_size(): Get the size of an event
* uwb_est_get_size()
*/
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "uwb-internal.h"
struct uwb_est {
u16 type_event_high;
u16 vendor, product;
u8 entries;
const struct uwb_est_entry *entry;
};
static struct uwb_est *uwb_est;
static u8 uwb_est_size;
static u8 uwb_est_used;
static DEFINE_RWLOCK(uwb_est_lock);
/**
* WUSB Standard Event Size Table, HWA-RC interface
*
* Sizes for events and notifications type 0 (general), high nibble 0.
*/
static
struct uwb_est_entry uwb_est_00_00xx[] = {
[UWB_RC_EVT_IE_RCV] = {
.size = sizeof(struct uwb_rc_evt_ie_rcv),
.offset = 1 + offsetof(struct uwb_rc_evt_ie_rcv, wIELength),
},
[UWB_RC_EVT_BEACON] = {
.size = sizeof(struct uwb_rc_evt_beacon),
.offset = 1 + offsetof(struct uwb_rc_evt_beacon, wBeaconInfoLength),
},
[UWB_RC_EVT_BEACON_SIZE] = {
.size = sizeof(struct uwb_rc_evt_beacon_size),
},
[UWB_RC_EVT_BPOIE_CHANGE] = {
.size = sizeof(struct uwb_rc_evt_bpoie_change),
.offset = 1 + offsetof(struct uwb_rc_evt_bpoie_change,
wBPOIELength),
},
[UWB_RC_EVT_BP_SLOT_CHANGE] = {
.size = sizeof(struct uwb_rc_evt_bp_slot_change),
},
[UWB_RC_EVT_BP_SWITCH_IE_RCV] = {
.size = sizeof(struct uwb_rc_evt_bp_switch_ie_rcv),
.offset = 1 + offsetof(struct uwb_rc_evt_bp_switch_ie_rcv, wIELength),
},
[UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
.size = sizeof(struct uwb_rc_evt_dev_addr_conflict),
},
[UWB_RC_EVT_DRP_AVAIL] = {
.size = sizeof(struct uwb_rc_evt_drp_avail)
},
[UWB_RC_EVT_DRP] = {
.size = sizeof(struct uwb_rc_evt_drp),
.offset = 1 + offsetof(struct uwb_rc_evt_drp, ie_length),
},
[UWB_RC_EVT_BP_SWITCH_STATUS] = {
.size = sizeof(struct uwb_rc_evt_bp_switch_status),
},
[UWB_RC_EVT_CMD_FRAME_RCV] = {
.size = sizeof(struct uwb_rc_evt_cmd_frame_rcv),
.offset = 1 + offsetof(struct uwb_rc_evt_cmd_frame_rcv, dataLength),
},
[UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV] = {
.size = sizeof(struct uwb_rc_evt_channel_change_ie_rcv),
.offset = 1 + offsetof(struct uwb_rc_evt_channel_change_ie_rcv, wIELength),
},
[UWB_RC_CMD_CHANNEL_CHANGE] = {
.size = sizeof(struct uwb_rc_evt_confirm),
},
[UWB_RC_CMD_DEV_ADDR_MGMT] = {
.size = sizeof(struct uwb_rc_evt_dev_addr_mgmt) },
[UWB_RC_CMD_GET_IE] = {
.size = sizeof(struct uwb_rc_evt_get_ie),
.offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength),
},
[UWB_RC_CMD_RESET] = {
.size = sizeof(struct uwb_rc_evt_confirm),
},
[UWB_RC_CMD_SCAN] = {
.size = sizeof(struct uwb_rc_evt_confirm),
},
[UWB_RC_CMD_SET_BEACON_FILTER] = {
.size = sizeof(struct uwb_rc_evt_confirm),
},
[UWB_RC_CMD_SET_DRP_IE] = {
.size = sizeof(struct uwb_rc_evt_set_drp_ie),
},
[UWB_RC_CMD_SET_IE] = {
.size = sizeof(struct uwb_rc_evt_set_ie),
},
[UWB_RC_CMD_SET_NOTIFICATION_FILTER] = {
.size = sizeof(struct uwb_rc_evt_confirm),
},
[UWB_RC_CMD_SET_TX_POWER] = {
.size = sizeof(struct uwb_rc_evt_confirm),
},
[UWB_RC_CMD_SLEEP] = {
.size = sizeof(struct uwb_rc_evt_confirm),
},
[UWB_RC_CMD_START_BEACON] = {
.size = sizeof(struct uwb_rc_evt_confirm),
},
[UWB_RC_CMD_STOP_BEACON] = {
.size = sizeof(struct uwb_rc_evt_confirm),
},
[UWB_RC_CMD_BP_MERGE] = {
.size = sizeof(struct uwb_rc_evt_confirm),
},
[UWB_RC_CMD_SEND_COMMAND_FRAME] = {
.size = sizeof(struct uwb_rc_evt_confirm),
},
[UWB_RC_CMD_SET_ASIE_NOTIF] = {
.size = sizeof(struct uwb_rc_evt_confirm),
},
};
static
struct uwb_est_entry uwb_est_01_00xx[] = {
[UWB_RC_DAA_ENERGY_DETECTED] = {
.size = sizeof(struct uwb_rc_evt_daa_energy_detected),
},
[UWB_RC_SET_DAA_ENERGY_MASK] = {
.size = sizeof(struct uwb_rc_evt_set_daa_energy_mask),
},
[UWB_RC_SET_NOTIFICATION_FILTER_EX] = {
.size = sizeof(struct uwb_rc_evt_set_notification_filter_ex),
},
};
/**
* Initialize the EST subsystem
*
* Register the standard tables also.
*
* FIXME: tag init
*/
int uwb_est_create(void)
{
int result;
uwb_est_size = 2;
uwb_est_used = 0;
uwb_est = kcalloc(uwb_est_size, sizeof(uwb_est[0]), GFP_KERNEL);
if (uwb_est == NULL)
return -ENOMEM;
result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff,
uwb_est_00_00xx, ARRAY_SIZE(uwb_est_00_00xx));
if (result < 0)
goto out;
result = uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, 0xffff, 0xffff,
uwb_est_01_00xx, ARRAY_SIZE(uwb_est_01_00xx));
out:
return result;
}
/** Clean it up */
void uwb_est_destroy(void)
{
kfree(uwb_est);
uwb_est = NULL;
uwb_est_size = uwb_est_used = 0;
}
/**
* Double the capacity of the EST table
*
* @returns 0 if ok, < 0 errno no error.
*/
static
int uwb_est_grow(void)
{
size_t actual_size = uwb_est_size * sizeof(uwb_est[0]);
void *new = kmalloc_array(2, actual_size, GFP_ATOMIC);
if (new == NULL)
return -ENOMEM;
memcpy(new, uwb_est, actual_size);
memset(new + actual_size, 0, actual_size);
kfree(uwb_est);
uwb_est = new;
uwb_est_size *= 2;
return 0;
}
/**
* Register an event size table
*
* Makes room for it if the table is full, and then inserts it in the
* right position (entries are sorted by type, event_high, vendor and
* then product).
*
* @vendor: vendor code for matching against the device (0x0000 and
* 0xffff mean any); use 0x0000 to force all to match without
* checking possible vendor specific ones, 0xffff to match
* after checking vendor specific ones.
*
* @product: product code from that vendor; same matching rules, use
* 0x0000 for not allowing vendor specific matches, 0xffff
* for allowing.
*
* This arrangement just makes the tables sort differently. Because the
* table is sorted by growing type-event_high-vendor-product, a zero
* vendor will match before a 0x456a vendor, which will match
* before a 0xffff vendor.
*
* @returns 0 if ok, < 0 errno on error (-ENOENT if not found).
*/
/* FIXME: add bus type to vendor/product code */
int uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product,
const struct uwb_est_entry *entry, size_t entries)
{
unsigned long flags;
unsigned itr;
int result = 0;
write_lock_irqsave(&uwb_est_lock, flags);
if (uwb_est_used == uwb_est_size) {
result = uwb_est_grow();
if (result < 0)
goto out;
}
/* Find the right spot to insert it in */
for (itr = 0; itr < uwb_est_used; itr++)
if (uwb_est[itr].type_event_high < type
&& uwb_est[itr].vendor < vendor
&& uwb_est[itr].product < product)
break;
/* Shift others to make room for the new one? */
if (itr < uwb_est_used)
memmove(&uwb_est[itr+1], &uwb_est[itr], uwb_est_used - itr);
uwb_est[itr].type_event_high = type << 8 | event_high;
uwb_est[itr].vendor = vendor;
uwb_est[itr].product = product;
uwb_est[itr].entry = entry;
uwb_est[itr].entries = entries;
uwb_est_used++;
out:
write_unlock_irqrestore(&uwb_est_lock, flags);
return result;
}
EXPORT_SYMBOL_GPL(uwb_est_register);
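/*
 * Illustrative usage sketch (not part of the original driver): how a
 * hypothetical vendor driver could register its own event size table.
 * The event type code, vendor/product IDs and the reuse of the
 * uwb_rc_evt_confirm/uwb_rc_evt_get_ie layouts below are made-up
 * placeholders; only the uwb_est_register() call follows the interface
 * defined above.
 */
static const struct uwb_est_entry ex_vendor_est[] = {
	[0x00] = {			/* fixed-size confirmation event */
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[0x01] = {			/* variable size, length in wIELength */
		.size   = sizeof(struct uwb_rc_evt_get_ie),
		.offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength),
		.type   = UWB_EST_16,
	},
};

static int ex_vendor_est_setup(void)
{
	return uwb_est_register(0xfe /* hypothetical event type */, 0x00,
				0x1234 /* vendor */, 0x5678 /* product */,
				ex_vendor_est, ARRAY_SIZE(ex_vendor_est));
}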
/**
* Unregister an event size table
*
* This just removes the specified entry and moves the ones after it
* to fill in the gap. This is needed to keep the list sorted; no
* reallocation is done to reduce the size of the table.
*
* We unregister by all the data we used to register instead of by
* pointer to the @entry array because we might have used the same
* table for a bunch of IDs (for example).
*
* @returns 0 if ok, < 0 errno on error (-ENOENT if not found).
*/
int uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product,
const struct uwb_est_entry *entry, size_t entries)
{
unsigned long flags;
unsigned itr;
struct uwb_est est_cmp = {
.type_event_high = type << 8 | event_high,
.vendor = vendor,
.product = product,
.entry = entry,
.entries = entries
};
write_lock_irqsave(&uwb_est_lock, flags);
for (itr = 0; itr < uwb_est_used; itr++)
if (!memcmp(&uwb_est[itr], &est_cmp, sizeof(est_cmp)))
goto found;
write_unlock_irqrestore(&uwb_est_lock, flags);
return -ENOENT;
found:
if (itr < uwb_est_used - 1) /* Not last one? move ones above */
memmove(&uwb_est[itr], &uwb_est[itr+1], uwb_est_used - itr - 1);
uwb_est_used--;
write_unlock_irqrestore(&uwb_est_lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(uwb_est_unregister);
/**
* Get the size of an event from a table
*
* @rceb: pointer to the buffer with the event
* @rceb_size: size of the area pointed to by @rceb in bytes.
* @returns: > 0 Size of the event
* -ENOSPC An area big enough was not provided to look
* ahead into the event's guts and guess the size.
* -EINVAL Unknown event code (wEvent).
*
* This will look at the received RCEB and guess what is the total
* size. For variable sized events, it will look further ahead into
* their length field to see how much data should be read.
*
* Note this size is *not* final--the neh (Notification/Event Handle)
* might specify an extra size to add.
*/
static
ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est,
u8 event_low, const struct uwb_rceb *rceb,
size_t rceb_size)
{
unsigned offset;
ssize_t size;
struct device *dev = &uwb_rc->uwb_dev.dev;
const struct uwb_est_entry *entry;
size = -ENOENT;
if (event_low >= est->entries) { /* in range? */
dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u out of range\n",
est, est->type_event_high, est->vendor, est->product,
est->entries, event_low);
goto out;
}
size = -ENOENT;
entry = &est->entry[event_low];
if (entry->size == 0 && entry->offset == 0) { /* unknown? */
dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u unknown\n",
est, est->type_event_high, est->vendor, est->product,
est->entries, event_low);
goto out;
}
offset = entry->offset; /* extra fries with that? */
if (offset == 0)
size = entry->size;
else {
/* Oops, got an extra size field at 'offset'--read it */
const void *ptr = rceb;
size_t type_size = 0;
offset--;
size = -ENOSPC; /* enough data for more? */
switch (entry->type) {
case UWB_EST_16: type_size = sizeof(__le16); break;
case UWB_EST_8: type_size = sizeof(u8); break;
default: BUG();
}
if (offset + type_size > rceb_size) {
dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: "
"not enough data to read extra size\n",
est, est->type_event_high, est->vendor,
est->product, est->entries);
goto out;
}
size = entry->size;
ptr += offset;
switch (entry->type) {
case UWB_EST_16: size += le16_to_cpu(*(__le16 *)ptr); break;
case UWB_EST_8: size += *(u8 *)ptr; break;
default: BUG();
}
}
out:
return size;
}
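/*
 * Illustrative sketch (not part of the original driver): the .offset
 * convention decoded above.  A zero offset means the event has a fixed
 * size; otherwise the table stores 1 + offsetof(event struct, length
 * field), so the extra length lives (offset - 1) bytes into the event.
 * This sketch assumes a 16-bit length field (UWB_EST_16).
 */
static ssize_t ex_variable_event_size(const struct uwb_est_entry *entry,
				      const struct uwb_rceb *rceb,
				      size_t rceb_size)
{
	size_t off;

	if (entry->offset == 0)
		return entry->size;		/* fixed-size event */
	off = entry->offset - 1;
	if (off + sizeof(__le16) > rceb_size)
		return -ENOSPC;			/* length field not readable yet */
	return entry->size +
		le16_to_cpu(*(const __le16 *)((const void *)rceb + off));
}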
/**
* Guesses the size of a WA event
*
* @rceb: pointer to the buffer with the event
* @rceb_size: size of the area pointed to by @rceb in bytes.
* @returns: > 0 Size of the event
* -ENOSPC An area big enough was not provided to look
* ahead into the event's guts and guess the size.
* -EINVAL Unknown event code (wEvent).
*
* This will look at the received RCEB and guess what is the total
* size by checking all the tables registered with
* uwb_est_register(). For variable sized events, it will look further
* ahead into their length field to see how much data should be read.
*
* Note this size is *not* final--the neh (Notification/Event Handle)
* might specify an extra size to add or replace.
*/
ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
size_t rceb_size)
{
/* FIXME: add vendor/product data */
ssize_t size;
struct device *dev = &rc->uwb_dev.dev;
unsigned long flags;
unsigned itr;
u16 type_event_high, event;
read_lock_irqsave(&uwb_est_lock, flags);
size = -ENOSPC;
if (rceb_size < sizeof(*rceb))
goto out;
event = le16_to_cpu(rceb->wEvent);
type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8;
for (itr = 0; itr < uwb_est_used; itr++) {
if (uwb_est[itr].type_event_high != type_event_high)
continue;
size = uwb_est_get_size(rc, &uwb_est[itr],
event & 0x00ff, rceb, rceb_size);
/* try more tables that might handle the same type */
if (size != -ENOENT)
goto out;
}
dev_dbg(dev,
"event 0x%02x/%04x/%02x: no handlers available; RCEB %4ph\n",
(unsigned) rceb->bEventType,
(unsigned) le16_to_cpu(rceb->wEvent),
(unsigned) rceb->bEventContext,
rceb);
size = -ENOENT;
out:
read_unlock_irqrestore(&uwb_est_lock, flags);
return size;
}
EXPORT_SYMBOL_GPL(uwb_est_find_size);


@ -1,929 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* WUSB Host Wire Adapter: Radio Control Interface (WUSB[8.6])
* Radio Control command/event transport
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* Initialize the Radio Control interface Driver.
*
* For each device probed, creates an 'struct hwarc' which contains
* just the representation of the UWB Radio Controller, and the logic
* for reading notifications and passing them to the UWB Core.
*
* So we initialize all of those, register the UWB Radio Controller
* and setup the notification/event handle to pipe the notifications
* to the UWB management Daemon.
*
* Command and event filtering.
*
* This is the driver for the Radio Control Interface described in WUSB
* 1.0. The core UWB module assumes that all drivers are compliant to the
* WHCI 0.95 specification. We thus create a filter that parses all
* incoming messages from the (WUSB 1.0) device and manipulate them to
* conform to the WHCI 0.95 specification. Similarly, outgoing messages
* are parsed and manipulated to conform to the WUSB 1.0 compliant messages
* that the device expects. Only a few messages are affected:
* Affected events:
* UWB_RC_EVT_BEACON
* UWB_RC_EVT_BP_SLOT_CHANGE
* UWB_RC_EVT_DRP_AVAIL
* UWB_RC_EVT_DRP
* Affected commands:
* UWB_RC_CMD_SCAN
* UWB_RC_CMD_SET_DRP_IE
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "../wusbcore/include/wusb.h"
#include "../wusbcore/include/wusb-wa.h"
#include "uwb.h"
#include "uwb-internal.h"
/* The device uses commands and events from the WHCI specification, although
* reporting itself as WUSB compliant. */
#define WUSB_QUIRK_WHCI_CMD_EVT 0x01
/**
* Descriptor for an instance of the UWB Radio Control Driver that
* attaches to the RCI interface of the Host Wired Adapter.
*
* Unless there is a lock specific to the 'data members', all access
* is protected by uwb_rc->mutex.
*
* The NEEP (Notification/Event EndPoint) URB (@neep_urb) writes to
* @rd_buffer. Note there is no locking because it is perfectly (heh!)
* serialized--probe() submits an URB, callback is called, processes
* the data (synchronously), submits another URB, and so on. There is
* no concurrent access to the buffer.
*/
struct hwarc {
struct usb_device *usb_dev;
struct usb_interface *usb_iface;
struct uwb_rc *uwb_rc; /* UWB host controller */
struct urb *neep_urb; /* Notification endpoint handling */
struct edc neep_edc;
void *rd_buffer; /* NEEP read buffer */
};
/* Beacon received notification (WUSB 1.0 [8.6.3.2]) */
struct uwb_rc_evt_beacon_WUSB_0100 {
struct uwb_rceb rceb;
u8 bChannelNumber;
__le16 wBPSTOffset;
u8 bLQI;
u8 bRSSI;
__le16 wBeaconInfoLength;
u8 BeaconInfo[];
} __attribute__((packed));
/**
* Filter WUSB 1.0 BEACON RCV notification to be WHCI 0.95
*
* @header: the incoming event
* @buf_size: size of buffer containing incoming event
* @new_size: size of event after filtering completed
*
* The WHCI 0.95 spec has a "Beacon Type" field. This value is unknown at
* the time we receive the beacon from WUSB so we just set it to
* UWB_RC_BEACON_TYPE_NEIGHBOR as a default.
* The solution below allocates memory upon receipt of every beacon from a
* WUSB device. This will deteriorate performance. What is the right way to
* do this?
*/
static
int hwarc_filter_evt_beacon_WUSB_0100(struct uwb_rc *rc,
struct uwb_rceb **header,
const size_t buf_size,
size_t *new_size)
{
struct uwb_rc_evt_beacon_WUSB_0100 *be;
struct uwb_rc_evt_beacon *newbe;
size_t bytes_left, ielength;
struct device *dev = &rc->uwb_dev.dev;
be = container_of(*header, struct uwb_rc_evt_beacon_WUSB_0100, rceb);
bytes_left = buf_size;
if (bytes_left < sizeof(*be)) {
dev_err(dev, "Beacon Received Notification: Not enough data "
"to decode for filtering (%zu vs %zu bytes needed)\n",
bytes_left, sizeof(*be));
return -EINVAL;
}
bytes_left -= sizeof(*be);
ielength = le16_to_cpu(be->wBeaconInfoLength);
if (bytes_left < ielength) {
dev_err(dev, "Beacon Received Notification: Not enough data "
"to decode IEs (%zu vs %zu bytes needed)\n",
bytes_left, ielength);
return -EINVAL;
}
newbe = kzalloc(sizeof(*newbe) + ielength, GFP_ATOMIC);
if (newbe == NULL)
return -ENOMEM;
newbe->rceb = be->rceb;
newbe->bChannelNumber = be->bChannelNumber;
newbe->bBeaconType = UWB_RC_BEACON_TYPE_NEIGHBOR;
newbe->wBPSTOffset = be->wBPSTOffset;
newbe->bLQI = be->bLQI;
newbe->bRSSI = be->bRSSI;
newbe->wBeaconInfoLength = be->wBeaconInfoLength;
memcpy(newbe->BeaconInfo, be->BeaconInfo, ielength);
*header = &newbe->rceb;
*new_size = sizeof(*newbe) + ielength;
return 1; /* calling function will free memory */
}
/* DRP Availability change notification (WUSB 1.0 [8.6.3.8]) */
struct uwb_rc_evt_drp_avail_WUSB_0100 {
struct uwb_rceb rceb;
__le16 wIELength;
u8 IEData[];
} __attribute__((packed));
/**
* Filter WUSB 1.0 DRP AVAILABILITY CHANGE notification to be WHCI 0.95
*
* @header: the incoming event
* @buf_size: size of buffer containing incoming event
* @new_size: size of event after filtering completed
*/
static
int hwarc_filter_evt_drp_avail_WUSB_0100(struct uwb_rc *rc,
struct uwb_rceb **header,
const size_t buf_size,
size_t *new_size)
{
struct uwb_rc_evt_drp_avail_WUSB_0100 *da;
struct uwb_rc_evt_drp_avail *newda;
struct uwb_ie_hdr *ie_hdr;
size_t bytes_left, ielength;
struct device *dev = &rc->uwb_dev.dev;
da = container_of(*header, struct uwb_rc_evt_drp_avail_WUSB_0100, rceb);
bytes_left = buf_size;
if (bytes_left < sizeof(*da)) {
dev_err(dev, "Not enough data to decode DRP Avail "
"Notification for filtering. Expected %zu, "
"received %zu.\n", (size_t)sizeof(*da), bytes_left);
return -EINVAL;
}
bytes_left -= sizeof(*da);
ielength = le16_to_cpu(da->wIELength);
if (bytes_left < ielength) {
dev_err(dev, "DRP Avail Notification filter: IE length "
"[%zu bytes] does not match actual length "
"[%zu bytes].\n", ielength, bytes_left);
return -EINVAL;
}
if (ielength < sizeof(*ie_hdr)) {
dev_err(dev, "DRP Avail Notification filter: Not enough "
"data to decode IE [%zu bytes, %zu needed]\n",
ielength, sizeof(*ie_hdr));
return -EINVAL;
}
ie_hdr = (void *) da->IEData;
if (ie_hdr->length > 32) {
dev_err(dev, "DRP Availability Change event has unexpected "
"length for filtering. Expected < 32 bytes, "
"got %zu bytes.\n", (size_t)ie_hdr->length);
return -EINVAL;
}
newda = kzalloc(sizeof(*newda), GFP_ATOMIC);
if (newda == NULL)
return -ENOMEM;
newda->rceb = da->rceb;
memcpy(newda->bmp, (u8 *) ie_hdr + sizeof(*ie_hdr), ie_hdr->length);
*header = &newda->rceb;
*new_size = sizeof(*newda);
return 1; /* calling function will free memory */
}
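/*
 * Illustrative note (not part of the original driver): the 32-byte bound
 * checked above is simply a full MAS availability bitmap -- 256 MAS at one
 * bit each is 256 / 8 = 32 bytes, which is what gets copied into bmp[].
 */
#define EX_DRP_AVAIL_BITMAP_BYTES (256 / 8)	/* 32 bytes covers all 256 MAS */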
/* DRP notification (WUSB 1.0 [8.6.3.9]) */
struct uwb_rc_evt_drp_WUSB_0100 {
struct uwb_rceb rceb;
struct uwb_dev_addr wSrcAddr;
u8 bExplicit;
__le16 wIELength;
u8 IEData[];
} __attribute__((packed));
/**
* Filter WUSB 1.0 DRP Notification to be WHCI 0.95
*
* @header: the incoming event
* @buf_size: size of buffer containing incoming event
* @new_size: size of event after filtering completed
*
* It is hard to manage DRP reservations without having a Reason code.
* Unfortunately there is none in the WUSB spec. We just set the default to
* DRP IE RECEIVED.
* We do not currently use the bBeaconSlotNumber value, so we set this to
* zero for now.
*/
static
int hwarc_filter_evt_drp_WUSB_0100(struct uwb_rc *rc,
struct uwb_rceb **header,
const size_t buf_size,
size_t *new_size)
{
struct uwb_rc_evt_drp_WUSB_0100 *drpev;
struct uwb_rc_evt_drp *newdrpev;
size_t bytes_left, ielength;
struct device *dev = &rc->uwb_dev.dev;
drpev = container_of(*header, struct uwb_rc_evt_drp_WUSB_0100, rceb);
bytes_left = buf_size;
if (bytes_left < sizeof(*drpev)) {
dev_err(dev, "Not enough data to decode DRP Notification "
"for filtering. Expected %zu, received %zu.\n",
(size_t)sizeof(*drpev), bytes_left);
return -EINVAL;
}
ielength = le16_to_cpu(drpev->wIELength);
bytes_left -= sizeof(*drpev);
if (bytes_left < ielength) {
dev_err(dev, "DRP Notification filter: header length [%zu "
"bytes] does not match actual length [%zu "
"bytes].\n", ielength, bytes_left);
return -EINVAL;
}
newdrpev = kzalloc(sizeof(*newdrpev) + ielength, GFP_ATOMIC);
if (newdrpev == NULL)
return -ENOMEM;
newdrpev->rceb = drpev->rceb;
newdrpev->src_addr = drpev->wSrcAddr;
newdrpev->reason = UWB_DRP_NOTIF_DRP_IE_RCVD;
newdrpev->beacon_slot_number = 0;
newdrpev->ie_length = drpev->wIELength;
memcpy(newdrpev->ie_data, drpev->IEData, ielength);
*header = &newdrpev->rceb;
*new_size = sizeof(*newdrpev) + ielength;
return 1; /* calling function will free memory */
}
/* Scan Command (WUSB 1.0 [8.6.2.5]) */
struct uwb_rc_cmd_scan_WUSB_0100 {
struct uwb_rccb rccb;
u8 bChannelNumber;
u8 bScanState;
} __attribute__((packed));
/**
* Filter WHCI 0.95 SCAN command to be WUSB 1.0 SCAN command
*
* @header: command sent to device (compliant to WHCI 0.95)
* @size: size of command sent to device
*
* We only reduce the size by two bytes because the WUSB 1.0 scan command
* does not have the last field (wStarttime). Also, make sure we don't send
* the device an unexpected scan type.
*/
static
int hwarc_filter_cmd_scan_WUSB_0100(struct uwb_rc *rc,
struct uwb_rccb **header,
size_t *size)
{
struct uwb_rc_cmd_scan *sc;
sc = container_of(*header, struct uwb_rc_cmd_scan, rccb);
if (sc->bScanState == UWB_SCAN_ONLY_STARTTIME)
sc->bScanState = UWB_SCAN_ONLY;
/* Don't send the last two bytes. */
*size -= 2;
return 0;
}
/* SET DRP IE command (WUSB 1.0 [8.6.2.7]) */
struct uwb_rc_cmd_set_drp_ie_WUSB_0100 {
struct uwb_rccb rccb;
u8 bExplicit;
__le16 wIELength;
struct uwb_ie_drp IEData[];
} __attribute__((packed));
/**
* Filter WHCI 0.95 SET DRP IE command to be WUSB 1.0 SET DRP IE command
*
* @header: command sent to device (compliant to WHCI 0.95)
* @size: size of command sent to device
*
* WUSB has an extra bExplicit field - we assume always explicit
* negotiation so this field is set. The command expected by the device is
* thus larger than the one prepared by the driver so we need to
* reallocate memory to accommodate this.
* We trust the driver to send us the correct data so no checking is done
* on incoming data - even though it is variable length.
*/
static
int hwarc_filter_cmd_set_drp_ie_WUSB_0100(struct uwb_rc *rc,
struct uwb_rccb **header,
size_t *size)
{
struct uwb_rc_cmd_set_drp_ie *orgcmd;
struct uwb_rc_cmd_set_drp_ie_WUSB_0100 *cmd;
size_t ielength;
orgcmd = container_of(*header, struct uwb_rc_cmd_set_drp_ie, rccb);
ielength = le16_to_cpu(orgcmd->wIELength);
cmd = kzalloc(sizeof(*cmd) + ielength, GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->rccb = orgcmd->rccb;
cmd->bExplicit = 0;
cmd->wIELength = orgcmd->wIELength;
memcpy(cmd->IEData, orgcmd->IEData, ielength);
*header = &cmd->rccb;
*size = sizeof(*cmd) + ielength;
return 1; /* calling function will free memory */
}
/**
* Filter data from WHCI driver to WUSB device
*
* @header: WHCI 0.95 compliant command from driver
* @size: length of command
*
* The routine managing commands to the device (uwb_rc_cmd()) will call the
* filtering function pointer (if it exists) before it passes any data to
* the device. At this time the command has been formatted according to
* WHCI 0.95 and is ready to be sent to the device.
*
* The filter function will be provided with the current command and its
* length. The function will manipulate the command if necessary and
* potentially reallocate memory for a command that needed more memory that
* the given command. If new memory was created the function will return 1
* to indicate to the calling function that the memory need to be freed
* when not needed any more. The size will contain the new length of the
* command.
* If memory has not been allocated we rely on the original mechanisms to
* free the memory of the command - even when we reduce the value of size.
*/
static
int hwarc_filter_cmd_WUSB_0100(struct uwb_rc *rc, struct uwb_rccb **header,
size_t *size)
{
int result;
struct uwb_rccb *rccb = *header;
int cmd = le16_to_cpu(rccb->wCommand);
switch (cmd) {
case UWB_RC_CMD_SCAN:
result = hwarc_filter_cmd_scan_WUSB_0100(rc, header, size);
break;
case UWB_RC_CMD_SET_DRP_IE:
result = hwarc_filter_cmd_set_drp_ie_WUSB_0100(rc, header, size);
break;
default:
result = -ENOANO;
break;
}
return result;
}
/**
* Filter data from WHCI driver to WUSB device
*
* @header: WHCI 0.95 compliant command from driver
* @size: length of command
*
* Filter commands based on which protocol the device supports. The WUSB
* errata should be the same as WHCI 0.95 so we do not filter that here -
* only WUSB 1.0.
*/
static
int hwarc_filter_cmd(struct uwb_rc *rc, struct uwb_rccb **header,
size_t *size)
{
int result = -ENOANO;
if (rc->version == 0x0100)
result = hwarc_filter_cmd_WUSB_0100(rc, header, size);
return result;
}
/**
* Compute return value as sum of incoming value and value at given offset
*
* @rceb: event for which we compute the size, it contains a variable
* length field.
* @core_size: size of the "non variable" part of the event
* @offset: place in event where the length of the variable part is stored
* @buf_size: total length of buffer in which event arrived - we need to make
* sure we read the offset in memory that is still part of the event
*/
static
ssize_t hwarc_get_event_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
size_t core_size, size_t offset,
const size_t buf_size)
{
ssize_t size = -ENOSPC;
const void *ptr = rceb;
size_t type_size = sizeof(__le16);
struct device *dev = &rc->uwb_dev.dev;
if (offset + type_size >= buf_size) {
dev_err(dev, "Not enough data to read extra size of event "
"0x%02x/%04x/%02x, only got %zu bytes.\n",
rceb->bEventType, le16_to_cpu(rceb->wEvent),
rceb->bEventContext, buf_size);
goto out;
}
ptr += offset;
size = core_size + le16_to_cpu(*(__le16 *)ptr);
out:
return size;
}
/* Beacon slot change notification (WUSB 1.0 [8.6.3.5]) */
struct uwb_rc_evt_bp_slot_change_WUSB_0100 {
struct uwb_rceb rceb;
u8 bSlotNumber;
} __attribute__((packed));
/**
* Filter data from WUSB device to WHCI driver
*
* @header: incoming event
* @buf_size: size of buffer in which event arrived
* @_event_size: actual size of event in the buffer
* @new_size: size of event after filtered
*
* We don't know how the buffer is constructed - there may be more than one
* event in it so buffer length does not determine event length. We first
* determine the expected size of the incoming event. This value is passed
* back only if the actual filtering succeeded (so we know the computed
* expected size is correct). This value will be zero if
* the event did not need any filtering.
*
* WHCI interprets the BP Slot Change event's data differently than
* WUSB. The event sizes are exactly the same. The data field
* indicates the new beacon slot in which a RC is transmitting its
* beacon. The maximum value of this is 96 (wMacBPLength ECMA-368
* 17.16 (Table 117)). We thus know that the WUSB value will not set
* the bit bNoSlot, so we don't really do anything (placeholder).
*/
static
int hwarc_filter_event_WUSB_0100(struct uwb_rc *rc, struct uwb_rceb **header,
const size_t buf_size, size_t *_real_size,
size_t *_new_size)
{
int result = -ENOANO;
struct uwb_rceb *rceb = *header;
int event = le16_to_cpu(rceb->wEvent);
ssize_t event_size;
size_t core_size, offset;
if (rceb->bEventType != UWB_RC_CET_GENERAL)
goto out;
switch (event) {
case UWB_RC_EVT_BEACON:
core_size = sizeof(struct uwb_rc_evt_beacon_WUSB_0100);
offset = offsetof(struct uwb_rc_evt_beacon_WUSB_0100,
wBeaconInfoLength);
event_size = hwarc_get_event_size(rc, rceb, core_size,
offset, buf_size);
if (event_size < 0)
goto out;
*_real_size = event_size;
result = hwarc_filter_evt_beacon_WUSB_0100(rc, header,
buf_size, _new_size);
break;
case UWB_RC_EVT_BP_SLOT_CHANGE:
*_new_size = *_real_size =
sizeof(struct uwb_rc_evt_bp_slot_change_WUSB_0100);
result = 0;
break;
case UWB_RC_EVT_DRP_AVAIL:
core_size = sizeof(struct uwb_rc_evt_drp_avail_WUSB_0100);
offset = offsetof(struct uwb_rc_evt_drp_avail_WUSB_0100,
wIELength);
event_size = hwarc_get_event_size(rc, rceb, core_size,
offset, buf_size);
if (event_size < 0)
goto out;
*_real_size = event_size;
result = hwarc_filter_evt_drp_avail_WUSB_0100(
rc, header, buf_size, _new_size);
break;
case UWB_RC_EVT_DRP:
core_size = sizeof(struct uwb_rc_evt_drp_WUSB_0100);
offset = offsetof(struct uwb_rc_evt_drp_WUSB_0100, wIELength);
event_size = hwarc_get_event_size(rc, rceb, core_size,
offset, buf_size);
if (event_size < 0)
goto out;
*_real_size = event_size;
result = hwarc_filter_evt_drp_WUSB_0100(rc, header,
buf_size, _new_size);
break;
default:
break;
}
out:
return result;
}
/**
* Filter data from WUSB device to WHCI driver
*
* @header: incoming event
* @buf_size: size of buffer in which event arrived
* @_event_size: actual size of event in the buffer
* @_new_size: size of event after filtered
*
* Filter events based on which protocol the device supports. The WUSB
* errata should be the same as WHCI 0.95 so we do not filter that here -
* only WUSB 1.0.
*
* If we don't handle it, we return -ENOANO (why the weird error code?
* well, so if I get it, I can pinpoint in the code that raised
* it...after all, not too many places use the higher error codes).
*/
static
int hwarc_filter_event(struct uwb_rc *rc, struct uwb_rceb **header,
const size_t buf_size, size_t *_real_size,
size_t *_new_size)
{
int result = -ENOANO;
if (rc->version == 0x0100)
result = hwarc_filter_event_WUSB_0100(
rc, header, buf_size, _real_size, _new_size);
return result;
}
/**
* Execute an UWB RC command on HWA
*
* @rc: Instance of a Radio Controller that is a HWA
* @cmd: Buffer containing the RCCB and payload to execute
* @cmd_size: Size of the command buffer.
*
* NOTE: rc's mutex has to be locked
*/
static
int hwarc_cmd(struct uwb_rc *uwb_rc, const struct uwb_rccb *cmd, size_t cmd_size)
{
struct hwarc *hwarc = uwb_rc->priv;
return usb_control_msg(
hwarc->usb_dev, usb_sndctrlpipe(hwarc->usb_dev, 0),
WA_EXEC_RC_CMD, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, hwarc->usb_iface->cur_altsetting->desc.bInterfaceNumber,
(void *) cmd, cmd_size, 100 /* FIXME: this is totally arbitrary */);
}
static
int hwarc_reset(struct uwb_rc *uwb_rc)
{
struct hwarc *hwarc = uwb_rc->priv;
int result;
/* device lock must be held when calling usb_reset_device. */
result = usb_lock_device_for_reset(hwarc->usb_dev, NULL);
if (result >= 0) {
result = usb_reset_device(hwarc->usb_dev);
usb_unlock_device(hwarc->usb_dev);
}
return result;
}
/**
* Callback for the notification and event endpoint
*
* Checks that everything is fine and then passes the read data to
* the notification/event handling mechanism (neh).
*/
static
void hwarc_neep_cb(struct urb *urb)
{
struct hwarc *hwarc = urb->context;
struct usb_interface *usb_iface = hwarc->usb_iface;
struct device *dev = &usb_iface->dev;
int result;
switch (result = urb->status) {
case 0:
uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer,
urb->actual_length);
break;
case -ECONNRESET: /* Not an error, but a controlled situation; */
case -ENOENT: /* (we killed the URB)...so, no broadcast */
goto out;
case -ESHUTDOWN: /* going away! */
goto out;
default: /* On general errors, retry unless it gets ugly */
if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS,
EDC_ERROR_TIMEFRAME))
goto error_exceeded;
dev_err(dev, "NEEP: URB error %d\n", urb->status);
}
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result < 0 && result != -ENODEV && result != -EPERM) {
/* ignoring unrecoverable errors */
dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n",
result);
goto error;
}
out:
return;
error_exceeded:
dev_err(dev, "NEEP: URB max acceptable errors "
"exceeded, resetting device\n");
error:
uwb_rc_neh_error(hwarc->uwb_rc, result);
uwb_rc_reset_all(hwarc->uwb_rc);
return;
}
static void hwarc_init(struct hwarc *hwarc)
{
edc_init(&hwarc->neep_edc);
}
/**
* Initialize the notification/event endpoint stuff
*
* Note this is effectively a parallel thread; it knows that
* hwarc->uwb_rc always exists because the existence of a 'hwarc'
* means that there is a reference on the hwarc->uwb_rc (see
* _probe()), and thus _neep_cb() can execute safely.
*/
static int hwarc_neep_init(struct uwb_rc *rc)
{
struct hwarc *hwarc = rc->priv;
struct usb_interface *iface = hwarc->usb_iface;
struct usb_device *usb_dev = interface_to_usbdev(iface);
struct device *dev = &iface->dev;
int result;
struct usb_endpoint_descriptor *epd;
epd = &iface->cur_altsetting->endpoint[0].desc;
hwarc->rd_buffer = (void *) __get_free_page(GFP_KERNEL);
if (hwarc->rd_buffer == NULL) {
dev_err(dev, "Unable to allocate notification's read buffer\n");
goto error_rd_buffer;
}
hwarc->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
if (hwarc->neep_urb == NULL)
goto error_urb_alloc;
usb_fill_int_urb(hwarc->neep_urb, usb_dev,
usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
hwarc->rd_buffer, PAGE_SIZE,
hwarc_neep_cb, hwarc, epd->bInterval);
result = usb_submit_urb(hwarc->neep_urb, GFP_ATOMIC);
if (result < 0) {
dev_err(dev, "Cannot submit notification URB: %d\n", result);
goto error_neep_submit;
}
return 0;
error_neep_submit:
usb_free_urb(hwarc->neep_urb);
hwarc->neep_urb = NULL;
error_urb_alloc:
free_page((unsigned long)hwarc->rd_buffer);
hwarc->rd_buffer = NULL;
error_rd_buffer:
return -ENOMEM;
}
/** Clean up all the notification endpoint resources */
static void hwarc_neep_release(struct uwb_rc *rc)
{
struct hwarc *hwarc = rc->priv;
usb_kill_urb(hwarc->neep_urb);
usb_free_urb(hwarc->neep_urb);
hwarc->neep_urb = NULL;
free_page((unsigned long)hwarc->rd_buffer);
hwarc->rd_buffer = NULL;
}
/**
* Get the version from class-specific descriptor
*
* NOTE: this descriptor comes bundled with the configuration
* descriptor, along with the interface and endpoint descriptors, so
* we just look for it in the cached copy kept by the USB stack.
*
* NOTE2: We convert LE fields to CPU order.
*/
static int hwarc_get_version(struct uwb_rc *rc)
{
int result;
struct hwarc *hwarc = rc->priv;
struct uwb_rc_control_intf_class_desc *descr;
struct device *dev = &rc->uwb_dev.dev;
struct usb_device *usb_dev = hwarc->usb_dev;
char *itr;
struct usb_descriptor_header *hdr;
size_t itr_size, actconfig_idx;
u16 version;
actconfig_idx = (usb_dev->actconfig - usb_dev->config) /
sizeof(usb_dev->config[0]);
itr = usb_dev->rawdescriptors[actconfig_idx];
itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
while (itr_size >= sizeof(*hdr)) {
hdr = (struct usb_descriptor_header *) itr;
dev_dbg(dev, "Extra device descriptor: "
"type %02x/%u bytes @ %zu (%zu left)\n",
hdr->bDescriptorType, hdr->bLength,
(itr - usb_dev->rawdescriptors[actconfig_idx]),
itr_size);
if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL)
goto found;
itr += hdr->bLength;
itr_size -= hdr->bLength;
}
dev_err(dev, "cannot find Radio Control Interface Class descriptor\n");
return -ENODEV;
found:
result = -EINVAL;
if (hdr->bLength > itr_size) { /* is it available? */
dev_err(dev, "incomplete Radio Control Interface Class "
"descriptor (%zu bytes left, %u needed)\n",
itr_size, hdr->bLength);
goto error;
}
if (hdr->bLength < sizeof(*descr)) {
dev_err(dev, "short Radio Control Interface Class "
"descriptor\n");
goto error;
}
descr = (struct uwb_rc_control_intf_class_desc *) hdr;
/* Make LE fields CPU order */
version = __le16_to_cpu(descr->bcdRCIVersion);
if (version != 0x0100) {
dev_err(dev, "Device reports protocol version 0x%04x. We "
"do not support that. \n", version);
result = -EINVAL;
goto error;
}
rc->version = version;
dev_dbg(dev, "Device supports WUSB protocol version 0x%04x \n", rc->version);
result = 0;
error:
return result;
}
/*
* By creating a 'uwb_rc', we have a reference on it -- that reference
* is the one we drop when we disconnect.
*
* No need to switch altsettings; according to WUSB1.0[8.6.1.1], there
* is only one altsetting allowed.
*/
static int hwarc_probe(struct usb_interface *iface,
const struct usb_device_id *id)
{
int result;
struct uwb_rc *uwb_rc;
struct hwarc *hwarc;
struct device *dev = &iface->dev;
if (iface->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc))
return -ENODEV;
result = -ENOMEM;
uwb_rc = uwb_rc_alloc();
if (uwb_rc == NULL) {
dev_err(dev, "unable to allocate RC instance\n");
goto error_rc_alloc;
}
hwarc = kzalloc(sizeof(*hwarc), GFP_KERNEL);
if (hwarc == NULL) {
dev_err(dev, "unable to allocate HWA RC instance\n");
goto error_alloc;
}
hwarc_init(hwarc);
hwarc->usb_dev = usb_get_dev(interface_to_usbdev(iface));
hwarc->usb_iface = usb_get_intf(iface);
hwarc->uwb_rc = uwb_rc;
uwb_rc->owner = THIS_MODULE;
uwb_rc->start = hwarc_neep_init;
uwb_rc->stop = hwarc_neep_release;
uwb_rc->cmd = hwarc_cmd;
uwb_rc->reset = hwarc_reset;
if (id->driver_info & WUSB_QUIRK_WHCI_CMD_EVT) {
uwb_rc->filter_cmd = NULL;
uwb_rc->filter_event = NULL;
} else {
uwb_rc->filter_cmd = hwarc_filter_cmd;
uwb_rc->filter_event = hwarc_filter_event;
}
result = uwb_rc_add(uwb_rc, dev, hwarc);
if (result < 0)
goto error_rc_add;
result = hwarc_get_version(uwb_rc);
if (result < 0) {
dev_err(dev, "cannot retrieve version of RC \n");
goto error_get_version;
}
usb_set_intfdata(iface, hwarc);
return 0;
error_get_version:
uwb_rc_rm(uwb_rc);
error_rc_add:
usb_put_intf(iface);
usb_put_dev(hwarc->usb_dev);
kfree(hwarc);
error_alloc:
uwb_rc_put(uwb_rc);
error_rc_alloc:
return result;
}
static void hwarc_disconnect(struct usb_interface *iface)
{
struct hwarc *hwarc = usb_get_intfdata(iface);
struct uwb_rc *uwb_rc = hwarc->uwb_rc;
usb_set_intfdata(hwarc->usb_iface, NULL);
uwb_rc_rm(uwb_rc);
usb_put_intf(hwarc->usb_iface);
usb_put_dev(hwarc->usb_dev);
kfree(hwarc);
uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */
}
static int hwarc_pre_reset(struct usb_interface *iface)
{
struct hwarc *hwarc = usb_get_intfdata(iface);
struct uwb_rc *uwb_rc = hwarc->uwb_rc;
uwb_rc_pre_reset(uwb_rc);
return 0;
}
static int hwarc_post_reset(struct usb_interface *iface)
{
struct hwarc *hwarc = usb_get_intfdata(iface);
struct uwb_rc *uwb_rc = hwarc->uwb_rc;
return uwb_rc_post_reset(uwb_rc);
}
/** USB device IDs that we handle */
static const struct usb_device_id hwarc_id_table[] = {
/* D-Link DUB-1210 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02),
.driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
/* Intel i1480 (using firmware 1.3PA2-20070828) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x8086, 0x0c3b, 0xe0, 0x01, 0x02),
.driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
/* Alereon 5310 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x01, 0x02),
.driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
/* Alereon 5611 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x01, 0x02),
.driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
/* Generic match for the Radio Control interface */
{ USB_INTERFACE_INFO(0xe0, 0x01, 0x02), },
{ },
};
MODULE_DEVICE_TABLE(usb, hwarc_id_table);
static struct usb_driver hwarc_driver = {
.name = "hwa-rc",
.id_table = hwarc_id_table,
.probe = hwarc_probe,
.disconnect = hwarc_disconnect,
.pre_reset = hwarc_pre_reset,
.post_reset = hwarc_post_reset,
};
module_usb_driver(hwarc_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Host Wireless Adapter Radio Control Driver");
MODULE_LICENSE("GPL");

View File

@ -1,2 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_UWB_I1480U) += dfu/ i1480-est.o

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_UWB_I1480U) += i1480-dfu-usb.o
i1480-dfu-usb-objs := \
dfu.o \
mac.o \
phy.o \
usb.o

View File

@ -1,198 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Wireless UWB Link 1480
* Main driver
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* Common code for firmware upload used by the USB and PCI versions;
* i1480_fw_upload() takes a device descriptor and uses the function
* pointers it provides to upload firmware and prepare the PHY.
*
* As well, provides common functions used by the rest of the code.
*/
#include "i1480-dfu.h"
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/random.h>
#include <linux/export.h>
#include "../../uwb.h"
/*
* i1480_rceb_check - Check RCEB for expected field values
* @i1480: pointer to device for which RCEB is being checked
* @rceb: RCEB being checked
* @cmd: which command the RCEB is related to
* @context: expected context
* @expected_type: expected event type
* @expected_event: expected event
*
* If @cmd is NULL, do not print error messages, but still return an error
* code.
*
* Return 0 if @rceb matches the expected values, -EINVAL otherwise.
*/
int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb,
const char *cmd, u8 context, u8 expected_type,
unsigned expected_event)
{
int result = 0;
struct device *dev = i1480->dev;
if (rceb->bEventContext != context) {
if (cmd)
dev_err(dev, "%s: unexpected context id 0x%02x "
"(expected 0x%02x)\n", cmd,
rceb->bEventContext, context);
result = -EINVAL;
}
if (rceb->bEventType != expected_type) {
if (cmd)
dev_err(dev, "%s: unexpected event type 0x%02x "
"(expected 0x%02x)\n", cmd,
rceb->bEventType, expected_type);
result = -EINVAL;
}
if (le16_to_cpu(rceb->wEvent) != expected_event) {
if (cmd)
dev_err(dev, "%s: unexpected event 0x%04x "
"(expected 0x%04x)\n", cmd,
le16_to_cpu(rceb->wEvent), expected_event);
result = -EINVAL;
}
return result;
}
EXPORT_SYMBOL_GPL(i1480_rceb_check);
/*
* Execute a Radio Control Command
*
* Command data has to be in i1480->cmd_buf.
*
* @returns size of the reply data filled in i1480->evt_buf or < 0 errno
* code on error.
*/
ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size,
size_t reply_size)
{
ssize_t result;
struct uwb_rceb *reply = i1480->evt_buf;
struct uwb_rccb *cmd = i1480->cmd_buf;
u16 expected_event = reply->wEvent;
u8 expected_type = reply->bEventType;
u8 context;
init_completion(&i1480->evt_complete);
i1480->evt_result = -EINPROGRESS;
do {
get_random_bytes(&context, 1);
} while (context == 0x00 || context == 0xff);
cmd->bCommandContext = context;
result = i1480->cmd(i1480, cmd_name, cmd_size);
if (result < 0)
goto error;
/* wait for the callback to report an event was received */
result = wait_for_completion_interruptible_timeout(
&i1480->evt_complete, HZ);
if (result == 0) {
result = -ETIMEDOUT;
goto error;
}
if (result < 0)
goto error;
result = i1480->evt_result;
if (result < 0) {
dev_err(i1480->dev, "%s: command reply reception failed: %zd\n",
cmd_name, result);
goto error;
}
/*
* Firmware versions >= 1.4.12224 for IOGear GUWA100U generate a
* spurious notification after firmware is downloaded. So check whether
* the received RCEB is such a notification before assuming that the
* command has failed.
*/
if (i1480_rceb_check(i1480, i1480->evt_buf, NULL,
0, 0xfd, 0x0022) == 0) {
/* Now wait for the actual RCEB for this command. */
result = i1480->wait_init_done(i1480);
if (result < 0)
goto error;
result = i1480->evt_result;
}
if (result != reply_size) {
dev_err(i1480->dev, "%s returned only %zu bytes, %zu expected\n",
cmd_name, result, reply_size);
result = -EINVAL;
goto error;
}
/* Verify we got the right event in response */
result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context,
expected_type, expected_event);
error:
return result;
}
EXPORT_SYMBOL_GPL(i1480_cmd);
static
int i1480_print_state(struct i1480 *i1480)
{
int result;
u32 *buf = (u32 *) i1480->cmd_buf;
result = i1480->read(i1480, 0x80080000, 2 * sizeof(*buf));
if (result < 0) {
dev_err(i1480->dev, "cannot read U & L states: %d\n", result);
goto error;
}
dev_info(i1480->dev, "state U 0x%08x, L 0x%08x\n", buf[0], buf[1]);
error:
return result;
}
/*
* Firmware uploader (called from the USB and PCI probe paths)
*
* _mac_fw_upload() will call rc_setup(), which needs an rc_release().
*/
int i1480_fw_upload(struct i1480 *i1480)
{
int result;
result = i1480_pre_fw_upload(i1480); /* PHY pre fw */
if (result < 0 && result != -ENOENT) {
i1480_print_state(i1480);
goto error;
}
result = i1480_mac_fw_upload(i1480); /* MAC fw */
if (result < 0) {
if (result == -ENOENT)
dev_err(i1480->dev, "Cannot locate MAC FW file '%s'\n",
i1480->mac_fw_name);
else
i1480_print_state(i1480);
goto error;
}
result = i1480_phy_fw_upload(i1480); /* PHY fw */
if (result < 0 && result != -ENOENT) {
i1480_print_state(i1480);
goto error_rc_release;
}
/*
* FIXME: find some reliable way to check whether firmware is running
* properly. Maybe use some standard request that has no side effects?
*/
dev_info(i1480->dev, "firmware uploaded successfully\n");
error_rc_release:
if (i1480->rc_release)
i1480->rc_release(i1480);
result = 0;
error:
return result;
}
EXPORT_SYMBOL_GPL(i1480_fw_upload);

View File

@ -1,246 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* i1480 Device Firmware Upload
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This driver is the firmware uploader for the Intel Wireless UWB
* Link 1480 device (both in the USB and PCI incarnations).
*
* The process is quite simple: we stop the device, write the firmware
* to its memory and then restart it. Wait for the device to let us
* know it is done booting firmware. Ready.
*
* We might have to upload before or after a phy firmware (which might
* be done in one of two ways: using a normal firmware image or through
* the MPI port).
*
* Because USB and PCI use common methods, we just make ops out of the
* common operations (read, write, wait_init_done and cmd) and
* implement them in usb.c and pci.c.
*
* The flow is (some parts omitted):
*
* i1480_{usb,pci}_probe() On enumerate/discovery
* i1480_fw_upload()
* i1480_pre_fw_upload()
* __mac_fw_upload()
* fw_hdrs_load()
* mac_fw_hdrs_push()
* i1480->write() [i1480_{usb,pci}_write()]
* i1480_fw_cmp()
* i1480->read() [i1480_{usb,pci}_read()]
* i1480_mac_fw_upload()
* __mac_fw_upload()
* i1480->setup(()
* i1480->wait_init_done()
* i1480_cmd_reset()
* i1480->cmd() [i1480_{usb,pci}_cmd()]
* ...
* i1480_phy_fw_upload()
* request_firmware()
* i1480_mpi_write()
* i1480->cmd() [i1480_{usb,pci}_cmd()]
*
* Once the probe function enumerates the device and uploads the
* firmware, we just exit with -ENODEV, as we don't really want to
* attach to the device.
*/
#ifndef __i1480_DFU_H__
#define __i1480_DFU_H__
#include <linux/types.h>
#include <linux/completion.h>
#include "../../include/spec.h"
#define i1480_FW_UPLOAD_MODE_MASK (cpu_to_le32(0x00000018))
#if i1480_FW > 0x00000302
#define i1480_RCEB_EXTENDED
#endif
struct uwb_rccb;
struct uwb_rceb;
/*
* Common firmware upload handlers
*
* Normally you embed this struct in another one specific to your hw.
*
* @write Write to device's memory from buffer.
* @read Read from device's memory to i1480->evt_buf.
* @setup Setup device after basic firmware is uploaded
* @wait_init_done
* Wait for the device to send a notification saying init
* is done.
* @cmd FOP for issuing the command to the hardware. The
* command data is contained in i1480->cmd_buf and the size
* is supplied as an argument. The command replied is put
* in i1480->evt_buf and the size in i1480->evt_result (or if
* an error, a < 0 errno code).
*
* @cmd_buf Memory buffer used to send commands to the device.
* Allocated by the upper layers i1480_fw_upload().
* Size has to be @buf_size.
* @evt_buf Memory buffer used to place the async notifications
* received by the hw. Allocated by the upper layers
* i1480_fw_upload().
* Size has to be @buf_size.
* @evt_complete
* Low level driver uses this to notify code waiting for
* an event that the event has arrived and data is in
* i1480->evt_buf (and size/result in i1480->evt_result).
* @hw_rev
* Use this value to activate dfu code to support new revisions
* of hardware. i1480_init() sets this to a default value.
* It should be updated by the USB and PCI code.
*/
struct i1480 {
struct device *dev;
int (*write)(struct i1480 *, u32 addr, const void *, size_t);
int (*read)(struct i1480 *, u32 addr, size_t);
int (*rc_setup)(struct i1480 *);
void (*rc_release)(struct i1480 *);
int (*wait_init_done)(struct i1480 *);
int (*cmd)(struct i1480 *, const char *cmd_name, size_t cmd_size);
const char *pre_fw_name;
const char *mac_fw_name;
const char *mac_fw_name_deprecate; /* FIXME: Will go away */
const char *phy_fw_name;
u8 hw_rev;
size_t buf_size; /* size of both evt_buf and cmd_buf */
void *evt_buf, *cmd_buf;
ssize_t evt_result;
struct completion evt_complete;
};
static inline
void i1480_init(struct i1480 *i1480)
{
i1480->hw_rev = 1;
init_completion(&i1480->evt_complete);
}
extern int i1480_fw_upload(struct i1480 *);
extern int i1480_pre_fw_upload(struct i1480 *);
extern int i1480_mac_fw_upload(struct i1480 *);
extern int i1480_phy_fw_upload(struct i1480 *);
extern ssize_t i1480_cmd(struct i1480 *, const char *, size_t, size_t);
extern int i1480_rceb_check(const struct i1480 *,
const struct uwb_rceb *, const char *, u8,
u8, unsigned);
enum {
/* Vendor specific command type */
i1480_CET_VS1 = 0xfd,
/* i1480 commands */
i1480_CMD_SET_IP_MAS = 0x000e,
i1480_CMD_GET_MAC_PHY_INFO = 0x0003,
i1480_CMD_MPI_WRITE = 0x000f,
i1480_CMD_MPI_READ = 0x0010,
/* i1480 events */
#if i1480_FW > 0x00000302
i1480_EVT_CONFIRM = 0x0002,
i1480_EVT_RM_INIT_DONE = 0x0101,
i1480_EVT_DEV_ADD = 0x0103,
i1480_EVT_DEV_RM = 0x0104,
i1480_EVT_DEV_ID_CHANGE = 0x0105,
i1480_EVT_GET_MAC_PHY_INFO = i1480_CMD_GET_MAC_PHY_INFO,
#else
i1480_EVT_CONFIRM = 0x0002,
i1480_EVT_RM_INIT_DONE = 0x0101,
i1480_EVT_DEV_ADD = 0x0103,
i1480_EVT_DEV_RM = 0x0104,
i1480_EVT_DEV_ID_CHANGE = 0x0105,
i1480_EVT_GET_MAC_PHY_INFO = i1480_EVT_CONFIRM,
#endif
};
struct i1480_evt_confirm {
struct uwb_rceb rceb;
#ifdef i1480_RCEB_EXTENDED
__le16 wParamLength;
#endif
u8 bResultCode;
} __attribute__((packed));
struct i1480_rceb {
struct uwb_rceb rceb;
#ifdef i1480_RCEB_EXTENDED
__le16 wParamLength;
#endif
} __attribute__((packed));
/**
* Get MAC & PHY Information confirm event structure
*
* Confirm event returned by the command.
*/
struct i1480_evt_confirm_GMPI {
#if i1480_FW > 0x00000302
struct uwb_rceb rceb;
__le16 wParamLength;
__le16 status;
u8 mac_addr[6]; /* EUI-64 bit IEEE address [still 8 bytes?] */
u8 dev_addr[2];
__le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */
u8 hw_rev;
u8 phy_vendor;
u8 phy_rev; /* major = v >> 8; minor = v & 0xff */
__le16 mac_caps;
u8 phy_caps[3];
u8 key_stores;
__le16 mcast_addr_stores;
u8 sec_mode_supported;
#else
struct uwb_rceb rceb;
u8 status;
u8 mac_addr[8]; /* EUI-64 bit IEEE address [still 8 bytes?] */
u8 dev_addr[2];
__le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */
__le16 phy_fw_rev; /* major = v >> 8; minor = v & 0xff */
__le16 mac_caps;
u8 phy_caps;
u8 key_stores;
__le16 mcast_addr_stores;
u8 sec_mode_supported;
#endif
} __attribute__((packed));
struct i1480_cmd_mpi_write {
struct uwb_rccb rccb;
__le16 size;
u8 data[];
};
struct i1480_cmd_mpi_read {
struct uwb_rccb rccb;
__le16 size;
struct {
u8 page, offset;
} __attribute__((packed)) data[];
} __attribute__((packed));
struct i1480_evt_mpi_read {
struct uwb_rceb rceb;
#ifdef i1480_RCEB_EXTENDED
__le16 wParamLength;
#endif
u8 bResultCode;
__le16 size;
struct {
u8 page, offset, value;
} __attribute__((packed)) data[];
} __attribute__((packed));
#endif /* #ifndef __i1480_DFU_H__ */

View File

@ -1,496 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Wireless UWB Link 1480
* MAC Firmware upload implementation
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* Implementation of the code for parsing the firmware file (extract
* the headers and binary code chunks) in the fw_*() functions. The
* code to upload pre and mac firmwares is the same, so it uses a
* common entry point in __mac_fw_upload(), which uses the i1480
* function pointers to push the firmware to the device.
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include "../../uwb.h"
#include "i1480-dfu.h"
/*
* Descriptor for a continuous segment of MAC fw data
*/
struct fw_hdr {
unsigned long address;
size_t length;
const u32 *bin;
struct fw_hdr *next;
};
/* Free a chain of firmware headers */
static
void fw_hdrs_free(struct fw_hdr *hdr)
{
struct fw_hdr *next;
while (hdr) {
next = hdr->next;
kfree(hdr);
hdr = next;
}
}
/* Fill a firmware header descriptor from a memory buffer */
static
int fw_hdr_load(struct i1480 *i1480, struct fw_hdr *hdr, unsigned hdr_cnt,
const char *_data, const u32 *data_itr, const u32 *data_top)
{
size_t hdr_offset = (const char *) data_itr - _data;
size_t remaining_size = (void *) data_top - (void *) data_itr;
if (data_itr + 2 > data_top) {
dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in header at "
"offset %zu, limit %zu\n",
hdr_cnt, hdr_offset,
(const char *) data_itr + 2 - _data,
(const char *) data_top - _data);
return -EINVAL;
}
hdr->next = NULL;
hdr->address = le32_to_cpu(*data_itr++);
hdr->length = le32_to_cpu(*data_itr++);
hdr->bin = data_itr;
if (hdr->length > remaining_size) {
dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in data; "
"chunk too long (%zu bytes), only %zu left\n",
hdr_cnt, hdr_offset, hdr->length, remaining_size);
return -EINVAL;
}
return 0;
}
/**
* Get a buffer where the firmware is supposed to be and create a
* chain of headers linking them together.
*
* @phdr: where to place the pointer to the first header (headers link
* to the next via the @hdr->next ptr); need to free the whole
* chain when done.
*
* @_data: Pointer to the data buffer.
*
* @data_size: Size of the data buffer (bytes); data size has to be a
* multiple of 4. Function will fail if not.
*
* Goes over the whole binary blob; reads the first chunk and creates
* a fw hdr from it (which points to where the data is in @_data and
* the length of the chunk); then goes on to the next chunk until
* done. Each header is linked to the next.
*/
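/*
* Illustration only (not part of the original sources): from the parsing
* done by fw_hdr_load() above and fw_hdrs_load() below, each chunk of the
* firmware blob is laid out as
*
* le32 address; - target memory address for this chunk
* le32 length; - number of 32-bit words that follow
* u32 data[length]; - chunk payload
*
* and chunks are simply concatenated until the end of the file.
*/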
static
int fw_hdrs_load(struct i1480 *i1480, struct fw_hdr **phdr,
const char *_data, size_t data_size)
{
int result;
unsigned hdr_cnt = 0;
u32 *data = (u32 *) _data, *data_itr, *data_top;
struct fw_hdr *hdr, **prev_hdr = phdr;
result = -EINVAL;
/* Check size is ok and pointer is aligned */
if (data_size % sizeof(u32) != 0)
goto error;
if ((unsigned long) _data % sizeof(u16) != 0)
goto error;
*phdr = NULL;
data_itr = data;
data_top = (u32 *) (_data + data_size);
while (data_itr < data_top) {
result = -ENOMEM;
hdr = kmalloc(sizeof(*hdr), GFP_KERNEL);
if (hdr == NULL) {
dev_err(i1480->dev, "Cannot allocate fw header "
"for chunk #%u\n", hdr_cnt);
goto error_alloc;
}
result = fw_hdr_load(i1480, hdr, hdr_cnt,
_data, data_itr, data_top);
if (result < 0)
goto error_load;
data_itr += 2 + hdr->length;
*prev_hdr = hdr;
prev_hdr = &hdr->next;
hdr_cnt++;
}
*prev_hdr = NULL;
return 0;
error_load:
kfree(hdr);
error_alloc:
fw_hdrs_free(*phdr);
error:
return result;
}
/**
* Compares a chunk of fw with one in the device's memory
*
* @i1480: Device instance
* @hdr: Pointer to the firmware chunk
* @returns: 0 if equal, < 0 errno on error. If > 0, it is the offset
* where the difference was found (plus one).
*
* Kind of dirty and simplistic, but does the trick in both the PCI
* and USB version. We do a quick[er] memcmp(), and if it fails, we do
* a byte-by-byte to find the offset.
*/
static
ssize_t i1480_fw_cmp(struct i1480 *i1480, struct fw_hdr *hdr)
{
ssize_t result = 0;
u32 src_itr = 0, cnt;
size_t size = hdr->length*sizeof(hdr->bin[0]);
size_t chunk_size;
u8 *bin = (u8 *) hdr->bin;
while (size > 0) {
chunk_size = size < i1480->buf_size ? size : i1480->buf_size;
result = i1480->read(i1480, hdr->address + src_itr, chunk_size);
if (result < 0) {
dev_err(i1480->dev, "error reading for verification: "
"%zd\n", result);
goto error;
}
if (memcmp(i1480->cmd_buf, bin + src_itr, result)) {
u8 *buf = i1480->cmd_buf;
for (cnt = 0; cnt < result; cnt++)
if (bin[src_itr + cnt] != buf[cnt]) {
dev_err(i1480->dev, "byte failed at "
"src_itr %u cnt %u [0x%02x "
"vs 0x%02x]\n", src_itr, cnt,
bin[src_itr + cnt], buf[cnt]);
result = src_itr + cnt + 1;
goto cmp_failed;
}
}
src_itr += result;
size -= result;
}
result = 0;
error:
cmp_failed:
return result;
}
/**
* Writes firmware headers to the device.
*
* @i1480: Device instance
* @hdr: Processed firmware header chain
* @fw_name: Name of the firmware file (for messages)
* @fw_tag: Name of the firmware type (for messages)
* @returns: 0 if ok, < 0 errno on error.
*/
static
int mac_fw_hdrs_push(struct i1480 *i1480, struct fw_hdr *hdr,
const char *fw_name, const char *fw_tag)
{
struct device *dev = i1480->dev;
ssize_t result = 0;
struct fw_hdr *hdr_itr;
int verif_retry_count;
/* Now, header by header, push them to the hw */
for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) {
verif_retry_count = 0;
retry:
dev_dbg(dev, "fw chunk (%zu @ 0x%08lx)\n",
hdr_itr->length * sizeof(hdr_itr->bin[0]),
hdr_itr->address);
result = i1480->write(i1480, hdr_itr->address, hdr_itr->bin,
hdr_itr->length*sizeof(hdr_itr->bin[0]));
if (result < 0) {
dev_err(dev, "%s fw '%s': write failed (%zuB @ 0x%lx):"
" %zd\n", fw_tag, fw_name,
hdr_itr->length * sizeof(hdr_itr->bin[0]),
hdr_itr->address, result);
break;
}
result = i1480_fw_cmp(i1480, hdr_itr);
if (result < 0) {
dev_err(dev, "%s fw '%s': verification read "
"failed (%zuB @ 0x%lx): %zd\n",
fw_tag, fw_name,
hdr_itr->length * sizeof(hdr_itr->bin[0]),
hdr_itr->address, result);
break;
}
if (result > 0) { /* Offset where it failed + 1 */
result--;
dev_err(dev, "%s fw '%s': WARNING: verification "
"failed at 0x%lx: retrying\n",
fw_tag, fw_name, hdr_itr->address + result);
if (++verif_retry_count < 3)
goto retry; /* write this block again! */
dev_err(dev, "%s fw '%s': verification failed at 0x%lx: "
"tried %d times\n", fw_tag, fw_name,
hdr_itr->address + result, verif_retry_count);
result = -EINVAL;
break;
}
}
return result;
}
/** Puts the device in firmware upload mode. */
static
int mac_fw_upload_enable(struct i1480 *i1480)
{
int result;
u32 reg = 0x800000c0;
u32 *buffer = (u32 *)i1480->cmd_buf;
if (i1480->hw_rev > 1)
reg = 0x8000d0d4;
result = i1480->read(i1480, reg, sizeof(u32));
if (result < 0)
goto error_cmd;
*buffer &= ~i1480_FW_UPLOAD_MODE_MASK;
result = i1480->write(i1480, reg, buffer, sizeof(u32));
if (result < 0)
goto error_cmd;
return 0;
error_cmd:
dev_err(i1480->dev, "can't enable fw upload mode: %d\n", result);
return result;
}
/** Gets the device out of firmware upload mode. */
static
int mac_fw_upload_disable(struct i1480 *i1480)
{
int result;
u32 reg = 0x800000c0;
u32 *buffer = (u32 *)i1480->cmd_buf;
if (i1480->hw_rev > 1)
reg = 0x8000d0d4;
result = i1480->read(i1480, reg, sizeof(u32));
if (result < 0)
goto error_cmd;
*buffer |= i1480_FW_UPLOAD_MODE_MASK;
result = i1480->write(i1480, reg, buffer, sizeof(u32));
if (result < 0)
goto error_cmd;
return 0;
error_cmd:
dev_err(i1480->dev, "can't disable fw upload mode: %d\n", result);
return result;
}
/**
* Generic function for uploading a MAC firmware.
*
* @i1480: Device instance
* @fw_name: Name of firmware file to upload.
* @fw_tag: Name of the firmware type (for messages)
* [eg: MAC, PRE]
* @returns: 0 if ok, < 0 errno on error.
*/
static
int __mac_fw_upload(struct i1480 *i1480, const char *fw_name,
const char *fw_tag)
{
int result;
const struct firmware *fw;
struct fw_hdr *fw_hdrs;
result = request_firmware(&fw, fw_name, i1480->dev);
if (result < 0) /* Up to caller to complain on -ENOENT */
goto out;
result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size);
if (result < 0) {
dev_err(i1480->dev, "%s fw '%s': failed to parse firmware "
"file: %d\n", fw_tag, fw_name, result);
goto out_release;
}
result = mac_fw_upload_enable(i1480);
if (result < 0)
goto out_hdrs_release;
result = mac_fw_hdrs_push(i1480, fw_hdrs, fw_name, fw_tag);
mac_fw_upload_disable(i1480);
out_hdrs_release:
if (result >= 0)
dev_info(i1480->dev, "%s fw '%s': uploaded\n", fw_tag, fw_name);
else
dev_err(i1480->dev, "%s fw '%s': failed to upload (%d), "
"power cycle device\n", fw_tag, fw_name, result);
fw_hdrs_free(fw_hdrs);
out_release:
release_firmware(fw);
out:
return result;
}
/**
* Upload a pre-PHY firmware
*
*/
int i1480_pre_fw_upload(struct i1480 *i1480)
{
int result;
result = __mac_fw_upload(i1480, i1480->pre_fw_name, "PRE");
if (result == 0)
msleep(400);
return result;
}
/**
* Reset the MAC and PHY
*
* @i1480: Device's instance
* @returns: 0 if ok, < 0 errno code on error
*
* We put the command on kmalloc'ed memory as some arches cannot do
* USB from the stack. The reply event is copied from a staging buffer,
* so it can be on the stack. See WUSB1.0[8.6.2.4] for more details.
*
* We issue the reset to make sure the UWB controller reinits the PHY;
* this way we can know if the PHY init went ok.
*/
static
int i1480_cmd_reset(struct i1480 *i1480)
{
int result;
struct uwb_rccb *cmd = (void *) i1480->cmd_buf;
struct i1480_evt_reset {
struct uwb_rceb rceb;
u8 bResultCode;
} __attribute__((packed)) *reply = (void *) i1480->evt_buf;
result = -ENOMEM;
cmd->bCommandType = UWB_RC_CET_GENERAL;
cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
reply->rceb.bEventType = UWB_RC_CET_GENERAL;
reply->rceb.wEvent = UWB_RC_CMD_RESET;
result = i1480_cmd(i1480, "RESET", sizeof(*cmd), sizeof(*reply));
if (result < 0)
goto out;
if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
dev_err(i1480->dev, "RESET: command execution failed: %u\n",
reply->bResultCode);
result = -EIO;
}
out:
return result;
}
/* Wait for the MAC FW to start running */
static
int i1480_fw_is_running_q(struct i1480 *i1480)
{
int cnt = 0;
int result;
u32 *val = (u32 *) i1480->cmd_buf;
for (cnt = 0; cnt < 10; cnt++) {
msleep(100);
result = i1480->read(i1480, 0x80080000, 4);
if (result < 0) {
dev_err(i1480->dev, "Can't read 0x8008000: %d\n", result);
goto out;
}
if (*val == 0x55555555UL) /* fw running? cool */
goto out;
}
dev_err(i1480->dev, "Timed out waiting for fw to start\n");
result = -ETIMEDOUT;
out:
return result;
}
/**
* Upload MAC firmware, wait for it to start
*
* @i1480: Device instance (the MAC firmware file name is taken from
* i1480->mac_fw_name)
*
* This has to be called after the pre fw has been uploaded (if
* there is any).
*/
int i1480_mac_fw_upload(struct i1480 *i1480)
{
int result = 0, deprecated_name = 0;
struct i1480_rceb *rcebe = (void *) i1480->evt_buf;
result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC");
if (result == -ENOENT) {
result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate,
"MAC");
deprecated_name = 1;
}
if (result < 0)
return result;
if (deprecated_name == 1)
dev_warn(i1480->dev,
"WARNING: firmware file name %s is deprecated, "
"please rename to %s\n",
i1480->mac_fw_name_deprecate, i1480->mac_fw_name);
result = i1480_fw_is_running_q(i1480);
if (result < 0)
goto error_fw_not_running;
result = i1480->rc_setup ? i1480->rc_setup(i1480) : 0;
if (result < 0) {
dev_err(i1480->dev, "Cannot setup after MAC fw upload: %d\n",
result);
goto error_setup;
}
result = i1480->wait_init_done(i1480); /* wait init'on */
if (result < 0) {
dev_err(i1480->dev, "MAC fw '%s': Initialization timed out "
"(%d)\n", i1480->mac_fw_name, result);
goto error_init_timeout;
}
/* verify we got the right initialization done event */
if (i1480->evt_result != sizeof(*rcebe)) {
dev_err(i1480->dev, "MAC fw '%s': initialization event returns "
"wrong size (%zu bytes vs %zu needed)\n",
i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe));
result = -EIO;
goto error_size;
}
result = -EIO;
if (i1480_rceb_check(i1480, &rcebe->rceb, NULL, 0, i1480_CET_VS1,
i1480_EVT_RM_INIT_DONE) < 0) {
dev_err(i1480->dev, "wrong initialization event 0x%02x/%04x/%02x "
"received; expected 0x%02x/%04x/00\n",
rcebe->rceb.bEventType, le16_to_cpu(rcebe->rceb.wEvent),
rcebe->rceb.bEventContext, i1480_CET_VS1,
i1480_EVT_RM_INIT_DONE);
goto error_init_timeout;
}
result = i1480_cmd_reset(i1480);
if (result < 0)
dev_err(i1480->dev, "MAC fw '%s': MBOA reset failed (%d)\n",
i1480->mac_fw_name, result);
error_fw_not_running:
error_init_timeout:
error_size:
error_setup:
return result;
}

View File

@ -1,190 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Wireless UWB Link 1480
* PHY parameters upload
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* Code for uploading the PHY parameters to the PHY through the UWB
* Radio Control interface.
*
* We just send the data through the MPI interface using HWA-like
* commands and then reset the PHY to make sure it is ok.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include "../../../wusbcore/include/wusb.h"
#include "i1480-dfu.h"
/**
* Write an array of address/value pairs to the MPI interface
*
* @i1480: Device descriptor
* @data: Data array to write
* @size: Size of the data array
* @returns: 0 if ok, < 0 errno code on error.
*
* The data array is organized into pairs:
*
* ADDRESS VALUE
*
* ADDRESS is BE 16 bit unsigned, VALUE 8 bit unsigned. Size thus has
* to be a multiple of three.
*/
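/*
* Example (illustration only, not part of the original sources): a
* 3-byte buffer { 0x12, 0x34, 0xab } writes the value 0xab to MPI
* address 0x1234 (the address bytes are big endian, high byte first).
*/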
static
int i1480_mpi_write(struct i1480 *i1480, const void *data, size_t size)
{
int result;
struct i1480_cmd_mpi_write *cmd = i1480->cmd_buf;
struct i1480_evt_confirm *reply = i1480->evt_buf;
BUG_ON(size > 480);
result = -ENOMEM;
cmd->rccb.bCommandType = i1480_CET_VS1;
cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_WRITE);
cmd->size = cpu_to_le16(size);
memcpy(cmd->data, data, size);
reply->rceb.bEventType = i1480_CET_VS1;
reply->rceb.wEvent = i1480_CMD_MPI_WRITE;
result = i1480_cmd(i1480, "MPI-WRITE", sizeof(*cmd) + size, sizeof(*reply));
if (result < 0)
goto out;
if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
dev_err(i1480->dev, "MPI-WRITE: command execution failed: %d\n",
reply->bResultCode);
result = -EIO;
}
out:
return result;
}
/**
* Read a value array from an address of the MPI interface
*
* @i1480: Device descriptor
* @data: where to place the read array
* @srcaddr: Where to read from
* @size: Size of the data read array
* @returns: 0 if ok, < 0 errno code on error.
*
* The command data array is organized into pairs ADDR0 ADDR1..., and
* the returned data in ADDR0 VALUE0 ADDR1 VALUE1...
*
* We generate the command array to be a sequential read and then
* rearrange the result.
*
* We use the i1480->cmd_buf for the command, i1480->evt_buf for the reply.
*
* As the reply has to fit in 512 bytes (i1480->evt_buf), the max amount
* of values we can read is (512 - sizeof(*reply)) / 3
*/
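/*
* Illustration only (not part of the original sources): to read two
* bytes starting at MPI address 0x0006, the command payload below
* carries the pairs { page 0x00, offset 0x06 } { page 0x00, offset 0x07 }
* (two bytes each), and the reply returns { page, offset, value }
* triplets (three bytes each), which the loop copies into data[].
*/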
static
int i1480_mpi_read(struct i1480 *i1480, u8 *data, u16 srcaddr, size_t size)
{
int result;
struct i1480_cmd_mpi_read *cmd = i1480->cmd_buf;
struct i1480_evt_mpi_read *reply = i1480->evt_buf;
unsigned cnt;
memset(i1480->cmd_buf, 0x69, 512);
memset(i1480->evt_buf, 0x69, 512);
BUG_ON(size > (i1480->buf_size - sizeof(*reply)) / 3);
result = -ENOMEM;
cmd->rccb.bCommandType = i1480_CET_VS1;
cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_READ);
cmd->size = cpu_to_le16(3*size);
for (cnt = 0; cnt < size; cnt++) {
cmd->data[cnt].page = (srcaddr + cnt) >> 8;
cmd->data[cnt].offset = (srcaddr + cnt) & 0xff;
}
reply->rceb.bEventType = i1480_CET_VS1;
reply->rceb.wEvent = i1480_CMD_MPI_READ;
result = i1480_cmd(i1480, "MPI-READ", sizeof(*cmd) + 2*size,
sizeof(*reply) + 3*size);
if (result < 0)
goto out;
if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
dev_err(i1480->dev, "MPI-READ: command execution failed: %d\n",
reply->bResultCode);
result = -EIO;
goto out;
}
for (cnt = 0; cnt < size; cnt++) {
if (reply->data[cnt].page != (srcaddr + cnt) >> 8)
dev_err(i1480->dev, "MPI-READ: page inconsistency at "
"index %u: expected 0x%02x, got 0x%02x\n", cnt,
(srcaddr + cnt) >> 8, reply->data[cnt].page);
if (reply->data[cnt].offset != ((srcaddr + cnt) & 0x00ff))
dev_err(i1480->dev, "MPI-READ: offset inconsistency at "
"index %u: expected 0x%02x, got 0x%02x\n", cnt,
(srcaddr + cnt) & 0x00ff,
reply->data[cnt].offset);
data[cnt] = reply->data[cnt].value;
}
result = 0;
out:
return result;
}
/**
* Upload a PHY firmware, wait for it to start
*
* @i1480: Device instance (the PHY firmware file name is taken from
* i1480->phy_fw_name)
*
* We assume the MAC fw is up and running. This means we can use the
* MPI interface to write the PHY firmware. Once done, we issue an
* MBOA Reset, which will force the MAC to reset and reinitialize the
* PHY. If that works, we are ready to go.
*
* Max packet size for the MPI write is 512, so the max buffer is 480
* (which gives us 160 three-byte triads of MSB, LSB and VAL for the data).
*/
int i1480_phy_fw_upload(struct i1480 *i1480)
{
int result;
const struct firmware *fw;
const char *data_itr, *data_top;
const size_t MAX_BLK_SIZE = 480; /* 160 triads */
size_t data_size;
u8 phy_stat;
result = request_firmware(&fw, i1480->phy_fw_name, i1480->dev);
if (result < 0)
goto out;
/* Loop writing data in chunks as big as possible until done. */
for (data_itr = fw->data, data_top = data_itr + fw->size;
data_itr < data_top; data_itr += MAX_BLK_SIZE) {
data_size = min(MAX_BLK_SIZE, (size_t) (data_top - data_itr));
result = i1480_mpi_write(i1480, data_itr, data_size);
if (result < 0)
goto error_mpi_write;
}
/* Read MPI page 0, offset 6; if 0, PHY was initialized correctly. */
result = i1480_mpi_read(i1480, &phy_stat, 0x0006, 1);
if (result < 0) {
dev_err(i1480->dev, "PHY: can't get status: %d\n", result);
goto error_mpi_status;
}
if (phy_stat != 0) {
result = -ENODEV;
dev_info(i1480->dev, "error, PHY not ready: %u\n", phy_stat);
goto error_phy_status;
}
dev_info(i1480->dev, "PHY fw '%s': uploaded\n", i1480->phy_fw_name);
error_phy_status:
error_mpi_status:
error_mpi_write:
release_firmware(fw);
if (result < 0)
dev_err(i1480->dev, "PHY fw '%s': failed to upload (%d), "
"power cycle device\n", i1480->phy_fw_name, result);
out:
return result;
}

View File

@ -1,448 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Wireless UWB Link 1480
* USB SKU firmware upload implementation
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This driver will prepare the i1480 device to behave as a real
* Wireless USB HWA adaptor by uploading the firmware.
*
* When the device is connected or driver is loaded, i1480_usb_probe()
* is called--this will allocate and initialize the device structure,
* fill in the pointers to the common functions (read, write,
* wait_init_done and cmd for HWA command execution) and once that is
* done, call the common firmware uploading routine. Then clean up and
* return -ENODEV, as we don't attach to the device.
*
* The rest are the basic ops we implement that the fw upload code
* uses to do its job. All the ops in the common code are i1480->NAME,
* the functions are i1480_usb_NAME().
*/
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "../../uwb.h"
#include "../../../wusbcore/include/wusb.h"
#include "../../../wusbcore/include/wusb-wa.h"
#include "i1480-dfu.h"
struct i1480_usb {
struct i1480 i1480;
struct usb_device *usb_dev;
struct usb_interface *usb_iface;
struct urb *neep_urb; /* URB for reading from EP1 */
};
static
void i1480_usb_init(struct i1480_usb *i1480_usb)
{
i1480_init(&i1480_usb->i1480);
}
static
int i1480_usb_create(struct i1480_usb *i1480_usb, struct usb_interface *iface)
{
struct usb_device *usb_dev = interface_to_usbdev(iface);
int result = -ENOMEM;
i1480_usb->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */
i1480_usb->usb_iface = usb_get_intf(iface);
usb_set_intfdata(iface, i1480_usb); /* Bind the driver to iface0 */
i1480_usb->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
if (i1480_usb->neep_urb == NULL)
goto error;
return 0;
error:
usb_set_intfdata(iface, NULL);
usb_put_intf(iface);
usb_put_dev(usb_dev);
return result;
}
static
void i1480_usb_destroy(struct i1480_usb *i1480_usb)
{
usb_kill_urb(i1480_usb->neep_urb);
usb_free_urb(i1480_usb->neep_urb);
usb_set_intfdata(i1480_usb->usb_iface, NULL);
usb_put_intf(i1480_usb->usb_iface);
usb_put_dev(i1480_usb->usb_dev);
}
/**
* Write a buffer to a memory address in the i1480 device
*
* @i1480: i1480 instance
* @memory_address:
* Address where to write the data buffer to.
* @buffer: Buffer to the data
* @size: Size of the buffer [has to be < 512].
* @returns: 0 if ok, < 0 errno code on error.
*
* Data buffers to USB cannot be on the stack or in vmalloc'ed areas,
* so we copy it to the local i1480 buffer before proceeding. In any
* case, we have a max size we can send.
*/
static
int i1480_usb_write(struct i1480 *i1480, u32 memory_address,
const void *buffer, size_t size)
{
int result = 0;
struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
size_t buffer_size, itr = 0;
BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
while (size > 0) {
buffer_size = size < i1480->buf_size ? size : i1480->buf_size;
memcpy(i1480->cmd_buf, buffer + itr, buffer_size);
result = usb_control_msg(
i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
memory_address, (memory_address >> 16),
i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */);
if (result < 0)
break;
itr += result;
memory_address += result;
size -= result;
}
return result;
}
/**
* Read a block [max size 512] of the device's memory to @i1480's buffer.
*
* @i1480: i1480 instance
* @addr: Address where to read from.
* @size: Size to read. Smaller than or equal to 512.
* @returns: >= 0 number of bytes read if ok, < 0 errno code on error.
*
* NOTE: if the memory address or block is incorrect, you might get a
* stall or a different memory read. Caller has to verify the
* memory address and size passed back in the @neh structure.
*/
static
int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size)
{
ssize_t result = 0, bytes = 0;
size_t itr, read_size = i1480->buf_size;
struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
BUG_ON(size > i1480->buf_size);
BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
BUG_ON(read_size > 512);
if (addr >= 0x8000d200 && addr < 0x8000d400) /* Yeah, HW quirk */
read_size = 4;
for (itr = 0; itr < size; itr += read_size) {
size_t itr_addr = addr + itr;
size_t itr_size = min(read_size, size - itr);
result = usb_control_msg(
i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0),
0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
itr_addr, (itr_addr >> 16),
i1480->cmd_buf + itr, itr_size,
100 /* FIXME: arbitrary */);
if (result < 0) {
dev_err(i1480->dev, "%s: USB read error: %zd\n",
__func__, result);
goto out;
}
if (result != itr_size) {
result = -EIO;
dev_err(i1480->dev,
"%s: partial read got only %zu bytes vs %zu expected\n",
__func__, result, itr_size);
goto out;
}
bytes += result;
}
result = bytes;
out:
return result;
}
/**
* Callback for reads on the notification/event endpoint
*
* Just records the read result and completes the pending wait.
*/
static
void i1480_usb_neep_cb(struct urb *urb)
{
struct i1480 *i1480 = urb->context;
struct device *dev = i1480->dev;
switch (urb->status) {
case 0:
break;
case -ECONNRESET: /* Not an error, but a controlled situation; */
case -ENOENT: /* (we killed the URB)...so, no broadcast */
dev_dbg(dev, "NEEP: reset/noent %d\n", urb->status);
break;
case -ESHUTDOWN: /* going away! */
dev_dbg(dev, "NEEP: down %d\n", urb->status);
break;
default:
dev_err(dev, "NEEP: unknown status %d\n", urb->status);
break;
}
i1480->evt_result = urb->actual_length;
complete(&i1480->evt_complete);
return;
}
/**
* Wait for the MAC FW to initialize
*
* MAC FW sends a 0xfd/0101/00 notification to EP1 when done
* initializing. Get that notification into i1480->evt_buf; upper layer
* will verify it.
*
* Set i1480->evt_result with the result of getting the event or its
* size (if successful).
*
* Delivers the data directly to i1480->evt_buf
*/
static
int i1480_usb_wait_init_done(struct i1480 *i1480)
{
int result;
struct device *dev = i1480->dev;
struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
struct usb_endpoint_descriptor *epd;
init_completion(&i1480->evt_complete);
i1480->evt_result = -EINPROGRESS;
epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
usb_fill_int_urb(i1480_usb->neep_urb, i1480_usb->usb_dev,
usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
i1480->evt_buf, i1480->buf_size,
i1480_usb_neep_cb, i1480, epd->bInterval);
result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
if (result < 0) {
dev_err(dev, "init done: cannot submit NEEP read: %d\n",
result);
goto error_submit;
}
/* Wait for the USB callback to get the data */
result = wait_for_completion_interruptible_timeout(
&i1480->evt_complete, HZ);
if (result <= 0) {
result = result == 0 ? -ETIMEDOUT : result;
goto error_wait;
}
usb_kill_urb(i1480_usb->neep_urb);
return 0;
error_wait:
usb_kill_urb(i1480_usb->neep_urb);
error_submit:
i1480->evt_result = result;
return result;
}
/**
* Generic function for issuing commands to the i1480
*
* @i1480: i1480 instance
* @cmd_name: Name of the command (for error messages)
* @cmd_size: Size of the command buffer; the command itself is taken
* from i1480->cmd_buf and the reply event is delivered to
* i1480->evt_buf by the NEEP callback.
* @returns: >= 0 size of the returned event data if ok,
* < 0 errno code on error.
*
* Arms the NE handle, issues the command to the device and checks the
* basics of the reply event.
*/
static
int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size)
{
int result;
struct device *dev = i1480->dev;
struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
struct usb_endpoint_descriptor *epd;
struct uwb_rccb *cmd = i1480->cmd_buf;
u8 iface_no;
/* Post a read on the notification & event endpoint */
iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber;
epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
usb_fill_int_urb(
i1480_usb->neep_urb, i1480_usb->usb_dev,
usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
i1480->evt_buf, i1480->buf_size,
i1480_usb_neep_cb, i1480, epd->bInterval);
result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
if (result < 0) {
dev_err(dev, "%s: cannot submit NEEP read: %d\n",
cmd_name, result);
goto error_submit_ep1;
}
/* Now post the command on EP0 */
result = usb_control_msg(
i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
WA_EXEC_RC_CMD,
USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
0, iface_no,
cmd, cmd_size,
100 /* FIXME: this is totally arbitrary */);
if (result < 0) {
dev_err(dev, "%s: control request failed: %d\n",
cmd_name, result);
goto error_submit_ep0;
}
return result;
error_submit_ep0:
usb_kill_urb(i1480_usb->neep_urb);
error_submit_ep1:
return result;
}
/*
* Probe a i1480 device for uploading firmware.
*
* We attach only to interface #0, which is the radio control interface.
*/
static
int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(iface);
struct i1480_usb *i1480_usb;
struct i1480 *i1480;
struct device *dev = &iface->dev;
int result;
result = -ENODEV;
if (iface->cur_altsetting->desc.bInterfaceNumber != 0) {
dev_dbg(dev, "not attaching to iface %d\n",
iface->cur_altsetting->desc.bInterfaceNumber);
goto error;
}
if (iface->num_altsetting > 1 &&
le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) {
/* Need altsetting #1 [HW QUIRK] or EP1 won't work */
result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
if (result < 0)
dev_warn(dev,
"can't set altsetting 1 on iface 0: %d\n",
result);
}
if (iface->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
result = -ENOMEM;
i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
if (i1480_usb == NULL) {
dev_err(dev, "Unable to allocate instance\n");
goto error;
}
i1480_usb_init(i1480_usb);
i1480 = &i1480_usb->i1480;
i1480->buf_size = 512;
i1480->cmd_buf = kmalloc_array(2, i1480->buf_size, GFP_KERNEL);
if (i1480->cmd_buf == NULL) {
dev_err(dev, "Cannot allocate transfer buffers\n");
result = -ENOMEM;
goto error_buf_alloc;
}
i1480->evt_buf = i1480->cmd_buf + i1480->buf_size;
result = i1480_usb_create(i1480_usb, iface);
if (result < 0) {
dev_err(dev, "Cannot create instance: %d\n", result);
goto error_create;
}
/* setup the fops and upload the firmware */
i1480->pre_fw_name = "i1480-pre-phy-0.0.bin";
i1480->mac_fw_name = "i1480-usb-0.0.bin";
i1480->mac_fw_name_deprecate = "ptc-0.0.bin";
i1480->phy_fw_name = "i1480-phy-0.0.bin";
i1480->dev = &iface->dev;
i1480->write = i1480_usb_write;
i1480->read = i1480_usb_read;
i1480->rc_setup = NULL;
i1480->wait_init_done = i1480_usb_wait_init_done;
i1480->cmd = i1480_usb_cmd;
result = i1480_fw_upload(&i1480_usb->i1480); /* the real thing */
if (result >= 0) {
usb_reset_device(i1480_usb->usb_dev);
result = -ENODEV; /* we don't want to bind to the iface */
}
i1480_usb_destroy(i1480_usb);
error_create:
kfree(i1480->cmd_buf);
error_buf_alloc:
kfree(i1480_usb);
error:
return result;
}
MODULE_FIRMWARE("i1480-pre-phy-0.0.bin");
MODULE_FIRMWARE("i1480-usb-0.0.bin");
MODULE_FIRMWARE("i1480-phy-0.0.bin");
#define i1480_USB_DEV(v, p) \
{ \
.match_flags = USB_DEVICE_ID_MATCH_DEVICE \
| USB_DEVICE_ID_MATCH_DEV_INFO \
| USB_DEVICE_ID_MATCH_INT_INFO, \
.idVendor = (v), \
.idProduct = (p), \
.bDeviceClass = 0xff, \
.bDeviceSubClass = 0xff, \
.bDeviceProtocol = 0xff, \
.bInterfaceClass = 0xff, \
.bInterfaceSubClass = 0xff, \
.bInterfaceProtocol = 0xff, \
}
/** USB device IDs that we handle */
static const struct usb_device_id i1480_usb_id_table[] = {
i1480_USB_DEV(0x8086, 0xdf3b),
i1480_USB_DEV(0x15a9, 0x0005),
i1480_USB_DEV(0x07d1, 0x3802),
i1480_USB_DEV(0x050d, 0x305a),
i1480_USB_DEV(0x3495, 0x3007),
{},
};
MODULE_DEVICE_TABLE(usb, i1480_usb_id_table);
static struct usb_driver i1480_dfu_driver = {
.name = "i1480-dfu-usb",
.id_table = i1480_usb_id_table,
.probe = i1480_usb_probe,
.disconnect = NULL,
};
module_usb_driver(i1480_dfu_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB");
MODULE_LICENSE("GPL");

View File

@ -1,85 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Wireless UWB Link 1480
* Event Size tables for Wired Adaptors
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* FIXME: docs
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/usb.h>
#include "../uwb.h"
#include "dfu/i1480-dfu.h"
/** Event size table for wEvents 0x00XX */
static struct uwb_est_entry i1480_est_fd00[] = {
/* Anybody expecting this response has to use
* neh->extra_size to specify the real size that will
* come back. */
[i1480_EVT_CONFIRM] = { .size = sizeof(struct i1480_evt_confirm) },
[i1480_CMD_SET_IP_MAS] = { .size = sizeof(struct i1480_evt_confirm) },
#ifdef i1480_RCEB_EXTENDED
[0x09] = {
.size = sizeof(struct i1480_rceb),
.offset = 1 + offsetof(struct i1480_rceb, wParamLength),
},
#endif
};
/** Event size table for wEvents 0x01XX */
static struct uwb_est_entry i1480_est_fd01[] = {
[0xff & i1480_EVT_RM_INIT_DONE] = { .size = sizeof(struct i1480_rceb) },
[0xff & i1480_EVT_DEV_ADD] = { .size = sizeof(struct i1480_rceb) + 9 },
[0xff & i1480_EVT_DEV_RM] = { .size = sizeof(struct i1480_rceb) + 9 },
[0xff & i1480_EVT_DEV_ID_CHANGE] = {
.size = sizeof(struct i1480_rceb) + 2 },
};
static int __init i1480_est_init(void)
{
int result = uwb_est_register(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
i1480_est_fd00,
ARRAY_SIZE(i1480_est_fd00));
if (result < 0) {
printk(KERN_ERR "Can't register EST table fd00: %d\n", result);
return result;
}
result = uwb_est_register(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b,
i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01));
if (result < 0) {
printk(KERN_ERR "Can't register EST table fd01: %d\n", result);
return result;
}
return 0;
}
module_init(i1480_est_init);
static void __exit i1480_est_exit(void)
{
uwb_est_unregister(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00));
uwb_est_unregister(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b,
i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01));
}
module_exit(i1480_est_exit);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("i1480's Vendor Specific Event Size Tables");
MODULE_LICENSE("GPL");
/**
* USB device IDs that we handle
*
* [so we are loaded when this kind of device is connected]
*/
static struct usb_device_id __used i1480_est_id_table[] = {
{ USB_DEVICE(0x8086, 0xdf3b), },
{ USB_DEVICE(0x8086, 0x0c3b), },
{ },
};
MODULE_DEVICE_TABLE(usb, i1480_est_id_table);

View File

@ -1,42 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band
* IE Received notification handling.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitmap.h>
#include "uwb-internal.h"
/*
* Process an incoming IE Received notification.
*/
int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *evt)
{
int result = -EINVAL;
struct device *dev = &evt->rc->uwb_dev.dev;
struct uwb_rc_evt_ie_rcv *iercv;
/* Is there enough data to decode it? */
if (evt->notif.size < sizeof(*iercv)) {
dev_err(dev, "IE Received notification: Not enough data to "
"decode (%zu vs %zu bytes needed)\n",
evt->notif.size, sizeof(*iercv));
goto error;
}
iercv = container_of(evt->notif.rceb, struct uwb_rc_evt_ie_rcv, rceb);
dev_dbg(dev, "IE received, element ID=%d\n", iercv->IEData[0]);
if (iercv->IEData[0] == UWB_RELINQUISH_REQUEST_IE) {
dev_warn(dev, "unhandled Relinquish Request IE\n");
}
return 0;
error:
return result;
}

View File

@ -1,366 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band
* Information Element Handling
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* Reinette Chatre <reinette.chatre@intel.com>
*
* FIXME: docs
*/
#include <linux/slab.h>
#include <linux/export.h>
#include "uwb-internal.h"
/**
* uwb_ie_next - get the next IE in a buffer
* @ptr: start of the buffer containing the IE data
* @len: length of the buffer
*
* Both @ptr and @len are updated so subsequent calls to uwb_ie_next()
* will get the next IE.
*
* NULL is returned (and @ptr and @len will not be updated) if there
* are no more IEs in the buffer or the buffer is too short.
*/
struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len)
{
struct uwb_ie_hdr *hdr;
size_t ie_len;
if (*len < sizeof(struct uwb_ie_hdr))
return NULL;
hdr = *ptr;
ie_len = sizeof(struct uwb_ie_hdr) + hdr->length;
if (*len < ie_len)
return NULL;
*ptr += ie_len;
*len -= ie_len;
return hdr;
}
EXPORT_SYMBOL_GPL(uwb_ie_next);
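/*
* Usage sketch for uwb_ie_next() (illustration only, not part of the
* original sources; process() is a hypothetical consumer):
*
* void *ptr = buf;
* size_t len = buf_len;
* struct uwb_ie_hdr *ie;
* while ((ie = uwb_ie_next(&ptr, &len)) != NULL)
* process(ie); // ie->element_id, ie->length; payload follows the header
*/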
/**
* uwb_ie_dump_hex - print IEs to a character buffer
* @ies: the IEs to print.
* @len: length of all the IEs.
* @buf: the destination buffer.
* @size: size of @buf.
*
* Returns the number of characters written.
*/
int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len,
char *buf, size_t size)
{
void *ptr;
const struct uwb_ie_hdr *ie;
int r = 0;
u8 *d;
ptr = (void *)ies;
for (;;) {
ie = uwb_ie_next(&ptr, &len);
if (!ie)
break;
r += scnprintf(buf + r, size - r, "%02x %02x",
(unsigned)ie->element_id,
(unsigned)ie->length);
d = (uint8_t *)ie + sizeof(struct uwb_ie_hdr);
while (d != ptr && r < size)
r += scnprintf(buf + r, size - r, " %02x", (unsigned)*d++);
if (r < size)
buf[r++] = '\n';
}
return r;
}
/**
* Get the IEs that a radio controller is sending in its beacon
*
* @uwb_rc: UWB Radio Controller
* @returns: Size read from the system
*
* We don't need to lock the uwb_rc's mutex because we don't modify
* anything. Once done with the returned IE buffer, release it via
* uwb_rc_ie_release(); don't call kfree() on it directly.
*/
static
ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie)
{
ssize_t result;
struct device *dev = &uwb_rc->uwb_dev.dev;
struct uwb_rccb *cmd = NULL;
struct uwb_rceb *reply = NULL;
struct uwb_rc_evt_get_ie *get_ie;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->bCommandType = UWB_RC_CET_GENERAL;
cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE);
result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd),
UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE,
&reply);
kfree(cmd);
if (result < 0)
return result;
get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb);
if (result < sizeof(*get_ie)) {
dev_err(dev, "not enough data returned for decoding GET IE "
"(%zu bytes received vs %zu needed)\n",
result, sizeof(*get_ie));
return -EINVAL;
} else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) {
dev_err(dev, "not enough data returned for decoding GET IE "
"payload (%zu bytes received vs %zu needed)\n", result,
sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength));
return -EINVAL;
}
*pget_ie = get_ie;
return result;
}
/**
* Replace all IEs currently being transmitted by a device
*
* @rc: the radio controller
* @cmd: pointer to the SET-IE command with the IEs to set
*/
int uwb_rc_set_ie(struct uwb_rc *rc, struct uwb_rc_cmd_set_ie *cmd)
{
int result;
struct device *dev = &rc->uwb_dev.dev;
struct uwb_rc_evt_set_ie reply;
reply.rceb.bEventType = UWB_RC_CET_GENERAL;
reply.rceb.wEvent = UWB_RC_CMD_SET_IE;
result = uwb_rc_cmd(rc, "SET-IE", &cmd->rccb,
sizeof(*cmd) + le16_to_cpu(cmd->wIELength),
&reply.rceb, sizeof(reply));
if (result < 0)
goto error_cmd;
else if (result != sizeof(reply)) {
dev_err(dev, "SET-IE: not enough data to decode reply "
"(%d bytes received vs %zu needed)\n",
result, sizeof(reply));
result = -EIO;
} else if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
dev_err(dev, "SET-IE: command execution failed: %s (%d)\n",
uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
result = -EIO;
} else
result = 0;
error_cmd:
return result;
}
/* Initialize the IE management subsystem */
void uwb_rc_ie_init(struct uwb_rc *uwb_rc)
{
mutex_init(&uwb_rc->ies_mutex);
}
/**
* uwb_rc_ie_setup - setup a radio controller's IE manager
* @uwb_rc: the radio controller.
*
* The current set of IEs are obtained from the hardware with a GET-IE
* command (since the radio controller is not yet beaconing this will
* be just the hardware's MAC and PHY Capability IEs).
*
* Returns 0 on success; -ve on an error.
*/
int uwb_rc_ie_setup(struct uwb_rc *uwb_rc)
{
struct uwb_rc_evt_get_ie *ie_info = NULL;
int capacity;
capacity = uwb_rc_get_ie(uwb_rc, &ie_info);
if (capacity < 0)
return capacity;
mutex_lock(&uwb_rc->ies_mutex);
uwb_rc->ies = (struct uwb_rc_cmd_set_ie *)ie_info;
uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL;
uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE);
uwb_rc->ies_capacity = capacity;
mutex_unlock(&uwb_rc->ies_mutex);
return 0;
}
/* Cleanup the whole IE management subsystem */
void uwb_rc_ie_release(struct uwb_rc *uwb_rc)
{
kfree(uwb_rc->ies);
uwb_rc->ies = NULL;
uwb_rc->ies_capacity = 0;
}
static int uwb_rc_ie_add_one(struct uwb_rc *rc, const struct uwb_ie_hdr *new_ie)
{
struct uwb_rc_cmd_set_ie *new_ies;
void *ptr, *prev_ie;
struct uwb_ie_hdr *ie;
size_t length, new_ie_len, new_capacity, size, prev_size;
length = le16_to_cpu(rc->ies->wIELength);
new_ie_len = sizeof(struct uwb_ie_hdr) + new_ie->length;
new_capacity = sizeof(struct uwb_rc_cmd_set_ie) + length + new_ie_len;
if (new_capacity > rc->ies_capacity) {
new_ies = krealloc(rc->ies, new_capacity, GFP_KERNEL);
if (!new_ies)
return -ENOMEM;
rc->ies = new_ies;
}
ptr = rc->ies->IEData;
size = length;
for (;;) {
prev_ie = ptr;
prev_size = size;
ie = uwb_ie_next(&ptr, &size);
if (!ie || ie->element_id > new_ie->element_id)
break;
}
memmove(prev_ie + new_ie_len, prev_ie, prev_size);
memcpy(prev_ie, new_ie, new_ie_len);
rc->ies->wIELength = cpu_to_le16(length + new_ie_len);
return 0;
}
/**
* uwb_rc_ie_add - add new IEs to the radio controller's beacon
* @uwb_rc: the radio controller.
* @ies: the buffer containing the new IE or IEs to be added to
* the device's beacon.
* @size: length of all the IEs.
*
* According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
* after the device sent the first beacon that includes the IEs specified
* in the SET IE command. We thus cannot send this command if the device is
* not beaconing. Instead, a SET IE command will be sent later right after
* we start beaconing.
*
 * Setting an IE on the device will overwrite all current IEs in the
 * device, so we take the current IEs being transmitted by the device,
 * insert the new one, and call SET IE with all the IEs needed.
 *
 * Returns 0 on success; a negative errno (e.g., -ENOMEM or -EINVAL) on failure.
*/
int uwb_rc_ie_add(struct uwb_rc *uwb_rc,
const struct uwb_ie_hdr *ies, size_t size)
{
int result = 0;
void *ptr;
const struct uwb_ie_hdr *ie;
mutex_lock(&uwb_rc->ies_mutex);
ptr = (void *)ies;
for (;;) {
ie = uwb_ie_next(&ptr, &size);
if (!ie)
break;
result = uwb_rc_ie_add_one(uwb_rc, ie);
if (result < 0)
break;
}
if (result >= 0) {
if (size == 0) {
if (uwb_rc->beaconing != -1)
result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies);
} else
result = -EINVAL;
}
mutex_unlock(&uwb_rc->ies_mutex);
return result;
}
EXPORT_SYMBOL_GPL(uwb_rc_ie_add);
/*
* Remove an IE from internal cache
*
* We are dealing with our internal IE cache so no need to verify that the
* IEs are valid (it has been done already).
*
* Should be called with ies_mutex held
*
* We do not break out once an IE is found in the cache. It is currently
* possible to have more than one IE with the same ID included in the
* beacon. We don't reallocate, we just mark the size smaller.
*/
static
void uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove)
{
struct uwb_ie_hdr *ie;
size_t len = le16_to_cpu(uwb_rc->ies->wIELength);
void *ptr;
size_t size;
ptr = uwb_rc->ies->IEData;
size = len;
for (;;) {
ie = uwb_ie_next(&ptr, &size);
if (!ie)
break;
if (ie->element_id == to_remove) {
len -= sizeof(struct uwb_ie_hdr) + ie->length;
memmove(ie, ptr, size);
ptr = ie;
}
}
uwb_rc->ies->wIELength = cpu_to_le16(len);
}
/**
* uwb_rc_ie_rm - remove an IE from the radio controller's beacon
* @uwb_rc: the radio controller.
* @element_id: the element ID of the IE to remove.
*
* Only IEs previously added with uwb_rc_ie_add() may be removed.
*
 * Returns 0 on success; a negative errno if the SET-IE command to the
 * radio controller failed.
*/
int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id)
{
int result = 0;
mutex_lock(&uwb_rc->ies_mutex);
uwb_rc_ie_cache_rm(uwb_rc, element_id);
if (uwb_rc->beaconing != -1)
result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies);
mutex_unlock(&uwb_rc->ies_mutex);
return result;
}
EXPORT_SYMBOL_GPL(uwb_rc_ie_rm);
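/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * how a caller could use uwb_rc_ie_add()/uwb_rc_ie_rm() above to publish a
 * small Application Specific IE in the beacon.  The helper name and payload
 * bytes are made up for the example; uwb_rc_ie_add() copies the IE into its
 * own buffer, so the temporary allocation can be freed afterwards.
 */
static int example_publish_asie(struct uwb_rc *rc)
{
        static const u8 payload[] = { 0x01, 0x02, 0x03, 0x04 }; /* arbitrary */
        struct uwb_ie_hdr *ie;
        int result;

        ie = kzalloc(sizeof(*ie) + sizeof(payload), GFP_KERNEL);
        if (ie == NULL)
                return -ENOMEM;
        ie->element_id = UWB_APP_SPEC_IE;
        ie->length = sizeof(payload);
        memcpy(ie + 1, payload, sizeof(payload));

        /* drop any previous ASIE, then insert the new one */
        uwb_rc_ie_rm(rc, UWB_APP_SPEC_IE);
        result = uwb_rc_ie_add(rc, ie, sizeof(*ie) + sizeof(payload));

        kfree(ie);
        return result;
}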

View File

@ -1,57 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Ultra Wide Band
* Debug interface commands
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*/
#ifndef __LINUX__UWB__DEBUG_CMD_H__
#define __LINUX__UWB__DEBUG_CMD_H__
#include <linux/types.h>
/*
* Debug interface commands
*
* UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation.
*
* UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation.
*/
enum uwb_dbg_cmd_type {
UWB_DBG_CMD_RSV_ESTABLISH = 1,
UWB_DBG_CMD_RSV_TERMINATE = 2,
UWB_DBG_CMD_IE_ADD = 3,
UWB_DBG_CMD_IE_RM = 4,
UWB_DBG_CMD_RADIO_START = 5,
UWB_DBG_CMD_RADIO_STOP = 6,
};
struct uwb_dbg_cmd_rsv_establish {
__u8 target[6];
__u8 type;
__u16 max_mas;
__u16 min_mas;
__u8 max_interval;
};
struct uwb_dbg_cmd_rsv_terminate {
int index;
};
struct uwb_dbg_cmd_ie {
__u8 data[128];
int len;
};
struct uwb_dbg_cmd {
__u32 type;
union {
struct uwb_dbg_cmd_rsv_establish rsv_establish;
struct uwb_dbg_cmd_rsv_terminate rsv_terminate;
struct uwb_dbg_cmd_ie ie_add;
struct uwb_dbg_cmd_ie ie_rm;
};
};
#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */
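/*
 * Illustrative sketch (added by the editor, not part of the original header):
 * a user-space caller filling in struct uwb_dbg_cmd to request a reservation
 * through the debug interface.  The debugfs path below is an assumption made
 * for the example; only the structure layout and command codes come from this
 * header.
 */
#if 0   /* example only */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int example_rsv_establish(const unsigned char target[6])
{
        struct uwb_dbg_cmd cmd;
        ssize_t n;
        int fd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.type = UWB_DBG_CMD_RSV_ESTABLISH;
        memcpy(cmd.rsv_establish.target, target, 6);
        cmd.rsv_establish.type = 2;             /* e.g. a soft DRP reservation */
        cmd.rsv_establish.max_mas = 16;
        cmd.rsv_establish.min_mas = 8;
        cmd.rsv_establish.max_interval = 4;

        fd = open("/sys/kernel/debug/uwb/uwb0/command", O_WRONLY); /* assumed path */
        if (fd < 0)
                return -1;
        n = write(fd, &cmd, sizeof(cmd));
        close(fd);
        return n == sizeof(cmd) ? 0 : -1;
}
#endif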

View File

@ -1,767 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Ultra Wide Band
* UWB Standard definitions
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* All these definitions are based on the ECMA-368 standard.
*
* Note all definitions are Little Endian in the wire, and we will
* convert them to host order before operating on the bitfields (that
* yes, we use extensively).
*/
#ifndef __LINUX__UWB_SPEC_H__
#define __LINUX__UWB_SPEC_H__
#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/if_ether.h>
#define i1480_FW 0x00000303
/* #define i1480_FW 0x00000302 */
/**
* Number of Medium Access Slots in a superframe.
*
 * UWB divides time into superframes, each one divided into 256 pieces, or
 * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the
* basic bandwidth allocation unit in UWB.
*/
enum { UWB_NUM_MAS = 256 };
/**
* Number of Zones in superframe.
*
* UWB divides the superframe into zones with numbering starting from BPST.
* See MBOA MAC[16.8.6]
*/
enum { UWB_NUM_ZONES = 16 };
/*
* Number of MAS in a zone.
*/
#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES)
/*
* Number of MAS required before a row can be considered available.
*/
#define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1)
/*
* Number of streams per DRP reservation between a pair of devices.
*
* [ECMA-368] section 16.8.6.
*/
enum { UWB_NUM_STREAMS = 8 };
/*
* mMasLength
*
* The length of a MAS in microseconds.
*
* [ECMA-368] section 17.16.
*/
enum { UWB_MAS_LENGTH_US = 256 };
/*
* mBeaconSlotLength
*
* The length of the beacon slot in microseconds.
*
* [ECMA-368] section 17.16
*/
enum { UWB_BEACON_SLOT_LENGTH_US = 85 };
/*
* mMaxLostBeacons
*
 * The number of beacons missed in consecutive superframes before a
 * device is considered unreachable.
*
* [ECMA-368] section 17.16
*/
enum { UWB_MAX_LOST_BEACONS = 3 };
/*
* mDRPBackOffWinMin
*
* The minimum number of superframes to wait before trying to reserve
* extra MAS.
*
* [ECMA-368] section 17.16
*/
enum { UWB_DRP_BACKOFF_WIN_MIN = 2 };
/*
* mDRPBackOffWinMax
*
* The maximum number of superframes to wait before trying to reserve
* extra MAS.
*
* [ECMA-368] section 17.16
*/
enum { UWB_DRP_BACKOFF_WIN_MAX = 16 };
/*
* Length of a superframe in microseconds.
*/
#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS)
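/*
 * Worked numbers (added for illustration, not in the original header): with
 * UWB_NUM_MAS = 256 and UWB_MAS_LENGTH_US = 256, a superframe lasts
 * 256 * 256 us = 65536 us (about 65.5 ms), and each of the 16 zones spans
 * UWB_MAS_PER_ZONE = 16 MAS, i.e. 4096 us.
 */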
/**
* UWB MAC address
*
* It is *imperative* that this struct is exactly 6 packed bytes (as
* it is also used to define headers sent down and up the wire/radio).
*/
struct uwb_mac_addr {
u8 data[ETH_ALEN];
} __attribute__((packed));
/**
* UWB device address
*
 * It is *imperative* that this struct is exactly 2 packed bytes (as
* it is also used to define headers sent down and up the wire/radio).
*/
struct uwb_dev_addr {
u8 data[2];
} __attribute__((packed));
/**
* Types of UWB addresses
*
* Order matters (by size).
*/
enum uwb_addr_type {
UWB_ADDR_DEV = 0,
UWB_ADDR_MAC = 1,
};
/** Size of a char buffer for printing a MAC/device address */
enum { UWB_ADDR_STRSIZE = 32 };
/** UWB WiMedia protocol IDs. */
enum uwb_prid {
UWB_PRID_WLP_RESERVED = 0x0000,
UWB_PRID_WLP = 0x0001,
UWB_PRID_WUSB_BOT = 0x0010,
UWB_PRID_WUSB = 0x0010,
UWB_PRID_WUSB_TOP = 0x001F,
};
/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */
enum uwb_phy_rate {
UWB_PHY_RATE_53 = 0,
UWB_PHY_RATE_80,
UWB_PHY_RATE_106,
UWB_PHY_RATE_160,
UWB_PHY_RATE_200,
UWB_PHY_RATE_320,
UWB_PHY_RATE_400,
UWB_PHY_RATE_480,
UWB_PHY_RATE_INVALID
};
/**
* Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78])
*/
enum uwb_scan_type {
UWB_SCAN_ONLY = 0,
UWB_SCAN_OUTSIDE_BP,
UWB_SCAN_WHILE_INACTIVE,
UWB_SCAN_DISABLED,
UWB_SCAN_ONLY_STARTTIME,
UWB_SCAN_TOP
};
/** ACK Policy types (MBOA MAC[7.2.1.3]) */
enum uwb_ack_pol {
UWB_ACK_NO = 0,
UWB_ACK_INM = 1,
UWB_ACK_B = 2,
UWB_ACK_B_REQ = 3,
};
/** DRP reservation types ([ECMA-368] table 106) */
enum uwb_drp_type {
UWB_DRP_TYPE_ALIEN_BP = 0,
UWB_DRP_TYPE_HARD,
UWB_DRP_TYPE_SOFT,
UWB_DRP_TYPE_PRIVATE,
UWB_DRP_TYPE_PCA,
};
/** DRP Reason Codes ([ECMA-368] table 107) */
enum uwb_drp_reason {
UWB_DRP_REASON_ACCEPTED = 0,
UWB_DRP_REASON_CONFLICT,
UWB_DRP_REASON_PENDING,
UWB_DRP_REASON_DENIED,
UWB_DRP_REASON_MODIFIED,
};
/** Relinquish Request Reason Codes ([ECMA-368] table 113) */
enum uwb_relinquish_req_reason {
UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0,
UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION,
};
/**
* DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9])
*/
enum uwb_drp_notif_reason {
UWB_DRP_NOTIF_DRP_IE_RCVD = 0,
UWB_DRP_NOTIF_CONFLICT,
UWB_DRP_NOTIF_TERMINATE,
};
/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */
struct uwb_drp_alloc {
__le16 zone_bm;
__le16 mas_bm;
} __attribute__((packed));
/** General MAC Header format (ECMA-368[16.2]) */
struct uwb_mac_frame_hdr {
__le16 Frame_Control;
struct uwb_dev_addr DestAddr;
struct uwb_dev_addr SrcAddr;
__le16 Sequence_Control;
__le16 Access_Information;
} __attribute__((packed));
/**
* uwb_beacon_frame - a beacon frame including MAC headers
*
* [ECMA] section 16.3.
*/
struct uwb_beacon_frame {
struct uwb_mac_frame_hdr hdr;
struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */
u8 Beacon_Slot_Number;
u8 Device_Control;
u8 IEData[];
} __attribute__((packed));
/** Information Element codes (MBOA MAC[T54]) */
enum uwb_ie {
UWB_PCA_AVAILABILITY = 2,
UWB_IE_DRP_AVAILABILITY = 8,
UWB_IE_DRP = 9,
UWB_BP_SWITCH_IE = 11,
UWB_MAC_CAPABILITIES_IE = 12,
UWB_PHY_CAPABILITIES_IE = 13,
UWB_APP_SPEC_PROBE_IE = 15,
UWB_IDENTIFICATION_IE = 19,
UWB_MASTER_KEY_ID_IE = 20,
UWB_RELINQUISH_REQUEST_IE = 21,
UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */
UWB_APP_SPEC_IE = 255,
};
/**
* Header common to all Information Elements (IEs)
*/
struct uwb_ie_hdr {
u8 element_id; /* enum uwb_ie */
u8 length;
} __attribute__((packed));
/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */
struct uwb_ie_drp {
struct uwb_ie_hdr hdr;
__le16 drp_control;
struct uwb_dev_addr dev_addr;
struct uwb_drp_alloc allocs[];
} __attribute__((packed));
static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie)
{
return (le16_to_cpu(ie->drp_control) >> 0) & 0x7;
}
static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie)
{
return (le16_to_cpu(ie->drp_control) >> 3) & 0x7;
}
static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie)
{
return (le16_to_cpu(ie->drp_control) >> 6) & 0x7;
}
static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie)
{
return (le16_to_cpu(ie->drp_control) >> 9) & 0x1;
}
static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie)
{
return (le16_to_cpu(ie->drp_control) >> 10) & 0x1;
}
static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie)
{
return (le16_to_cpu(ie->drp_control) >> 11) & 0x1;
}
static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie)
{
return (le16_to_cpu(ie->drp_control) >> 12) & 0x1;
}
static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type)
{
u16 drp_control = le16_to_cpu(ie->drp_control);
drp_control = (drp_control & ~(0x7 << 0)) | (type << 0);
ie->drp_control = cpu_to_le16(drp_control);
}
static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index)
{
u16 drp_control = le16_to_cpu(ie->drp_control);
drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3);
ie->drp_control = cpu_to_le16(drp_control);
}
static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie,
enum uwb_drp_reason reason_code)
{
u16 drp_control = le16_to_cpu(ie->drp_control);
drp_control = (drp_control & ~(0x7 << 6)) | (reason_code << 6);
ie->drp_control = cpu_to_le16(drp_control);
}
static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status)
{
u16 drp_control = le16_to_cpu(ie->drp_control);
drp_control = (drp_control & ~(0x1 << 9)) | (status << 9);
ie->drp_control = cpu_to_le16(drp_control);
}
static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner)
{
u16 drp_control = le16_to_cpu(ie->drp_control);
drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10);
ie->drp_control = cpu_to_le16(drp_control);
}
static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker)
{
u16 drp_control = le16_to_cpu(ie->drp_control);
drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11);
ie->drp_control = cpu_to_le16(drp_control);
}
static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe)
{
u16 drp_control = le16_to_cpu(ie->drp_control);
drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12);
ie->drp_control = cpu_to_le16(drp_control);
}
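/*
 * Illustrative sketch (added by the editor, not part of the original header):
 * composing the drp_control word of a DRP IE with the setters above.  The
 * values are arbitrary; only the helpers and enum constants come from this
 * header.
 */
static inline void example_fill_drp_control(struct uwb_ie_drp *drp)
{
        drp->drp_control = 0;                                      /* clean word */
        uwb_ie_drp_set_type(drp, UWB_DRP_TYPE_HARD);               /* bits 0..2  */
        uwb_ie_drp_set_stream_index(drp, 1);                       /* bits 3..5  */
        uwb_ie_drp_set_reason_code(drp, UWB_DRP_REASON_ACCEPTED);  /* bits 6..8  */
        uwb_ie_drp_set_status(drp, 1);                             /* bit 9      */
        uwb_ie_drp_set_owner(drp, 1);                              /* bit 10     */
        uwb_ie_drp_set_tiebreaker(drp, 0);                         /* bit 11     */
        uwb_ie_drp_set_unsafe(drp, 0);                             /* bit 12     */
}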
/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */
struct uwb_ie_drp_avail {
struct uwb_ie_hdr hdr;
DECLARE_BITMAP(bmp, UWB_NUM_MAS);
} __attribute__((packed));
/* Relinquish Request IE ([ECMA-368] section 16.8.19). */
struct uwb_relinquish_request_ie {
struct uwb_ie_hdr hdr;
__le16 relinquish_req_control;
struct uwb_dev_addr dev_addr;
struct uwb_drp_alloc allocs[];
} __attribute__((packed));
static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie)
{
return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf;
}
static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie,
int reason_code)
{
u16 ctrl = le16_to_cpu(ie->relinquish_req_control);
ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0);
ie->relinquish_req_control = cpu_to_le16(ctrl);
}
/**
* The Vendor ID is set to an OUI that indicates the vendor of the device.
* ECMA-368 [16.8.10]
*/
struct uwb_vendor_id {
u8 data[3];
} __attribute__((packed));
/**
* The device type ID
* FIXME: clarify what this means
* ECMA-368 [16.8.10]
*/
struct uwb_device_type_id {
u8 data[3];
} __attribute__((packed));
/**
* UWB device information types
* ECMA-368 [16.8.10]
*/
enum uwb_dev_info_type {
UWB_DEV_INFO_VENDOR_ID = 0,
UWB_DEV_INFO_VENDOR_TYPE,
UWB_DEV_INFO_NAME,
};
/**
* UWB device information found in Identification IE
* ECMA-368 [16.8.10]
*/
struct uwb_dev_info {
u8 type; /* enum uwb_dev_info_type */
u8 length;
u8 data[];
} __attribute__((packed));
/**
* UWB Identification IE
* ECMA-368 [16.8.10]
*/
struct uwb_identification_ie {
struct uwb_ie_hdr hdr;
struct uwb_dev_info info[];
} __attribute__((packed));
/*
* UWB Radio Controller
*
* These definitions are common to the Radio Control layers as
* exported by the WUSB1.0 HWA and WHCI interfaces.
*/
/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */
struct uwb_rccb {
u8 bCommandType; /* enum hwa_cet */
__le16 wCommand; /* Command code */
u8 bCommandContext; /* Context ID */
} __attribute__((packed));
/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */
struct uwb_rceb {
u8 bEventType; /* enum hwa_cet */
__le16 wEvent; /* Event code */
u8 bEventContext; /* Context ID */
} __attribute__((packed));
enum {
UWB_RC_CET_GENERAL = 0, /* General Command/Event type */
UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */
};
/* Commands to the radio controller */
enum uwb_rc_cmd {
UWB_RC_CMD_CHANNEL_CHANGE = 16,
UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */
UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */
UWB_RC_CMD_RESET = 19,
UWB_RC_CMD_SCAN = 20, /* Scan management */
UWB_RC_CMD_SET_BEACON_FILTER = 21,
UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */
UWB_RC_CMD_SET_IE = 23, /* Information Element management */
UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24,
UWB_RC_CMD_SET_TX_POWER = 25,
UWB_RC_CMD_SLEEP = 26,
UWB_RC_CMD_START_BEACON = 27,
UWB_RC_CMD_STOP_BEACON = 28,
UWB_RC_CMD_BP_MERGE = 29,
UWB_RC_CMD_SEND_COMMAND_FRAME = 30,
UWB_RC_CMD_SET_ASIE_NOTIF = 31,
};
/* Notifications from the radio controller */
enum uwb_rc_evt {
UWB_RC_EVT_IE_RCV = 0,
UWB_RC_EVT_BEACON = 1,
UWB_RC_EVT_BEACON_SIZE = 2,
UWB_RC_EVT_BPOIE_CHANGE = 3,
UWB_RC_EVT_BP_SLOT_CHANGE = 4,
UWB_RC_EVT_BP_SWITCH_IE_RCV = 5,
UWB_RC_EVT_DEV_ADDR_CONFLICT = 6,
UWB_RC_EVT_DRP_AVAIL = 7,
UWB_RC_EVT_DRP = 8,
UWB_RC_EVT_BP_SWITCH_STATUS = 9,
UWB_RC_EVT_CMD_FRAME_RCV = 10,
UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11,
/* Events (command responses) use the same code as the command */
UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535,
};
enum uwb_rc_extended_type_1_cmd {
UWB_RC_SET_DAA_ENERGY_MASK = 32,
UWB_RC_SET_NOTIFICATION_FILTER_EX = 33,
};
enum uwb_rc_extended_type_1_evt {
UWB_RC_DAA_ENERGY_DETECTED = 0,
};
/* Radio Control Result Code. [WHCI] table 3-3. */
enum {
UWB_RC_RES_SUCCESS = 0,
UWB_RC_RES_FAIL,
UWB_RC_RES_FAIL_HARDWARE,
UWB_RC_RES_FAIL_NO_SLOTS,
UWB_RC_RES_FAIL_BEACON_TOO_LARGE,
UWB_RC_RES_FAIL_INVALID_PARAMETER,
UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL,
UWB_RC_RES_FAIL_INVALID_IE_DATA,
UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED,
UWB_RC_RES_FAIL_CANCELLED,
UWB_RC_RES_FAIL_INVALID_STATE,
UWB_RC_RES_FAIL_INVALID_SIZE,
UWB_RC_RES_FAIL_ACK_NOT_RECEIVED,
UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF,
UWB_RC_RES_FAIL_TIME_OUT = 255,
};
/* Confirm event. [WHCI] section 3.1.3.1 etc. */
struct uwb_rc_evt_confirm {
struct uwb_rceb rceb;
u8 bResultCode;
} __attribute__((packed));
/* Device Address Management event. [WHCI] section 3.1.3.2. */
struct uwb_rc_evt_dev_addr_mgmt {
struct uwb_rceb rceb;
u8 baAddr[ETH_ALEN];
u8 bResultCode;
} __attribute__((packed));
/* Get IE Event. [WHCI] section 3.1.3.3. */
struct uwb_rc_evt_get_ie {
struct uwb_rceb rceb;
__le16 wIELength;
u8 IEData[];
} __attribute__((packed));
/* Set DRP IE Event. [WHCI] section 3.1.3.7. */
struct uwb_rc_evt_set_drp_ie {
struct uwb_rceb rceb;
__le16 wRemainingSpace;
u8 bResultCode;
} __attribute__((packed));
/* Set IE Event. [WHCI] section 3.1.3.8. */
struct uwb_rc_evt_set_ie {
struct uwb_rceb rceb;
__le16 RemainingSpace;
u8 bResultCode;
} __attribute__((packed));
/* Scan command. [WHCI] 3.1.3.5. */
struct uwb_rc_cmd_scan {
struct uwb_rccb rccb;
u8 bChannelNumber;
u8 bScanState;
__le16 wStartTime;
} __attribute__((packed));
/* Set DRP IE command. [WHCI] section 3.1.3.7. */
struct uwb_rc_cmd_set_drp_ie {
struct uwb_rccb rccb;
__le16 wIELength;
struct uwb_ie_drp IEData[];
} __attribute__((packed));
/* Set IE command. [WHCI] section 3.1.3.8. */
struct uwb_rc_cmd_set_ie {
struct uwb_rccb rccb;
__le16 wIELength;
u8 IEData[];
} __attribute__((packed));
/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */
struct uwb_rc_evt_set_daa_energy_mask {
struct uwb_rceb rceb;
__le16 wLength;
u8 result;
} __attribute__((packed));
/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */
struct uwb_rc_evt_set_notification_filter_ex {
struct uwb_rceb rceb;
__le16 wLength;
u8 result;
} __attribute__((packed));
/* IE Received notification. [WHCI] section 3.1.4.1. */
struct uwb_rc_evt_ie_rcv {
struct uwb_rceb rceb;
struct uwb_dev_addr SrcAddr;
__le16 wIELength;
u8 IEData[];
} __attribute__((packed));
/* Type of the received beacon. [WHCI] section 3.1.4.2. */
enum uwb_rc_beacon_type {
UWB_RC_BEACON_TYPE_SCAN = 0,
UWB_RC_BEACON_TYPE_NEIGHBOR,
UWB_RC_BEACON_TYPE_OL_ALIEN,
UWB_RC_BEACON_TYPE_NOL_ALIEN,
};
/* Beacon received notification. [WHCI] 3.1.4.2. */
struct uwb_rc_evt_beacon {
struct uwb_rceb rceb;
u8 bChannelNumber;
u8 bBeaconType;
__le16 wBPSTOffset;
u8 bLQI;
u8 bRSSI;
__le16 wBeaconInfoLength;
u8 BeaconInfo[];
} __attribute__((packed));
/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */
struct uwb_rc_evt_beacon_size {
struct uwb_rceb rceb;
__le16 wNewBeaconSize;
} __attribute__((packed));
/* BPOIE Change notification. [WHCI] section 3.1.4.4. */
struct uwb_rc_evt_bpoie_change {
struct uwb_rceb rceb;
__le16 wBPOIELength;
u8 BPOIE[];
} __attribute__((packed));
/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. */
struct uwb_rc_evt_bp_slot_change {
struct uwb_rceb rceb;
u8 slot_info;
} __attribute__((packed));
static inline int uwb_rc_evt_bp_slot_change_slot_num(
const struct uwb_rc_evt_bp_slot_change *evt)
{
return evt->slot_info & 0x7f;
}
static inline int uwb_rc_evt_bp_slot_change_no_slot(
const struct uwb_rc_evt_bp_slot_change *evt)
{
return (evt->slot_info & 0x80) >> 7;
}
/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */
struct uwb_rc_evt_bp_switch_ie_rcv {
struct uwb_rceb rceb;
struct uwb_dev_addr wSrcAddr;
__le16 wIELength;
u8 IEData[];
} __attribute__((packed));
/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */
struct uwb_rc_evt_dev_addr_conflict {
struct uwb_rceb rceb;
} __attribute__((packed));
/* DRP notification. [WHCI] section 3.1.4.9. */
struct uwb_rc_evt_drp {
struct uwb_rceb rceb;
struct uwb_dev_addr src_addr;
u8 reason;
u8 beacon_slot_number;
__le16 ie_length;
u8 ie_data[];
} __attribute__((packed));
static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt)
{
return evt->reason & 0x0f;
}
/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */
struct uwb_rc_evt_drp_avail {
struct uwb_rceb rceb;
DECLARE_BITMAP(bmp, UWB_NUM_MAS);
} __attribute__((packed));
/* BP switch status notification. [WHCI] section 3.1.4.10. */
struct uwb_rc_evt_bp_switch_status {
struct uwb_rceb rceb;
u8 status;
u8 slot_offset;
__le16 bpst_offset;
u8 move_countdown;
} __attribute__((packed));
/* Command Frame Received notification. [WHCI] section 3.1.4.11. */
struct uwb_rc_evt_cmd_frame_rcv {
struct uwb_rceb rceb;
__le16 receive_time;
struct uwb_dev_addr wSrcAddr;
struct uwb_dev_addr wDstAddr;
__le16 control;
__le16 reserved;
__le16 dataLength;
u8 data[];
} __attribute__((packed));
/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */
struct uwb_rc_evt_channel_change_ie_rcv {
struct uwb_rceb rceb;
struct uwb_dev_addr wSrcAddr;
__le16 wIELength;
u8 IEData[];
} __attribute__((packed));
/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */
struct uwb_rc_evt_daa_energy_detected {
struct uwb_rceb rceb;
__le16 wLength;
u8 bandID;
u8 reserved;
u8 toneBmp[16];
} __attribute__((packed));
/**
* Radio Control Interface Class Descriptor
*
* WUSB 1.0 [8.6.1.2]
*/
struct uwb_rc_control_intf_class_desc {
u8 bLength;
u8 bDescriptorType;
__le16 bcdRCIVersion;
} __attribute__((packed));
#endif /* #ifndef __LINUX__UWB_SPEC_H__ */

View File

@ -1,192 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* UWB Multi-interface Controller support.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*
* UMC (UWB Multi-interface Controller) capabilities (e.g., radio
* controller, host controller) are presented as devices on the "umc"
* bus.
*
* The radio controller is not strictly a UMC capability but it's
* useful to present it as such.
*
* References:
*
* [WHCI] Wireless Host Controller Interface Specification for
* Certified Wireless Universal Serial Bus, revision 0.95.
*
* How this works is kind of convoluted but simple. The whci.ko driver
* loads when WHCI devices are detected. These WHCI devices expose
* many devices in the same PCI function (they couldn't have reused
* functions, no), so for each PCI function that exposes these many
 * devices, whci creates a umc_dev [whci_probe() -> whci_add_cap()]
* with umc_device_create() and adds it to the bus with
* umc_device_register().
*
* umc_device_register() calls device_register() which will push the
 * bus management code to load your UMC driver's something_probe()
* that you have registered for that capability code.
*
* Now when the WHCI device is removed, whci_remove() will go over
* each umc_dev assigned to each of the PCI function's capabilities
 * and, through whci_del_cap(), call umc_device_unregister() on each
* created umc_dev. Of course, if you are bound to the device, your
* driver's something_remove() will be called.
*/
#ifndef _LINUX_UWB_UMC_H_
#define _LINUX_UWB_UMC_H_
#include <linux/device.h>
#include <linux/pci.h>
/*
* UMC capability IDs.
*
* 0x00 is reserved so use it for the radio controller device.
*
* [WHCI] table 2-8
*/
#define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */
#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */
/**
* struct umc_dev - UMC capability device
*
* @version: version of the specification this capability conforms to.
* @cap_id: capability ID.
* @bar: PCI Bar (64 bit) where the resource lies
* @resource: register space resource.
* @irq: interrupt line.
*/
struct umc_dev {
u16 version;
u8 cap_id;
u8 bar;
struct resource resource;
unsigned irq;
struct device dev;
};
#define to_umc_dev(d) container_of(d, struct umc_dev, dev)
/**
* struct umc_driver - UMC capability driver
* @cap_id: supported capability ID.
* @match: driver specific capability matching function.
* @match_data: driver specific data for match() (e.g., a
* table of pci_device_id's if umc_match_pci_id() is used).
*/
struct umc_driver {
char *name;
u8 cap_id;
int (*match)(struct umc_driver *, struct umc_dev *);
const void *match_data;
int (*probe)(struct umc_dev *);
void (*remove)(struct umc_dev *);
int (*pre_reset)(struct umc_dev *);
int (*post_reset)(struct umc_dev *);
struct device_driver driver;
};
#define to_umc_driver(d) container_of(d, struct umc_driver, driver)
extern struct bus_type umc_bus_type;
struct umc_dev *umc_device_create(struct device *parent, int n);
int __must_check umc_device_register(struct umc_dev *umc);
void umc_device_unregister(struct umc_dev *umc);
int __must_check __umc_driver_register(struct umc_driver *umc_drv,
struct module *mod,
const char *mod_name);
/**
 * umc_driver_register - register a UMC capability driver.
* @umc_drv: pointer to the driver.
*/
#define umc_driver_register(umc_drv) \
__umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME)
void umc_driver_unregister(struct umc_driver *umc_drv);
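/*
 * Illustrative sketch (added by the editor, not part of the original header):
 * a minimal UMC capability driver skeleton using the interface above.  The
 * driver name and the probe/remove bodies are placeholders, and the module
 * boilerplate assumes <linux/module.h> is available.
 */
#if 0   /* example only */
static int example_umc_probe(struct umc_dev *umc)
{
        /* map umc->resource, request umc->irq, etc. */
        return 0;
}

static void example_umc_remove(struct umc_dev *umc)
{
        /* undo whatever example_umc_probe() set up */
}

static struct umc_driver example_umc_driver = {
        .name   = "example-umc",
        .cap_id = UMC_CAP_ID_WHCI_WUSB_HC,      /* capability to bind to */
        .probe  = example_umc_probe,
        .remove = example_umc_remove,
};

static int __init example_umc_init(void)
{
        return umc_driver_register(&example_umc_driver);
}
module_init(example_umc_init);

static void __exit example_umc_exit(void)
{
        umc_driver_unregister(&example_umc_driver);
}
module_exit(example_umc_exit);
#endif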
/*
* Utility function you can use to match (umc_driver->match) against a
* null-terminated array of 'struct pci_device_id' in
* umc_driver->match_data.
*/
int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc);
/**
* umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none
* @umc_dev: UMC device whose parent PCI device we are looking for
*
* DIRTY!!! DON'T RELY ON THIS
*
* FIXME: This is as dirty as it gets, but we need some way to check
* the correct type of umc_dev->parent (so that for example, we can
* cast to pci_dev). Casting to pci_dev is necessary because at some
* point we need to request resources from the device. Mapping is
 * easily overcome (ioremap and stuff are bus agnostic), but hooking
* up to some error handlers (such as pci error handlers) might need
* this.
*
* THIS might (probably will) be removed in the future, so don't count
* on it.
*/
static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev)
{
struct pci_dev *pci_dev = NULL;
if (dev_is_pci(umc_dev->dev.parent))
pci_dev = to_pci_dev(umc_dev->dev.parent);
return pci_dev;
}
/**
* umc_dev_get() - reference a UMC device.
* @umc_dev: Pointer to UMC device.
*
* NOTE: we are assuming in this whole scheme that the parent device
* is referenced at _probe() time and unreferenced at _remove()
* time by the parent's subsystem.
*/
static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev)
{
get_device(&umc_dev->dev);
return umc_dev;
}
/**
* umc_dev_put() - unreference a UMC device.
* @umc_dev: Pointer to UMC device.
*/
static inline void umc_dev_put(struct umc_dev *umc_dev)
{
put_device(&umc_dev->dev);
}
/**
* umc_set_drvdata - set UMC device's driver data.
* @umc_dev: Pointer to UMC device.
* @data: Data to set.
*/
static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data)
{
dev_set_drvdata(&umc_dev->dev, data);
}
/**
* umc_get_drvdata - recover UMC device's driver data.
* @umc_dev: Pointer to UMC device.
*/
static inline void *umc_get_drvdata(struct umc_dev *umc_dev)
{
return dev_get_drvdata(&umc_dev->dev);
}
int umc_controller_reset(struct umc_dev *umc);
#endif /* #ifndef _LINUX_UWB_UMC_H_ */

View File

@ -1,102 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* References:
* [WHCI] Wireless Host Controller Interface Specification for
* Certified Wireless Universal Serial Bus, revision 0.95.
*/
#ifndef _LINUX_UWB_WHCI_H_
#define _LINUX_UWB_WHCI_H_
#include <linux/pci.h>
/*
* UWB interface capability registers (offsets from UWBBASE)
*
* [WHCI] section 2.2
*/
#define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */
# define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull)
#define UWBCAPDATA(n) (8*(n))
# define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull)
# define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull)
# define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull)
# define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32))
# define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull)
/* Size of the WHCI capability data (including the RC capability) for
a device with n capabilities. */
#define UWBCAPDATA_SIZE(n) (8 + 8*(n))
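/*
 * Illustrative sketch (added by the editor, not part of the original header):
 * decoding one 64-bit UWBCAPDATA word with the macros above.  The register
 * value would normally come from a read of the capability area.
 */
static inline void example_decode_capdata(u64 capdata)
{
        unsigned int version = UWBCAPDATA_TO_VERSION(capdata);
        unsigned int offset  = UWBCAPDATA_TO_OFFSET(capdata);
        unsigned int bar     = UWBCAPDATA_TO_BAR(capdata);
        size_t size          = UWBCAPDATA_TO_SIZE(capdata);
        unsigned int cap_id  = UWBCAPDATA_TO_CAP_ID(capdata);

        pr_debug("cap %u: version %04x bar %u offset 0x%x size %zu\n",
                 cap_id, version, bar, offset, size);
}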
/*
* URC registers (offsets from URCBASE)
*
* [WHCI] section 2.3
*/
#define URCCMD 0x00
# define URCCMD_RESET (1 << 31) /* UMC Hardware reset */
# define URCCMD_RS (1 << 30) /* Run/Stop */
# define URCCMD_EARV (1 << 29) /* Event Address Register Valid */
# define URCCMD_ACTIVE (1 << 15) /* Command is active */
# define URCCMD_IWR (1 << 14) /* Interrupt When Ready */
# define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */
#define URCSTS 0x04
# define URCSTS_EPS (1 << 17) /* Event Processing Status */
# define URCSTS_HALTED (1 << 16) /* RC halted */
# define URCSTS_HSE (1 << 10) /* Host System Error...fried */
# define URCSTS_ER (1 << 9) /* Event Ready */
# define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */
# define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */
# define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */
#define URCINTR 0x08
# define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */
#define URCCMDADDR 0x10
#define URCEVTADDR 0x18
# define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */
/** Write 32 bit @value to little endian register at @addr */
static inline
void le_writel(u32 value, void __iomem *addr)
{
iowrite32(value, addr);
}
/** Read from 32 bit little endian register at @addr */
static inline
u32 le_readl(void __iomem *addr)
{
return ioread32(addr);
}
/** Write 64 bit @value to little endian register at @addr */
static inline
void le_writeq(u64 value, void __iomem *addr)
{
iowrite32(value, addr);
iowrite32(value >> 32, addr + 4);
}
/** Read from 64 bit little endian register at @addr */
static inline
u64 le_readq(void __iomem *addr)
{
u64 value;
value = ioread32(addr);
value |= (u64)ioread32(addr + 4) << 32;
return value;
}
extern int whci_wait_for(struct device *dev, u32 __iomem *reg,
u32 mask, u32 result,
unsigned long max_ms, const char *tag);
#endif /* #ifndef _LINUX_UWB_WHCI_H_ */

View File

@ -1,457 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band
* Life cycle of devices
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* FIXME: docs
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/random.h>
#include <linux/stat.h>
#include "uwb-internal.h"
/* We initialize addresses to 0xff (invalid, as it is bcast) */
static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr)
{
memset(&addr->data, 0xff, sizeof(addr->data));
}
static inline void uwb_mac_addr_init(struct uwb_mac_addr *addr)
{
memset(&addr->data, 0xff, sizeof(addr->data));
}
/*
* Add callback @new to be called when an event occurs in @rc.
*/
int uwb_notifs_register(struct uwb_rc *rc, struct uwb_notifs_handler *new)
{
if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
return -ERESTARTSYS;
list_add(&new->list_node, &rc->notifs_chain.list);
mutex_unlock(&rc->notifs_chain.mutex);
return 0;
}
EXPORT_SYMBOL_GPL(uwb_notifs_register);
/*
* Remove event handler (callback)
*/
int uwb_notifs_deregister(struct uwb_rc *rc, struct uwb_notifs_handler *entry)
{
if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
return -ERESTARTSYS;
list_del(&entry->list_node);
mutex_unlock(&rc->notifs_chain.mutex);
return 0;
}
EXPORT_SYMBOL_GPL(uwb_notifs_deregister);
/*
* Notify all event handlers of a given event on @rc
*
 * We are called with a valid reference to the device, or NULL if the
 * event is not for a particular device (e.g., a BG join event).
*/
void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event)
{
struct uwb_notifs_handler *handler;
if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
return;
if (!list_empty(&rc->notifs_chain.list)) {
list_for_each_entry(handler, &rc->notifs_chain.list, list_node) {
handler->cb(handler->data, uwb_dev, event);
}
}
mutex_unlock(&rc->notifs_chain.mutex);
}
/*
* Release the backing device of a uwb_dev that has been dynamically allocated.
*/
static void uwb_dev_sys_release(struct device *dev)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
uwb_bce_put(uwb_dev->bce);
memset(uwb_dev, 0x69, sizeof(*uwb_dev));
kfree(uwb_dev);
}
/*
* Initialize a UWB device instance
*
* Alloc, zero and call this function.
*/
void uwb_dev_init(struct uwb_dev *uwb_dev)
{
mutex_init(&uwb_dev->mutex);
device_initialize(&uwb_dev->dev);
uwb_dev->dev.release = uwb_dev_sys_release;
uwb_dev_addr_init(&uwb_dev->dev_addr);
uwb_mac_addr_init(&uwb_dev->mac_addr);
bitmap_fill(uwb_dev->streams, UWB_NUM_GLOBAL_STREAMS);
}
static ssize_t uwb_dev_EUI_48_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
char addr[UWB_ADDR_STRSIZE];
uwb_mac_addr_print(addr, sizeof(addr), &uwb_dev->mac_addr);
return sprintf(buf, "%s\n", addr);
}
static DEVICE_ATTR(EUI_48, S_IRUGO, uwb_dev_EUI_48_show, NULL);
static ssize_t uwb_dev_DevAddr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
char addr[UWB_ADDR_STRSIZE];
uwb_dev_addr_print(addr, sizeof(addr), &uwb_dev->dev_addr);
return sprintf(buf, "%s\n", addr);
}
static DEVICE_ATTR(DevAddr, S_IRUGO, uwb_dev_DevAddr_show, NULL);
/*
* Show the BPST of this device.
*
 * Calculated from the receive time of the device's beacon and its
 * slot number.
*/
static ssize_t uwb_dev_BPST_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_beca_e *bce;
struct uwb_beacon_frame *bf;
u16 bpst;
bce = uwb_dev->bce;
mutex_lock(&bce->mutex);
bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo;
bpst = bce->be->wBPSTOffset
- (u16)(bf->Beacon_Slot_Number * UWB_BEACON_SLOT_LENGTH_US);
mutex_unlock(&bce->mutex);
return sprintf(buf, "%d\n", bpst);
}
static DEVICE_ATTR(BPST, S_IRUGO, uwb_dev_BPST_show, NULL);
/*
* Show the IEs a device is beaconing
*
* We need to access the beacon cache, so we just lock it really
* quick, print the IEs and unlock.
*
* We have a reference on the cache entry, so that should be
* quite safe.
*/
static ssize_t uwb_dev_IEs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
return uwb_bce_print_IEs(uwb_dev, uwb_dev->bce, buf, PAGE_SIZE);
}
static DEVICE_ATTR(IEs, S_IRUGO | S_IWUSR, uwb_dev_IEs_show, NULL);
static ssize_t uwb_dev_LQE_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_beca_e *bce = uwb_dev->bce;
size_t result;
mutex_lock(&bce->mutex);
result = stats_show(&uwb_dev->bce->lqe_stats, buf);
mutex_unlock(&bce->mutex);
return result;
}
static ssize_t uwb_dev_LQE_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_beca_e *bce = uwb_dev->bce;
ssize_t result;
mutex_lock(&bce->mutex);
result = stats_store(&uwb_dev->bce->lqe_stats, buf, size);
mutex_unlock(&bce->mutex);
return result;
}
static DEVICE_ATTR(LQE, S_IRUGO | S_IWUSR, uwb_dev_LQE_show, uwb_dev_LQE_store);
static ssize_t uwb_dev_RSSI_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_beca_e *bce = uwb_dev->bce;
size_t result;
mutex_lock(&bce->mutex);
result = stats_show(&uwb_dev->bce->rssi_stats, buf);
mutex_unlock(&bce->mutex);
return result;
}
static ssize_t uwb_dev_RSSI_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_beca_e *bce = uwb_dev->bce;
ssize_t result;
mutex_lock(&bce->mutex);
result = stats_store(&uwb_dev->bce->rssi_stats, buf, size);
mutex_unlock(&bce->mutex);
return result;
}
static DEVICE_ATTR(RSSI, S_IRUGO | S_IWUSR, uwb_dev_RSSI_show, uwb_dev_RSSI_store);
static struct attribute *uwb_dev_attrs[] = {
&dev_attr_EUI_48.attr,
&dev_attr_DevAddr.attr,
&dev_attr_BPST.attr,
&dev_attr_IEs.attr,
&dev_attr_LQE.attr,
&dev_attr_RSSI.attr,
NULL,
};
ATTRIBUTE_GROUPS(uwb_dev);
/* UWB bus type. */
struct bus_type uwb_bus_type = {
.name = "uwb",
.dev_groups = uwb_dev_groups,
};
/**
* Device SYSFS registration
*/
static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev)
{
struct device *dev;
dev = &uwb_dev->dev;
dev->parent = parent_dev;
dev_set_drvdata(dev, uwb_dev);
return device_add(dev);
}
static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev)
{
dev_set_drvdata(&uwb_dev->dev, NULL);
device_del(&uwb_dev->dev);
}
/**
* Register and initialize a new UWB device
*
* Did you call uwb_dev_init() on it?
*
* @parent_rc: is the parent radio controller who has the link to the
* device. When registering the UWB device that is a UWB
* Radio Controller, we point back to it.
*
* If registering the device that is part of a radio, caller has set
* rc->uwb_dev->dev. Otherwise it is to be left NULL--a new one will
* be allocated.
*/
int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev,
struct uwb_rc *parent_rc)
{
int result;
struct device *dev;
BUG_ON(uwb_dev == NULL);
BUG_ON(parent_dev == NULL);
BUG_ON(parent_rc == NULL);
mutex_lock(&uwb_dev->mutex);
dev = &uwb_dev->dev;
uwb_dev->rc = parent_rc;
result = __uwb_dev_sys_add(uwb_dev, parent_dev);
if (result < 0)
printk(KERN_ERR "UWB: unable to register dev %s with sysfs: %d\n",
dev_name(dev), result);
mutex_unlock(&uwb_dev->mutex);
return result;
}
void uwb_dev_rm(struct uwb_dev *uwb_dev)
{
mutex_lock(&uwb_dev->mutex);
__uwb_dev_sys_rm(uwb_dev);
mutex_unlock(&uwb_dev->mutex);
}
static
int __uwb_dev_try_get(struct device *dev, void *__target_uwb_dev)
{
struct uwb_dev *target_uwb_dev = __target_uwb_dev;
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
if (uwb_dev == target_uwb_dev) {
uwb_dev_get(uwb_dev);
return 1;
} else
return 0;
}
/**
* Given a UWB device descriptor, validate and refcount it
*
* @returns NULL if the device does not exist or is quiescing; the ptr to
* it otherwise.
*/
struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev)
{
if (uwb_dev_for_each(rc, __uwb_dev_try_get, uwb_dev))
return uwb_dev;
else
return NULL;
}
EXPORT_SYMBOL_GPL(uwb_dev_try_get);
/**
* Remove a device from the system [grunt for other functions]
*/
int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc)
{
struct device *dev = &uwb_dev->dev;
char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr);
uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr);
dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n",
macbuf, devbuf,
uwb_dev->dev.bus->name,
rc ? dev_name(&(rc->uwb_dev.dev)) : "");
uwb_dev_rm(uwb_dev);
list_del(&uwb_dev->bce->node);
uwb_bce_put(uwb_dev->bce);
uwb_dev_put(uwb_dev); /* for the creation in _onair() */
return 0;
}
/**
* A device went off the air, clean up after it!
*
* This is called by the UWB Daemon (through the beacon purge function
* uwb_bcn_cache_purge) when it is detected that a device has been in
* radio silence for a while.
*
* If this device is actually a local radio controller we don't need
* to go through the offair process, as it is not registered as that.
*
* NOTE: uwb_bcn_cache.mutex is held!
*/
void uwbd_dev_offair(struct uwb_beca_e *bce)
{
struct uwb_dev *uwb_dev;
uwb_dev = bce->uwb_dev;
if (uwb_dev) {
uwb_notify(uwb_dev->rc, uwb_dev, UWB_NOTIF_OFFAIR);
__uwb_dev_offair(uwb_dev, uwb_dev->rc);
}
}
/**
* A device went on the air, start it up!
*
* This is called by the UWB Daemon when it is detected that a device
* has popped up in the radio range of the radio controller.
*
 * It will just create the device, register the beacon and associated
 * state, and that's it.
*
*
* NOTE: uwb_beca.mutex is held, bce->mutex is held
*/
void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
{
int result;
struct device *dev = &rc->uwb_dev.dev;
struct uwb_dev *uwb_dev;
char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
uwb_mac_addr_print(macbuf, sizeof(macbuf), bce->mac_addr);
uwb_dev_addr_print(devbuf, sizeof(devbuf), &bce->dev_addr);
uwb_dev = kzalloc(sizeof(struct uwb_dev), GFP_KERNEL);
if (uwb_dev == NULL) {
dev_err(dev, "new device %s: Cannot allocate memory\n",
macbuf);
return;
}
uwb_dev_init(uwb_dev); /* This sets refcnt to one, we own it */
uwb_dev->dev.bus = &uwb_bus_type;
uwb_dev->mac_addr = *bce->mac_addr;
uwb_dev->dev_addr = bce->dev_addr;
dev_set_name(&uwb_dev->dev, "%s", macbuf);
/* plug the beacon cache */
bce->uwb_dev = uwb_dev;
uwb_dev->bce = bce;
uwb_bce_get(bce); /* released in uwb_dev_sys_release() */
result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc);
if (result < 0) {
dev_err(dev, "new device %s: cannot instantiate device\n",
macbuf);
goto error_dev_add;
}
dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n",
macbuf, devbuf, uwb_dev->dev.bus->name,
dev_name(&(rc->uwb_dev.dev)));
uwb_notify(rc, uwb_dev, UWB_NOTIF_ONAIR);
return;
error_dev_add:
bce->uwb_dev = NULL;
uwb_bce_put(bce);
kfree(uwb_dev);
return;
}
/**
* Iterate over the list of UWB devices, calling a @function on each
*
* See docs for bus_for_each()....
*
* @rc: radio controller for the devices.
* @function: function to call.
* @priv: data to pass to @function.
 * @returns: 0 if no invocation of function() returned a value
 * different from zero; otherwise, that value.
*/
int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f function, void *priv)
{
return device_for_each_child(&rc->uwb_dev.dev, priv, function);
}
EXPORT_SYMBOL_GPL(uwb_dev_for_each);
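/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * counting the devices known to a radio controller with uwb_dev_for_each().
 * The callback gets each child's struct device, as in __uwb_dev_try_get()
 * above; the helper names are made up for the example.
 */
static int example_count_one(struct device *dev, void *priv)
{
        (*(unsigned int *)priv)++;
        return 0;       /* keep iterating */
}

static unsigned int example_count_devices(struct uwb_rc *rc)
{
        unsigned int count = 0;

        uwb_dev_for_each(rc, example_count_one, &count);
        return count;
}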

View File

@ -1,569 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band
* Life cycle of radio controllers
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* FIXME: docs
*
* A UWB radio controller is also a UWB device, so it embeds one...
*
* List of RCs comes from the 'struct class uwb_rc_class'.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "uwb-internal.h"
static int uwb_rc_index_match(struct device *dev, const void *data)
{
const int *index = data;
struct uwb_rc *rc = dev_get_drvdata(dev);
if (rc->index == *index)
return 1;
return 0;
}
static struct uwb_rc *uwb_rc_find_by_index(int index)
{
struct device *dev;
struct uwb_rc *rc = NULL;
dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
if (dev) {
rc = dev_get_drvdata(dev);
put_device(dev);
}
return rc;
}
static int uwb_rc_new_index(void)
{
int index = 0;
for (;;) {
if (!uwb_rc_find_by_index(index))
return index;
if (++index < 0)
index = 0;
}
}
/**
* Release the backing device of a uwb_rc that has been dynamically allocated.
*/
static void uwb_rc_sys_release(struct device *dev)
{
struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev);
struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev);
uwb_rc_ie_release(rc);
kfree(rc);
}
void uwb_rc_init(struct uwb_rc *rc)
{
struct uwb_dev *uwb_dev = &rc->uwb_dev;
uwb_dev_init(uwb_dev);
rc->uwb_dev.dev.class = &uwb_rc_class;
rc->uwb_dev.dev.release = uwb_rc_sys_release;
uwb_rc_neh_create(rc);
rc->beaconing = -1;
rc->scan_type = UWB_SCAN_DISABLED;
INIT_LIST_HEAD(&rc->notifs_chain.list);
mutex_init(&rc->notifs_chain.mutex);
INIT_LIST_HEAD(&rc->uwb_beca.list);
mutex_init(&rc->uwb_beca.mutex);
uwb_drp_avail_init(rc);
uwb_rc_ie_init(rc);
uwb_rsv_init(rc);
uwb_rc_pal_init(rc);
}
EXPORT_SYMBOL_GPL(uwb_rc_init);
struct uwb_rc *uwb_rc_alloc(void)
{
struct uwb_rc *rc;
rc = kzalloc(sizeof(*rc), GFP_KERNEL);
if (rc == NULL)
return NULL;
uwb_rc_init(rc);
return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_alloc);
/*
* Show the ASIE that is broadcast in the UWB beacon by this uwb_rc device.
*/
static ssize_t ASIE_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_rc *rc = uwb_dev->rc;
struct uwb_ie_hdr *ie;
void *ptr;
size_t len;
int result = 0;
/* init empty buffer. */
result = scnprintf(buf, PAGE_SIZE, "\n");
mutex_lock(&rc->ies_mutex);
/* walk IEData looking for an ASIE. */
ptr = rc->ies->IEData;
len = le16_to_cpu(rc->ies->wIELength);
for (;;) {
ie = uwb_ie_next(&ptr, &len);
if (!ie)
break;
if (ie->element_id == UWB_APP_SPEC_IE) {
result = uwb_ie_dump_hex(ie,
ie->length + sizeof(struct uwb_ie_hdr),
buf, PAGE_SIZE);
break;
}
}
mutex_unlock(&rc->ies_mutex);
return result;
}
/*
* Update the ASIE that is broadcast in the UWB beacon by this uwb_rc device.
*/
static ssize_t ASIE_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_rc *rc = uwb_dev->rc;
char ie_buf[255];
int result, ie_len = 0;
const char *cur_ptr = buf;
struct uwb_ie_hdr *ie;
/* empty string means clear the ASIE. */
if (strlen(buf) <= 1) {
uwb_rc_ie_rm(rc, UWB_APP_SPEC_IE);
return size;
}
/* if non-empty string, convert string of hex chars to binary. */
while (ie_len < sizeof(ie_buf)) {
int char_count;
if (sscanf(cur_ptr, " %02hhX %n",
&(ie_buf[ie_len]), &char_count) > 0) {
++ie_len;
/* skip chars read from cur_ptr. */
cur_ptr += char_count;
} else {
break;
}
}
/* validate IE length and type. */
if (ie_len < sizeof(struct uwb_ie_hdr)) {
dev_err(dev, "%s: Invalid ASIE size %d.\n", __func__, ie_len);
return -EINVAL;
}
ie = (struct uwb_ie_hdr *)ie_buf;
if (ie->element_id != UWB_APP_SPEC_IE) {
dev_err(dev, "%s: Invalid IE element type size = 0x%02X.\n",
__func__, ie->element_id);
return -EINVAL;
}
/* bounds check length field from user. */
if (ie->length > (ie_len - sizeof(struct uwb_ie_hdr)))
ie->length = ie_len - sizeof(struct uwb_ie_hdr);
/*
* Valid ASIE received. Remove current ASIE then add the new one using
* uwb_rc_ie_add.
*/
uwb_rc_ie_rm(rc, UWB_APP_SPEC_IE);
result = uwb_rc_ie_add(rc, ie, ie->length + sizeof(struct uwb_ie_hdr));
return result >= 0 ? size : result;
}
static DEVICE_ATTR_RW(ASIE);
static struct attribute *rc_attrs[] = {
&dev_attr_mac_address.attr,
&dev_attr_scan.attr,
&dev_attr_beacon.attr,
&dev_attr_ASIE.attr,
NULL,
};
static const struct attribute_group rc_attr_group = {
.attrs = rc_attrs,
};
/*
* Registration of sysfs specific stuff
*/
static int uwb_rc_sys_add(struct uwb_rc *rc)
{
return sysfs_create_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
}
static void __uwb_rc_sys_rm(struct uwb_rc *rc)
{
sysfs_remove_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
}
/**
* uwb_rc_mac_addr_setup - get an RC's EUI-48 address or set it
* @rc: the radio controller.
*
* If the EUI-48 address is 00:00:00:00:00:00 or FF:FF:FF:FF:FF:FF
* then a random locally administered EUI-48 is generated and set on
* the device. The probability of address collisions is sufficiently
* unlikely (1/2^40 = 9.1e-13) that they're not checked for.
*/
static
int uwb_rc_mac_addr_setup(struct uwb_rc *rc)
{
int result;
struct device *dev = &rc->uwb_dev.dev;
struct uwb_dev *uwb_dev = &rc->uwb_dev;
char devname[UWB_ADDR_STRSIZE];
struct uwb_mac_addr addr;
result = uwb_rc_mac_addr_get(rc, &addr);
if (result < 0) {
dev_err(dev, "cannot retrieve UWB EUI-48 address: %d\n", result);
return result;
}
if (uwb_mac_addr_unset(&addr) || uwb_mac_addr_bcast(&addr)) {
addr.data[0] = 0x02; /* locally administered and unicast */
get_random_bytes(&addr.data[1], sizeof(addr.data)-1);
result = uwb_rc_mac_addr_set(rc, &addr);
if (result < 0) {
uwb_mac_addr_print(devname, sizeof(devname), &addr);
dev_err(dev, "cannot set EUI-48 address %s: %d\n",
devname, result);
return result;
}
}
uwb_dev->mac_addr = addr;
return 0;
}
static int uwb_rc_setup(struct uwb_rc *rc)
{
int result;
struct device *dev = &rc->uwb_dev.dev;
result = uwb_radio_setup(rc);
if (result < 0) {
dev_err(dev, "cannot setup UWB radio: %d\n", result);
goto error;
}
result = uwb_rc_mac_addr_setup(rc);
if (result < 0) {
dev_err(dev, "cannot setup UWB MAC address: %d\n", result);
goto error;
}
result = uwb_rc_dev_addr_assign(rc);
if (result < 0) {
dev_err(dev, "cannot assign UWB DevAddr: %d\n", result);
goto error;
}
result = uwb_rc_ie_setup(rc);
if (result < 0) {
dev_err(dev, "cannot setup IE subsystem: %d\n", result);
goto error_ie_setup;
}
result = uwb_rsv_setup(rc);
if (result < 0) {
dev_err(dev, "cannot setup reservation subsystem: %d\n", result);
goto error_rsv_setup;
}
uwb_dbg_add_rc(rc);
return 0;
error_rsv_setup:
uwb_rc_ie_release(rc);
error_ie_setup:
error:
return result;
}
/**
* Register a new UWB radio controller
*
* Did you call uwb_rc_init() on your rc?
*
 * We assume that this is being called with a > 0 refcount on
 * it [through ops->{get|put}_device()]. We'll take our own, though.
*
* @parent_dev is our real device, the one that provides the actual UWB device
*/
int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv)
{
int result;
struct device *dev;
char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
rc->index = uwb_rc_new_index();
dev = &rc->uwb_dev.dev;
dev_set_name(dev, "uwb%d", rc->index);
rc->priv = priv;
init_waitqueue_head(&rc->uwbd.wq);
INIT_LIST_HEAD(&rc->uwbd.event_list);
spin_lock_init(&rc->uwbd.event_list_lock);
uwbd_start(rc);
result = rc->start(rc);
if (result < 0)
goto error_rc_start;
result = uwb_rc_setup(rc);
if (result < 0) {
dev_err(dev, "cannot setup UWB radio controller: %d\n", result);
goto error_rc_setup;
}
result = uwb_dev_add(&rc->uwb_dev, parent_dev, rc);
if (result < 0 && result != -EADDRNOTAVAIL)
goto error_dev_add;
result = uwb_rc_sys_add(rc);
if (result < 0) {
dev_err(parent_dev, "cannot register UWB radio controller "
"dev attributes: %d\n", result);
goto error_sys_add;
}
uwb_mac_addr_print(macbuf, sizeof(macbuf), &rc->uwb_dev.mac_addr);
uwb_dev_addr_print(devbuf, sizeof(devbuf), &rc->uwb_dev.dev_addr);
dev_info(dev,
"new uwb radio controller (mac %s dev %s) on %s %s\n",
macbuf, devbuf, parent_dev->bus->name, dev_name(parent_dev));
rc->ready = 1;
return 0;
error_sys_add:
uwb_dev_rm(&rc->uwb_dev);
error_dev_add:
error_rc_setup:
rc->stop(rc);
error_rc_start:
uwbd_stop(rc);
return result;
}
EXPORT_SYMBOL_GPL(uwb_rc_add);
static int uwb_dev_offair_helper(struct device *dev, void *priv)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
return __uwb_dev_offair(uwb_dev, uwb_dev->rc);
}
/*
* Remove a Radio Controller; stop beaconing/scanning, disconnect all children
*/
void uwb_rc_rm(struct uwb_rc *rc)
{
rc->ready = 0;
uwb_dbg_del_rc(rc);
uwb_rsv_remove_all(rc);
uwb_radio_shutdown(rc);
rc->stop(rc);
uwbd_stop(rc);
uwb_rc_neh_destroy(rc);
uwb_dev_lock(&rc->uwb_dev);
rc->priv = NULL;
rc->cmd = NULL;
uwb_dev_unlock(&rc->uwb_dev);
mutex_lock(&rc->uwb_beca.mutex);
uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL);
__uwb_rc_sys_rm(rc);
mutex_unlock(&rc->uwb_beca.mutex);
uwb_rsv_cleanup(rc);
uwb_beca_release(rc);
uwb_dev_rm(&rc->uwb_dev);
}
EXPORT_SYMBOL_GPL(uwb_rc_rm);
static int find_rc_try_get(struct device *dev, const void *data)
{
const struct uwb_rc *target_rc = data;
struct uwb_rc *rc = dev_get_drvdata(dev);
if (rc == NULL) {
WARN_ON(1);
return 0;
}
if (rc == target_rc) {
if (rc->ready == 0)
return 0;
else
return 1;
}
return 0;
}
/**
* Given a radio controller descriptor, validate and refcount it
*
* @returns NULL if the rc does not exist or is quiescing; the ptr to
* it otherwise.
*/
struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
{
struct device *dev;
struct uwb_rc *rc = NULL;
dev = class_find_device(&uwb_rc_class, NULL, target_rc,
find_rc_try_get);
if (dev) {
rc = dev_get_drvdata(dev);
__uwb_rc_get(rc);
put_device(dev);
}
return rc;
}
EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
/*
* RC get for external refcount acquirers...
*
 * Increments the refcount of the device and its backend modules
*/
static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc)
{
if (rc->ready == 0)
return NULL;
uwb_dev_get(&rc->uwb_dev);
return rc;
}
static int find_rc_grandpa(struct device *dev, const void *data)
{
const struct device *grandpa_dev = data;
struct uwb_rc *rc = dev_get_drvdata(dev);
if (rc->uwb_dev.dev.parent->parent == grandpa_dev) {
rc = uwb_rc_get(rc);
return 1;
}
return 0;
}
/**
* Locate and refcount a radio controller given a common grand-parent
*
* @grandpa_dev Pointer to the 'grandparent' device structure.
* @returns NULL If the rc does not exist or is quiescing; the ptr to
* it otherwise, properly referenced.
*
* The Radio Control interface (or the UWB Radio Controller) is always
* an interface of a device. The parent is the interface, the
* grandparent is the device that encapsulates the interface.
*
* There is no need to lock around as the "grandpa" would be
 * refcounted by the target, and to remove the references, the
* uwb_rc_class->sem would have to be taken--we hold it, ergo we
* should be safe.
*/
struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
{
struct device *dev;
struct uwb_rc *rc = NULL;
dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
find_rc_grandpa);
if (dev) {
rc = dev_get_drvdata(dev);
put_device(dev);
}
return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
/**
* Find a radio controller by device address
*
* @returns the pointer to the radio controller, properly referenced
*/
static int find_rc_dev(struct device *dev, const void *data)
{
const struct uwb_dev_addr *addr = data;
struct uwb_rc *rc = dev_get_drvdata(dev);
if (rc == NULL) {
WARN_ON(1);
return 0;
}
if (!uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, addr)) {
rc = uwb_rc_get(rc);
return 1;
}
return 0;
}
struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
{
struct device *dev;
struct uwb_rc *rc = NULL;
dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
if (dev) {
rc = dev_get_drvdata(dev);
put_device(dev);
}
return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_dev);
/**
* Drop a reference on a radio controller
*
* This is the version that should be done by entities external to the
* UWB Radio Control stack (ie: clients of the API).
*/
void uwb_rc_put(struct uwb_rc *rc)
{
__uwb_rc_put(rc);
}
EXPORT_SYMBOL_GPL(uwb_rc_put);

View File

@ -1,606 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* WUSB Wire Adapter: Radio Control Interface (WUSB[8])
* Notification and Event Handling
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* The RC interface of the Host Wire Adapter (USB dongle) or WHCI PCI
* card delivers a stream of notifications and events to the
 * notification and event endpoint or area. This code takes care of
* getting a buffer with that data, breaking it up in separate
* notifications and events and then deliver those.
*
* Events are answers to commands and they carry a context ID that
* associates them to the command. Notifications are that,
* notifications, they come out of the blue and have a context ID of
* zero. Think of the context ID kind of like a handler. The
* uwb_rc_neh_* code deals with managing context IDs.
*
* This is why you require a handle to operate on a UWB host. When you
* open a handle, a context ID is assigned to you.
*
* So, the way it is done is:
*
* 1. Add an event handler [uwb_rc_neh_add()] (assigns a ctx id)
* 2. Issue command [rc->cmd(rc, ...)]
* 3. Arm the timeout timer [uwb_rc_neh_arm()]
* 4. Release the reference to the neh [uwb_rc_neh_put()]
* 5. Wait for the callback
* 6. Command result (RCEB) is passed to the callback
*
* If (2) fails, you should remove the handle [uwb_rc_neh_rm()]
* instead of arming the timer.
*
* Handles are meant to be used in *serialized*, single-threaded code.
*
* When the notification/event comes, the IRQ handler/endpoint
* callback passes the data read to uwb_rc_neh_grok() which will break
* it up in a discrete series of events, look up who is listening for
* them and execute the pertinent callbacks.
*
* If the reader detects an error while reading the data stream, call
* uwb_rc_neh_error().
*
* CONSTRAINTS/ASSUMPTIONS:
*
* - Most notifications/events are small (less than 0.5k), copying
* around is ok.
*
* - Notifications/events are ALWAYS smaller than PAGE_SIZE
*
* - Notifications/events always come in a single piece (ie: a buffer
* will always contain entire notifications/events).
*
* - we cannot know in advance how long each event is (because they
* lack a length field in their header--smart move by the standards
* body, btw). So we need a facility to get the event size given the
* header. This is what the EST code does (notif/Event Size
* Tables), check nest.c--as well, you can associate the size to
* the handle [w/ neh->extra_size()].
*
* - Most notifications/events are fixed size; only a few are variable
* size (NEST takes care of that).
*
* - Listeners of events expect them, so they usually provide a
* buffer, as they know the size. Listeners to notifications don't,
* so we allocate their buffers dynamically.
*/
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include "uwb-internal.h"
/*
* UWB Radio Controller Notification/Event Handle
*
* Represents an entity waiting for an event coming from the UWB Radio
* Controller with a given context id (context) and type (evt_type and
* evt). On reception of the notification/event, the callback (cb) is
* called with the event.
*
* If the timer expires before the event is received, the callback is
* called with -ETIMEDOUT as the event size.
*/
struct uwb_rc_neh {
struct kref kref;
struct uwb_rc *rc;
u8 evt_type;
__le16 evt;
u8 context;
u8 completed;
uwb_rc_cmd_cb_f cb;
void *arg;
struct timer_list timer;
struct list_head list_node;
};
static void uwb_rc_neh_timer(struct timer_list *t);
static void uwb_rc_neh_release(struct kref *kref)
{
struct uwb_rc_neh *neh = container_of(kref, struct uwb_rc_neh, kref);
kfree(neh);
}
static void uwb_rc_neh_get(struct uwb_rc_neh *neh)
{
kref_get(&neh->kref);
}
/**
* uwb_rc_neh_put - release reference to a neh
* @neh: the neh
*/
void uwb_rc_neh_put(struct uwb_rc_neh *neh)
{
kref_put(&neh->kref, uwb_rc_neh_release);
}
/**
* Assigns @neh a context id from @rc's pool
*
* @rc: UWB Radio Controller descriptor; @rc->neh_lock taken
* @neh: Notification/Event Handle
* @returns 0 if context id was assigned ok; < 0 errno on error (if
* all the context IDs are taken).
*
* (assumes @rc->neh_lock is held).
*
* NOTE: WUSB spec reserves context ids 0x00 for notifications and
* 0xff is invalid, so they must not be used. Initialization
* fills up those two in the bitmap so they are not allocated.
*
* We spread the allocation around to reduce the possibility of two
* consecutive opened @neh's getting the same context ID assigned (to
* avoid surprises with late events that timed out long time ago). So
* first we search from where @rc->ctx_roll is, if not found, we
* search from zero.
*/
static
int __uwb_rc_ctx_get(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
int result;
result = find_next_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX,
rc->ctx_roll++);
if (result < UWB_RC_CTX_MAX)
goto found;
result = find_first_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX);
if (result < UWB_RC_CTX_MAX)
goto found;
return -ENFILE;
found:
set_bit(result, rc->ctx_bm);
neh->context = result;
return 0;
}
/** Releases @neh's context ID back to @rc (@rc->neh_lock is locked). */
static
void __uwb_rc_ctx_put(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
struct device *dev = &rc->uwb_dev.dev;
if (neh->context == 0)
return;
if (test_bit(neh->context, rc->ctx_bm) == 0) {
dev_err(dev, "context %u not set in bitmap\n",
neh->context);
WARN_ON(1);
}
clear_bit(neh->context, rc->ctx_bm);
neh->context = 0;
}
/**
* uwb_rc_neh_add - add a neh for a radio controller command
* @rc: the radio controller
* @cmd: the radio controller command
* @expected_type: the type of the expected response event
* @expected_event: the expected event ID
* @cb: callback for when the event is received
* @arg: argument for the callback
*
* Creates a neh and adds it to the list of those waiting for an
* event. A context ID will be assigned to the command.
*/
struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
u8 expected_type, u16 expected_event,
uwb_rc_cmd_cb_f cb, void *arg)
{
int result;
unsigned long flags;
struct device *dev = &rc->uwb_dev.dev;
struct uwb_rc_neh *neh;
neh = kzalloc(sizeof(*neh), GFP_KERNEL);
if (neh == NULL) {
result = -ENOMEM;
goto error_kzalloc;
}
kref_init(&neh->kref);
INIT_LIST_HEAD(&neh->list_node);
timer_setup(&neh->timer, uwb_rc_neh_timer, 0);
neh->rc = rc;
neh->evt_type = expected_type;
neh->evt = cpu_to_le16(expected_event);
neh->cb = cb;
neh->arg = arg;
spin_lock_irqsave(&rc->neh_lock, flags);
result = __uwb_rc_ctx_get(rc, neh);
if (result >= 0) {
cmd->bCommandContext = neh->context;
list_add_tail(&neh->list_node, &rc->neh_list);
uwb_rc_neh_get(neh);
}
spin_unlock_irqrestore(&rc->neh_lock, flags);
if (result < 0)
goto error_ctx_get;
return neh;
error_ctx_get:
kfree(neh);
error_kzalloc:
dev_err(dev, "cannot open handle to radio controller: %d\n", result);
return ERR_PTR(result);
}
static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
__uwb_rc_ctx_put(rc, neh);
list_del(&neh->list_node);
}
/**
* uwb_rc_neh_rm - remove a neh.
* @rc: the radio controller
* @neh: the neh to remove
*
* Remove an active neh immediately instead of waiting for the event
* (or a time out).
*/
void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
unsigned long flags;
spin_lock_irqsave(&rc->neh_lock, flags);
__uwb_rc_neh_rm(rc, neh);
spin_unlock_irqrestore(&rc->neh_lock, flags);
del_timer_sync(&neh->timer);
uwb_rc_neh_put(neh);
}
/**
* uwb_rc_neh_arm - arm an event handler timeout timer
*
* @rc: UWB Radio Controller
* @neh: Notification/event handler for @rc
*
* The timer is only armed if the neh is active.
*/
void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
unsigned long flags;
spin_lock_irqsave(&rc->neh_lock, flags);
if (neh->context)
mod_timer(&neh->timer,
jiffies + msecs_to_jiffies(UWB_RC_CMD_TIMEOUT_MS));
spin_unlock_irqrestore(&rc->neh_lock, flags);
}
static void uwb_rc_neh_cb(struct uwb_rc_neh *neh, struct uwb_rceb *rceb, size_t size)
{
(*neh->cb)(neh->rc, neh->arg, rceb, size);
uwb_rc_neh_put(neh);
}
static bool uwb_rc_neh_match(struct uwb_rc_neh *neh, const struct uwb_rceb *rceb)
{
return neh->evt_type == rceb->bEventType
&& neh->evt == rceb->wEvent
&& neh->context == rceb->bEventContext;
}
/**
* Find the handle waiting for a RC Radio Control Event
*
* @rc: UWB Radio Controller
* @rceb: Pointer to the RCEB of the received event
*
* Returns the neh whose type, event code and context ID match @rceb,
* after removing it from @rc's list of pending handles, or NULL if
* nobody is waiting for this event.
*/
static
struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc,
const struct uwb_rceb *rceb)
{
struct uwb_rc_neh *neh = NULL, *h;
unsigned long flags;
spin_lock_irqsave(&rc->neh_lock, flags);
list_for_each_entry(h, &rc->neh_list, list_node) {
if (uwb_rc_neh_match(h, rceb)) {
neh = h;
break;
}
}
if (neh)
__uwb_rc_neh_rm(rc, neh);
spin_unlock_irqrestore(&rc->neh_lock, flags);
return neh;
}
/*
* Process notifications coming from the radio control interface
*
* @rc: UWB Radio Control Interface descriptor
* @neh: Notification/Event Handler @neh->ptr points to
* @uwb_evt->buffer.
*
* This function is called by the event/notif handling subsystem when
* notifications arrive (hwarc_probe() arms a notification/event handle
* that calls back this function for every received notification; this
* function then will rearm itself).
*
* Notification data buffers are dynamically allocated by the NEH
* handling code in neh.c [uwb_rc_neh_lookup()]. What is actually
* allocated is space to contain the notification data.
*
* Buffers are prefixed with a Radio Control Event Block (RCEB) as
* defined by the WUSB Wired-Adapter Radio Control interface. We
* just use it for the notification code.
*
* On each case statement we just transcode the endianness of the
* different fields. We declare a pointer to a RCI definition of an
* event, and then to a UWB definition of the same event (they are the
* same, remember, even if we use different pointers).
*/
static
void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size)
{
struct device *dev = &rc->uwb_dev.dev;
struct uwb_event *uwb_evt;
if (size == -ESHUTDOWN)
return;
if (size < 0) {
dev_err(dev, "ignoring event with error code %zu\n",
size);
return;
}
uwb_evt = kzalloc(sizeof(*uwb_evt), GFP_ATOMIC);
if (unlikely(uwb_evt == NULL)) {
dev_err(dev, "no memory to queue event 0x%02x/%04x/%02x\n",
rceb->bEventType, le16_to_cpu(rceb->wEvent),
rceb->bEventContext);
return;
}
uwb_evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */
uwb_evt->ts_jiffies = jiffies;
uwb_evt->type = UWB_EVT_TYPE_NOTIF;
uwb_evt->notif.size = size;
uwb_evt->notif.rceb = rceb;
uwbd_event_queue(uwb_evt);
}
static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size_t size)
{
struct device *dev = &rc->uwb_dev.dev;
struct uwb_rc_neh *neh;
struct uwb_rceb *notif;
unsigned long flags;
if (rceb->bEventContext == 0) {
notif = kmalloc(size, GFP_ATOMIC);
if (notif) {
memcpy(notif, rceb, size);
uwb_rc_notif(rc, notif, size);
} else
dev_err(dev, "event 0x%02x/%04x/%02x (%zu bytes): no memory\n",
rceb->bEventType, le16_to_cpu(rceb->wEvent),
rceb->bEventContext, size);
} else {
neh = uwb_rc_neh_lookup(rc, rceb);
if (neh) {
spin_lock_irqsave(&rc->neh_lock, flags);
/* to guard against a timeout */
neh->completed = 1;
del_timer(&neh->timer);
spin_unlock_irqrestore(&rc->neh_lock, flags);
uwb_rc_neh_cb(neh, rceb, size);
} else
dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
rceb->bEventType, le16_to_cpu(rceb->wEvent),
rceb->bEventContext, size);
}
}
/**
* Given a buffer with one or more UWB RC events/notifications, break
* them up and dispatch them.
*
* @rc: UWB Radio Controller
* @buf: Buffer with the stream of notifications/events
* @buf_size: Amount of data in the buffer
*
* Note each notification/event starts always with a 'struct
* uwb_rceb', so the minimum size is 4 bytes.
*
* The device may pass us events formatted differently than expected.
* These are first filtered, potentially creating a new event in a new
* memory location. If a new event is created by the filter it is also
* freed here.
*
* For each notif/event, tries to guess the size looking at the EST
* tables, then looks for a neh that is waiting for that event and if
* found, copies the payload to the neh's buffer and calls it back. If
* not, the data is ignored.
*
* Note that if we can't find a size description in the EST tables, we
* still might find a size in the 'neh' handle in uwb_rc_neh_lookup().
*
* Assumptions:
*
* @rc->neh_lock is NOT taken
*
* We keep track of various sizes here:
* size: contains the size of the buffer that is processed for the
* incoming event. this buffer may contain events that are not
* formatted as WHCI.
* real_size: the actual space taken by this event in the buffer.
* We need to keep track of the real size of an event to be able to
* advance the buffer correctly.
* event_size: the size of the event as expected by the core layer
* [OR] the size of the event after filtering. if the filtering
* created a new event in a new memory location then this is
* effectively the size of a new event buffer
*/
void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size)
{
struct device *dev = &rc->uwb_dev.dev;
void *itr;
struct uwb_rceb *rceb;
size_t size, real_size, event_size;
int needtofree;
itr = buf;
size = buf_size;
while (size > 0) {
if (size < sizeof(*rceb)) {
dev_err(dev, "not enough data in event buffer to "
"process incoming events (%zu left, minimum is "
"%zu)\n", size, sizeof(*rceb));
break;
}
rceb = itr;
if (rc->filter_event) {
needtofree = rc->filter_event(rc, &rceb, size,
&real_size, &event_size);
if (needtofree < 0 && needtofree != -ENOANO) {
dev_err(dev, "BUG: Unable to filter event "
"(0x%02x/%04x/%02x) from "
"device. \n", rceb->bEventType,
le16_to_cpu(rceb->wEvent),
rceb->bEventContext);
break;
}
} else
needtofree = -ENOANO;
/* do real processing if there was no filtering or the
* filtering didn't act */
if (needtofree == -ENOANO) {
ssize_t ret = uwb_est_find_size(rc, rceb, size);
if (ret < 0)
break;
if (ret > size) {
dev_err(dev, "BUG: hw sent incomplete event "
"0x%02x/%04x/%02x (%zd bytes), only got "
"%zu bytes. We don't handle that.\n",
rceb->bEventType, le16_to_cpu(rceb->wEvent),
rceb->bEventContext, ret, size);
break;
}
real_size = event_size = ret;
}
uwb_rc_neh_grok_event(rc, rceb, event_size);
if (needtofree == 1)
kfree(rceb);
itr += real_size;
size -= real_size;
}
}
EXPORT_SYMBOL_GPL(uwb_rc_neh_grok);
/**
* The entity that reads from the device notification/event channel has
* detected an error.
*
* @rc: UWB Radio Controller
* @error: Errno error code
*
*/
void uwb_rc_neh_error(struct uwb_rc *rc, int error)
{
struct uwb_rc_neh *neh;
unsigned long flags;
for (;;) {
spin_lock_irqsave(&rc->neh_lock, flags);
if (list_empty(&rc->neh_list)) {
spin_unlock_irqrestore(&rc->neh_lock, flags);
break;
}
neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node);
__uwb_rc_neh_rm(rc, neh);
spin_unlock_irqrestore(&rc->neh_lock, flags);
del_timer_sync(&neh->timer);
uwb_rc_neh_cb(neh, NULL, error);
}
}
EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
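/*
 * Illustrative sketch (not part of the original file): the shape of the
 * transport driver's read-completion path that feeds the two entry
 * points above.  my_read_done(), my_buf and my_len are made-up names;
 * in the original tree the HWA and WHCI radio controller drivers play
 * this role.
 */
static void my_read_done(struct uwb_rc *rc, void *my_buf, ssize_t my_len)
{
	if (my_len >= 0)
		/* got a buffer of notifications/events: break it up and dispatch */
		uwb_rc_neh_grok(rc, my_buf, my_len);
	else if (my_len != -ESHUTDOWN)
		/* reader error: complete every outstanding handle with the error */
		uwb_rc_neh_error(rc, my_len);
}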
static void uwb_rc_neh_timer(struct timer_list *t)
{
struct uwb_rc_neh *neh = from_timer(neh, t, timer);
struct uwb_rc *rc = neh->rc;
unsigned long flags;
spin_lock_irqsave(&rc->neh_lock, flags);
if (neh->completed) {
spin_unlock_irqrestore(&rc->neh_lock, flags);
return;
}
if (neh->context)
__uwb_rc_neh_rm(rc, neh);
else
neh = NULL;
spin_unlock_irqrestore(&rc->neh_lock, flags);
if (neh)
uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT);
}
/** Initializes the @rc's neh subsystem
*/
void uwb_rc_neh_create(struct uwb_rc *rc)
{
spin_lock_init(&rc->neh_lock);
INIT_LIST_HEAD(&rc->neh_list);
set_bit(0, rc->ctx_bm); /* 0 is reserved (see [WUSB] table 8-65) */
set_bit(0xff, rc->ctx_bm); /* and 0xff is invalid */
rc->ctx_roll = 1;
}
/** Releases the @rc's neh subsystem */
void uwb_rc_neh_destroy(struct uwb_rc *rc)
{
unsigned long flags;
struct uwb_rc_neh *neh;
for (;;) {
spin_lock_irqsave(&rc->neh_lock, flags);
if (list_empty(&rc->neh_list)) {
spin_unlock_irqrestore(&rc->neh_lock, flags);
break;
}
neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node);
__uwb_rc_neh_rm(rc, neh);
spin_unlock_irqrestore(&rc->neh_lock, flags);
del_timer_sync(&neh->timer);
uwb_rc_neh_put(neh);
}
}

View File

@ -1,128 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* UWB PAL support.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include "uwb.h"
#include "uwb-internal.h"
/**
* uwb_pal_init - initialize a UWB PAL
* @pal: the PAL to initialize
*/
void uwb_pal_init(struct uwb_pal *pal)
{
INIT_LIST_HEAD(&pal->node);
}
EXPORT_SYMBOL_GPL(uwb_pal_init);
/**
* uwb_pal_register - register a UWB PAL
* @pal: the PAL
*
* The PAL must be initialized with uwb_pal_init().
*/
int uwb_pal_register(struct uwb_pal *pal)
{
struct uwb_rc *rc = pal->rc;
int ret;
if (pal->device) {
/* create a link to the uwb_rc in the PAL device's directory. */
ret = sysfs_create_link(&pal->device->kobj,
&rc->uwb_dev.dev.kobj, "uwb_rc");
if (ret < 0)
return ret;
/* create a link to the PAL in the UWB device's directory. */
ret = sysfs_create_link(&rc->uwb_dev.dev.kobj,
&pal->device->kobj, pal->name);
if (ret < 0) {
sysfs_remove_link(&pal->device->kobj, "uwb_rc");
return ret;
}
}
pal->debugfs_dir = uwb_dbg_create_pal_dir(pal);
mutex_lock(&rc->uwb_dev.mutex);
list_add(&pal->node, &rc->pals);
mutex_unlock(&rc->uwb_dev.mutex);
return 0;
}
EXPORT_SYMBOL_GPL(uwb_pal_register);
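/*
 * Illustrative sketch (not part of the original file): the minimal
 * sequence a PAL follows to attach itself to a radio controller.
 * my_pal, my_pal_setup() and my_channel_changed() are made-up names;
 * the debug PAL in uwb-debug.c follows the same sequence.
 */
static struct uwb_pal my_pal;

static void my_channel_changed(struct uwb_pal *pal, int channel)
{
	/* channel == -1 means the radio was stopped */
}

static int my_pal_setup(struct uwb_rc *rc)
{
	uwb_pal_init(&my_pal);
	my_pal.name = "my_pal";
	my_pal.rc = rc;
	my_pal.channel_changed = my_channel_changed;
	return uwb_pal_register(&my_pal);
}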
static int find_rc(struct device *dev, const void *data)
{
const struct uwb_rc *target_rc = data;
struct uwb_rc *rc = dev_get_drvdata(dev);
if (rc == NULL) {
WARN_ON(1);
return 0;
}
if (rc == target_rc) {
if (rc->ready == 0)
return 0;
else
return 1;
}
return 0;
}
/**
* Given a radio controller descriptor, check whether it is registered.
*
* @returns false if the rc does not exist or is quiescing; true otherwise.
*/
static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
{
struct device *dev;
dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
put_device(dev);
return (dev != NULL);
}
/**
* uwb_pal_unregister - unregister a UWB PAL
* @pal: the PAL
*/
void uwb_pal_unregister(struct uwb_pal *pal)
{
struct uwb_rc *rc = pal->rc;
uwb_radio_stop(pal);
mutex_lock(&rc->uwb_dev.mutex);
list_del(&pal->node);
mutex_unlock(&rc->uwb_dev.mutex);
debugfs_remove(pal->debugfs_dir);
if (pal->device) {
/* remove link to the PAL in the UWB device's directory. */
if (uwb_rc_class_device_exists(rc))
sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name);
/* remove link to uwb_rc in the PAL device's directory. */
sysfs_remove_link(&pal->device->kobj, "uwb_rc");
}
}
EXPORT_SYMBOL_GPL(uwb_pal_unregister);
/**
* uwb_rc_pal_init - initialize the PAL related parts of a radio controller
* @rc: the radio controller
*/
void uwb_rc_pal_init(struct uwb_rc *rc)
{
INIT_LIST_HEAD(&rc->pals);
}

View File

@ -1,196 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* UWB radio (channel) management.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include "uwb.h"
#include "uwb-internal.h"
static int uwb_radio_select_channel(struct uwb_rc *rc)
{
/*
* Default to channel 9 (BG1, TFC1) unless the user has
* selected a specific channel or there are no active PALs.
*/
if (rc->active_pals == 0)
return -1;
if (rc->beaconing_forced)
return rc->beaconing_forced;
return 9;
}
/*
* Notify all active PALs that the channel has changed.
*/
static void uwb_radio_channel_changed(struct uwb_rc *rc, int channel)
{
struct uwb_pal *pal;
list_for_each_entry(pal, &rc->pals, node) {
if (pal->channel && channel != pal->channel) {
pal->channel = channel;
if (pal->channel_changed)
pal->channel_changed(pal, pal->channel);
}
}
}
/*
* Change to a new channel and notify any active PALs of the new
* channel.
*
* When stopping the radio, PALs need to be notified first so they can
* terminate any active reservations.
*/
static int uwb_radio_change_channel(struct uwb_rc *rc, int channel)
{
int ret = 0;
struct device *dev = &rc->uwb_dev.dev;
dev_dbg(dev, "%s: channel = %d, rc->beaconing = %d\n", __func__,
channel, rc->beaconing);
if (channel == -1)
uwb_radio_channel_changed(rc, channel);
if (channel != rc->beaconing) {
if (rc->beaconing != -1 && channel != -1) {
/*
* FIXME: should signal the channel change
* with a Channel Change IE.
*/
ret = uwb_radio_change_channel(rc, -1);
if (ret < 0)
return ret;
}
ret = uwb_rc_beacon(rc, channel, 0);
}
if (channel != -1)
uwb_radio_channel_changed(rc, rc->beaconing);
return ret;
}
/**
* uwb_radio_start - request that the radio be started
* @pal: the PAL making the request.
*
* If the radio is not already active, a suitable channel is selected
* and beacons are started.
*/
int uwb_radio_start(struct uwb_pal *pal)
{
struct uwb_rc *rc = pal->rc;
int ret = 0;
mutex_lock(&rc->uwb_dev.mutex);
if (!pal->channel) {
pal->channel = -1;
rc->active_pals++;
ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc));
}
mutex_unlock(&rc->uwb_dev.mutex);
return ret;
}
EXPORT_SYMBOL_GPL(uwb_radio_start);
/**
* uwb_radio_stop - request that the radio be stopped.
* @pal: the PAL making the request.
*
* Stops the radio if no other PAL is making use of it.
*/
void uwb_radio_stop(struct uwb_pal *pal)
{
struct uwb_rc *rc = pal->rc;
mutex_lock(&rc->uwb_dev.mutex);
if (pal->channel) {
rc->active_pals--;
uwb_radio_change_channel(rc, uwb_radio_select_channel(rc));
pal->channel = 0;
}
mutex_unlock(&rc->uwb_dev.mutex);
}
EXPORT_SYMBOL_GPL(uwb_radio_stop);
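/*
 * Illustrative sketch (not part of the original file): a PAL bringing
 * the radio up while it has work to do and releasing it afterwards.
 * my_pal_go_online() is a made-up name; between the two calls
 * pal->channel holds the channel the radio is using (or -1 while the
 * channel is still being selected).
 */
static int my_pal_go_online(struct uwb_pal *pal)
{
	int ret;

	ret = uwb_radio_start(pal);	/* picks a channel and starts beaconing */
	if (ret < 0)
		return ret;
	/* ... use the radio ... */
	uwb_radio_stop(pal);		/* radio stops once no PAL needs it */
	return 0;
}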
/*
* uwb_radio_force_channel - force a specific channel to be used
* @rc: the radio controller.
* @channel: the channel to use; -1 to force the radio to stop; 0 to
* use the default channel selection algorithm.
*/
int uwb_radio_force_channel(struct uwb_rc *rc, int channel)
{
int ret = 0;
mutex_lock(&rc->uwb_dev.mutex);
rc->beaconing_forced = channel;
ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc));
mutex_unlock(&rc->uwb_dev.mutex);
return ret;
}
/*
* uwb_radio_setup - setup the radio manager
* @rc: the radio controller.
*
* The radio controller is reset to ensure it's in a known state
* before it's used.
*/
int uwb_radio_setup(struct uwb_rc *rc)
{
return uwb_rc_reset(rc);
}
/*
* uwb_radio_reset_state - reset any radio manager state
* @rc: the radio controller.
*
* All internal radio manager state is reset to values corresponding
* to a reset radio controller.
*/
void uwb_radio_reset_state(struct uwb_rc *rc)
{
struct uwb_pal *pal;
mutex_lock(&rc->uwb_dev.mutex);
list_for_each_entry(pal, &rc->pals, node) {
if (pal->channel) {
pal->channel = -1;
if (pal->channel_changed)
pal->channel_changed(pal, -1);
}
}
rc->beaconing = -1;
rc->scanning = -1;
mutex_unlock(&rc->uwb_dev.mutex);
}
/*
* uwb_radio_shutdown - shutdown the radio manager
* @rc: the radio controller.
*
* The radio controller is reset.
*/
void uwb_radio_shutdown(struct uwb_rc *rc)
{
uwb_radio_reset_state(rc);
uwb_rc_reset(rc);
}

View File

@ -1,379 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band
* UWB basic command support and radio reset
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* FIXME:
*
* - docs
*
* - Now we are serializing (using the uwb_dev->mutex) the command
* execution; it should be parallelized as much as possible some
* day.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/export.h>
#include "uwb-internal.h"
/**
* Command result codes (WUSB1.0[T8-69])
*/
static
const char *__strerror[] = {
"success",
"failure",
"hardware failure",
"no more slots",
"beacon is too large",
"invalid parameter",
"unsupported power level",
"time out (wa) or invalid ie data (whci)",
"beacon size exceeded",
"cancelled",
"invalid state",
"invalid size",
"ack not received",
"no more asie notification",
};
/** Return a string matching the given error code */
const char *uwb_rc_strerror(unsigned code)
{
if (code == 255)
return "time out";
if (code >= ARRAY_SIZE(__strerror))
return "unknown error";
return __strerror[code];
}
int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
struct uwb_rccb *cmd, size_t cmd_size,
u8 expected_type, u16 expected_event,
uwb_rc_cmd_cb_f cb, void *arg)
{
struct device *dev = &rc->uwb_dev.dev;
struct uwb_rc_neh *neh;
int needtofree = 0;
int result;
uwb_dev_lock(&rc->uwb_dev); /* Protect against rc->priv being removed */
if (rc->priv == NULL) {
uwb_dev_unlock(&rc->uwb_dev);
return -ESHUTDOWN;
}
if (rc->filter_cmd) {
needtofree = rc->filter_cmd(rc, &cmd, &cmd_size);
if (needtofree < 0 && needtofree != -ENOANO) {
dev_err(dev, "%s: filter error: %d\n",
cmd_name, needtofree);
uwb_dev_unlock(&rc->uwb_dev);
return needtofree;
}
}
neh = uwb_rc_neh_add(rc, cmd, expected_type, expected_event, cb, arg);
if (IS_ERR(neh)) {
result = PTR_ERR(neh);
uwb_dev_unlock(&rc->uwb_dev);
goto out;
}
result = rc->cmd(rc, cmd, cmd_size);
uwb_dev_unlock(&rc->uwb_dev);
if (result < 0)
uwb_rc_neh_rm(rc, neh);
else
uwb_rc_neh_arm(rc, neh);
uwb_rc_neh_put(neh);
out:
if (needtofree == 1)
kfree(cmd);
return result < 0 ? result : 0;
}
EXPORT_SYMBOL_GPL(uwb_rc_cmd_async);
struct uwb_rc_cmd_done_params {
struct completion completion;
struct uwb_rceb *reply;
ssize_t reply_size;
};
static void uwb_rc_cmd_done(struct uwb_rc *rc, void *arg,
struct uwb_rceb *reply, ssize_t reply_size)
{
struct uwb_rc_cmd_done_params *p = (struct uwb_rc_cmd_done_params *)arg;
if (reply_size > 0) {
if (p->reply)
reply_size = min(p->reply_size, reply_size);
else
p->reply = kmalloc(reply_size, GFP_ATOMIC);
if (p->reply)
memcpy(p->reply, reply, reply_size);
else
reply_size = -ENOMEM;
}
p->reply_size = reply_size;
complete(&p->completion);
}
/**
* Generic function for issuing commands to the Radio Control Interface
*
* @rc: UWB Radio Control descriptor
* @cmd_name: Name of the command being issued (for error messages)
* @cmd: Pointer to rccb structure containing the command;
* normally you embed this structure as the first member of
* the full command structure.
* @cmd_size: Size of the whole command buffer pointed to by @cmd.
* @reply: Pointer to where to store the reply
* @reply_size: @reply's size
* @expected_type: Expected type in the return event
* @expected_event: Expected event code in the return event
* @preply: Here a pointer to where the event data is received will
* be stored. Once done with the data, free with kfree().
*
* This function is generic; it works for commands that return a fixed
* and known size or for commands that return a variable amount of data.
*
* If a buffer is provided, that is used, although it could be chopped
* to the maximum size of the buffer. If the buffer is NULL, then one
* will be allocated in *preply with the whole contents of the reply.
*
* @rc needs to be referenced
*/
static
ssize_t __uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
struct uwb_rccb *cmd, size_t cmd_size,
struct uwb_rceb *reply, size_t reply_size,
u8 expected_type, u16 expected_event,
struct uwb_rceb **preply)
{
ssize_t result = 0;
struct device *dev = &rc->uwb_dev.dev;
struct uwb_rc_cmd_done_params params;
init_completion(&params.completion);
params.reply = reply;
params.reply_size = reply_size;
result = uwb_rc_cmd_async(rc, cmd_name, cmd, cmd_size,
expected_type, expected_event,
uwb_rc_cmd_done, &params);
if (result)
return result;
wait_for_completion(&params.completion);
if (preply)
*preply = params.reply;
if (params.reply_size < 0)
dev_err(dev, "%s: confirmation event 0x%02x/%04x/%02x "
"reception failed: %d\n", cmd_name,
expected_type, expected_event, cmd->bCommandContext,
(int)params.reply_size);
return params.reply_size;
}
/**
* Generic function for issuing commands to the Radio Control Interface
*
* @rc: UWB Radio Control descriptor
* @cmd_name: Name of the command being issued (for error messages)
* @cmd: Pointer to rccb structure containing the command;
* normally you embed this structure as the first member of
* the full command structure.
* @cmd_size: Size of the whole command buffer pointed to by @cmd.
* @reply: Pointer to the beginning of the confirmation event
* buffer. Normally bigger than an 'struct hwarc_rceb'.
* You need to fill out reply->bEventType and reply->wEvent (in
* cpu order) as the function will use them to verify the
* confirmation event.
* @reply_size: Size of the reply buffer
*
* The function checks that the length returned in the reply is at
* least as big as @reply_size; if not, it will be deemed an error and
* -EIO returned.
*
* @rc needs to be referenced
*/
ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
struct uwb_rccb *cmd, size_t cmd_size,
struct uwb_rceb *reply, size_t reply_size)
{
struct device *dev = &rc->uwb_dev.dev;
ssize_t result;
result = __uwb_rc_cmd(rc, cmd_name,
cmd, cmd_size, reply, reply_size,
reply->bEventType, reply->wEvent, NULL);
if (result > 0 && result < reply_size) {
dev_err(dev, "%s: not enough data returned for decoding reply "
"(%zu bytes received vs at least %zu needed)\n",
cmd_name, result, reply_size);
result = -EIO;
}
return result;
}
EXPORT_SYMBOL_GPL(uwb_rc_cmd);
/**
* Generic function for issuing commands to the Radio Control
* Interface that return an unknown amount of data
*
* @rc: UWB Radio Control descriptor
* @cmd_name: Name of the command being issued (for error messages)
* @cmd: Pointer to rccb structure containing the command;
* normally you embed this structure as the first member of
* the full command structure.
* @cmd_size: Size of the whole command buffer pointed to by @cmd.
* @expected_type: Expected type in the return event
* @expected_event: Expected event code in the return event
* @preply: Here a pointer to where the event data is received will
* be stored. Once done with the data, free with kfree().
*
* The function checks that the length returned in the reply is at
* least as big as a 'struct uwb_rceb *'; if not, it will be deemed an
* error and -EIO returned.
*
* @rc needs to be referenced
*/
ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
struct uwb_rccb *cmd, size_t cmd_size,
u8 expected_type, u16 expected_event,
struct uwb_rceb **preply)
{
return __uwb_rc_cmd(rc, cmd_name, cmd, cmd_size, NULL, 0,
expected_type, expected_event, preply);
}
EXPORT_SYMBOL_GPL(uwb_rc_vcmd);
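/*
 * Illustrative sketch (not part of the original file): issuing a
 * variable-size command with uwb_rc_vcmd() and releasing the reply.
 * my_get_ie() is a made-up name, and the GET_IE command/confirmation
 * pair is used here only to show the calling convention.
 */
static int my_get_ie(struct uwb_rc *rc)
{
	struct uwb_rccb *cmd;
	struct uwb_rceb *reply = NULL;
	ssize_t ret;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;
	cmd->bCommandType = UWB_RC_CET_GENERAL;
	cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE);
	ret = uwb_rc_vcmd(rc, "GET_IE", cmd, sizeof(*cmd),
			  UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE, &reply);
	kfree(cmd);
	if (ret < 0)
		return ret;
	/* reply holds the whole confirmation event; kfree() it when done */
	kfree(reply);
	return 0;
}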
/**
* Reset a UWB Host Controller (and all radio settings)
*
* @rc: Host Controller descriptor
* @returns: 0 if ok, < 0 errno code on error
*
* We put the command on kmalloc'ed memory as some arches cannot do
* USB from the stack. The reply event is copied from a staging buffer,
* so it can live on the stack. See WUSB1.0[8.6.2.4] for more details.
*/
int uwb_rc_reset(struct uwb_rc *rc)
{
int result = -ENOMEM;
struct uwb_rc_evt_confirm reply;
struct uwb_rccb *cmd;
size_t cmd_size = sizeof(*cmd);
mutex_lock(&rc->uwb_dev.mutex);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
goto error_kzalloc;
cmd->bCommandType = UWB_RC_CET_GENERAL;
cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
reply.rceb.bEventType = UWB_RC_CET_GENERAL;
reply.rceb.wEvent = UWB_RC_CMD_RESET;
result = uwb_rc_cmd(rc, "RESET", cmd, cmd_size,
&reply.rceb, sizeof(reply));
if (result < 0)
goto error_cmd;
if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
dev_err(&rc->uwb_dev.dev,
"RESET: command execution failed: %s (%d)\n",
uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
result = -EIO;
}
error_cmd:
kfree(cmd);
error_kzalloc:
mutex_unlock(&rc->uwb_dev.mutex);
return result;
}
int uwbd_msg_handle_reset(struct uwb_event *evt)
{
struct uwb_rc *rc = evt->rc;
int ret;
dev_info(&rc->uwb_dev.dev, "resetting radio controller\n");
ret = rc->reset(rc);
if (ret < 0) {
dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret);
goto error;
}
return 0;
error:
/* Nothing can be done except try the reset again. Wait a bit
to avoid reset loops during probe() or remove(). */
msleep(1000);
uwb_rc_reset_all(rc);
return ret;
}
/**
* uwb_rc_reset_all - request a reset of the radio controller and PALs
* @rc: the radio controller of the hardware device to be reset.
*
* The full hardware reset of the radio controller and all the PALs
* will be scheduled.
*/
void uwb_rc_reset_all(struct uwb_rc *rc)
{
struct uwb_event *evt;
evt = kzalloc(sizeof(struct uwb_event), GFP_ATOMIC);
if (unlikely(evt == NULL))
return;
evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */
evt->ts_jiffies = jiffies;
evt->type = UWB_EVT_TYPE_MSG;
evt->message = UWB_EVT_MSG_RESET;
uwbd_event_queue(evt);
}
EXPORT_SYMBOL_GPL(uwb_rc_reset_all);
void uwb_rc_pre_reset(struct uwb_rc *rc)
{
rc->stop(rc);
uwbd_flush(rc);
uwb_radio_reset_state(rc);
uwb_rsv_remove_all(rc);
}
EXPORT_SYMBOL_GPL(uwb_rc_pre_reset);
int uwb_rc_post_reset(struct uwb_rc *rc)
{
int ret;
ret = rc->start(rc);
if (ret)
goto out;
ret = uwb_rc_mac_addr_set(rc, &rc->uwb_dev.mac_addr);
if (ret)
goto out;
ret = uwb_rc_dev_addr_set(rc, &rc->uwb_dev.dev_addr);
if (ret)
goto out;
out:
return ret;
}
EXPORT_SYMBOL_GPL(uwb_rc_post_reset);
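/*
 * Illustrative sketch (not part of the original file): how a transport
 * driver is expected to bracket its hardware reset with the pre/post
 * helpers above.  my_rc_reset() and my_hw_reset() are made-up names;
 * in the original tree the HWA/WHCI drivers' reset methods do this,
 * driven by uwbd_msg_handle_reset() above.
 */
static int my_hw_reset(struct uwb_rc *rc)
{
	/* the transport-specific hardware reset would go here */
	return 0;
}

static int my_rc_reset(struct uwb_rc *rc)
{
	int ret;

	uwb_rc_pre_reset(rc);		/* stop, flush events, drop reservations */
	ret = my_hw_reset(rc);
	if (ret == 0)
		ret = uwb_rc_post_reset(rc);	/* restart and restore addresses */
	return ret;
}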

File diff suppressed because it is too large

View File

@ -1,120 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band
* Scanning management
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* FIXME: docs
* FIXME: there are issues here on how BEACON and SCAN on USB RCI deal
* with each other. Currently seems that START_BEACON while
* SCAN_ONLY will cancel the scan, so we need to update the
* state here. Clarification request sent by email on
* 10/05/2005.
* 10/28/2005 No clear answer heard--maybe we'll hack the API
* so that when we start beaconing, if the HC is
* scanning in a mode not compatible with beaconing
* we just fail.
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include "uwb-internal.h"
/**
* Start/stop scanning in a radio controller
*
* @rc: UWB Radio Controller
* @channel: Channel to scan; encodings in WUSB1.0[Table 5.12]
* @type: Type of scanning to do.
* @bpst_offset: value at which to start scanning (if type ==
* UWB_SCAN_ONLY_STARTTIME)
* @returns: 0 if ok, < 0 errno code on error
*
* We put the command on kmalloc'ed memory as some arches cannot do
* USB from the stack. The reply event is copied from a staging buffer,
* so it can live on the stack. See WUSB1.0[8.6.2.4] for more details.
*/
int uwb_rc_scan(struct uwb_rc *rc,
unsigned channel, enum uwb_scan_type type,
unsigned bpst_offset)
{
int result;
struct uwb_rc_cmd_scan *cmd;
struct uwb_rc_evt_confirm reply;
result = -ENOMEM;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
goto error_kzalloc;
mutex_lock(&rc->uwb_dev.mutex);
cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SCAN);
cmd->bChannelNumber = channel;
cmd->bScanState = type;
cmd->wStartTime = cpu_to_le16(bpst_offset);
reply.rceb.bEventType = UWB_RC_CET_GENERAL;
reply.rceb.wEvent = UWB_RC_CMD_SCAN;
result = uwb_rc_cmd(rc, "SCAN", &cmd->rccb, sizeof(*cmd),
&reply.rceb, sizeof(reply));
if (result < 0)
goto error_cmd;
if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
dev_err(&rc->uwb_dev.dev,
"SCAN: command execution failed: %s (%d)\n",
uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
result = -EIO;
goto error_cmd;
}
rc->scanning = channel;
rc->scan_type = type;
error_cmd:
mutex_unlock(&rc->uwb_dev.mutex);
kfree(cmd);
error_kzalloc:
return result;
}
/*
* Print scanning state
*/
static ssize_t uwb_rc_scan_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_rc *rc = uwb_dev->rc;
ssize_t result;
mutex_lock(&rc->uwb_dev.mutex);
result = sprintf(buf, "%d %d\n", rc->scanning, rc->scan_type);
mutex_unlock(&rc->uwb_dev.mutex);
return result;
}
/*
* Start a scan from sysfs; the expected input format is
* "<channel> <type> [<bpst_offset>]".
*/
static ssize_t uwb_rc_scan_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_rc *rc = uwb_dev->rc;
unsigned channel;
unsigned type;
unsigned bpst_offset = 0;
ssize_t result = -EINVAL;
if (sscanf(buf, "%u %u %u\n", &channel, &type, &bpst_offset) >= 2
&& type < UWB_SCAN_TOP)
result = uwb_rc_scan(rc, channel, type, bpst_offset);
return result < 0 ? result : size;
}
/** Radio Control sysfs interface (declaration) */
DEVICE_ATTR(scan, S_IRUGO | S_IWUSR, uwb_rc_scan_show, uwb_rc_scan_store);

View File

@ -1,211 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Bus for UWB Multi-interface Controller capabilities.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "include/umc.h"
static int umc_bus_pre_reset_helper(struct device *dev, void *data)
{
int ret = 0;
if (dev->driver) {
struct umc_dev *umc = to_umc_dev(dev);
struct umc_driver *umc_drv = to_umc_driver(dev->driver);
if (umc_drv->pre_reset)
ret = umc_drv->pre_reset(umc);
else
device_release_driver(dev);
}
return ret;
}
static int umc_bus_post_reset_helper(struct device *dev, void *data)
{
int ret = 0;
if (dev->driver) {
struct umc_dev *umc = to_umc_dev(dev);
struct umc_driver *umc_drv = to_umc_driver(dev->driver);
if (umc_drv->post_reset)
ret = umc_drv->post_reset(umc);
} else
ret = device_attach(dev);
return ret;
}
/**
* umc_controller_reset - reset the whole UMC controller
* @umc: the UMC device for the radio controller.
*
* Drivers for all capabilities of the controller will have their
* pre_reset methods called or be unbound from their device. Then all
* post_reset methods will be called or the drivers will be rebound.
*
* Radio controllers must provide pre_reset and post_reset methods and
* reset the hardware in their start method.
*
* If this is called while a probe() or remove() is in progress it
* will return -EAGAIN and not perform the reset.
*/
int umc_controller_reset(struct umc_dev *umc)
{
struct device *parent = umc->dev.parent;
int ret = 0;
if (!device_trylock(parent))
return -EAGAIN;
ret = device_for_each_child(parent, parent, umc_bus_pre_reset_helper);
if (ret >= 0)
ret = device_for_each_child(parent, parent, umc_bus_post_reset_helper);
device_unlock(parent);
return ret;
}
EXPORT_SYMBOL_GPL(umc_controller_reset);
/**
* umc_match_pci_id - match a UMC driver to a UMC device's parent PCI device.
* @umc_drv: umc driver with match_data pointing to a zero-terminated
* table of pci_device_id's.
* @umc: umc device whose parent is to be matched.
*/
int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc)
{
const struct pci_device_id *id_table = umc_drv->match_data;
struct pci_dev *pci;
if (!dev_is_pci(umc->dev.parent))
return 0;
pci = to_pci_dev(umc->dev.parent);
return pci_match_id(id_table, pci) != NULL;
}
EXPORT_SYMBOL_GPL(umc_match_pci_id);
static int umc_bus_rescan_helper(struct device *dev, void *data)
{
int ret = 0;
if (!dev->driver)
ret = device_attach(dev);
return ret;
}
static void umc_bus_rescan(struct device *parent)
{
int err;
/*
* We can't use bus_rescan_devices() here as it deadlocks when
* it tries to retake the dev->parent semaphore.
*/
err = device_for_each_child(parent, NULL, umc_bus_rescan_helper);
if (err < 0)
printk(KERN_WARNING "%s: rescan of bus failed: %d\n",
KBUILD_MODNAME, err);
}
static int umc_bus_match(struct device *dev, struct device_driver *drv)
{
struct umc_dev *umc = to_umc_dev(dev);
struct umc_driver *umc_driver = to_umc_driver(drv);
if (umc->cap_id == umc_driver->cap_id) {
if (umc_driver->match)
return umc_driver->match(umc_driver, umc);
else
return 1;
}
return 0;
}
static int umc_device_probe(struct device *dev)
{
struct umc_dev *umc;
struct umc_driver *umc_driver;
int err;
umc_driver = to_umc_driver(dev->driver);
umc = to_umc_dev(dev);
get_device(dev);
err = umc_driver->probe(umc);
if (err)
put_device(dev);
else
umc_bus_rescan(dev->parent);
return err;
}
static int umc_device_remove(struct device *dev)
{
struct umc_dev *umc;
struct umc_driver *umc_driver;
umc_driver = to_umc_driver(dev->driver);
umc = to_umc_dev(dev);
umc_driver->remove(umc);
put_device(dev);
return 0;
}
static ssize_t capability_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct umc_dev *umc = to_umc_dev(dev);
return sprintf(buf, "0x%02x\n", umc->cap_id);
}
static DEVICE_ATTR_RO(capability_id);
static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct umc_dev *umc = to_umc_dev(dev);
return sprintf(buf, "0x%04x\n", umc->version);
}
static DEVICE_ATTR_RO(version);
static struct attribute *umc_dev_attrs[] = {
&dev_attr_capability_id.attr,
&dev_attr_version.attr,
NULL,
};
ATTRIBUTE_GROUPS(umc_dev);
struct bus_type umc_bus_type = {
.name = "umc",
.match = umc_bus_match,
.probe = umc_device_probe,
.remove = umc_device_remove,
.dev_groups = umc_dev_groups,
};
EXPORT_SYMBOL_GPL(umc_bus_type);
static int __init umc_bus_init(void)
{
return bus_register(&umc_bus_type);
}
module_init(umc_bus_init);
static void __exit umc_bus_exit(void)
{
bus_unregister(&umc_bus_type);
}
module_exit(umc_bus_exit);
MODULE_DESCRIPTION("UWB Multi-interface Controller capability bus");
MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
MODULE_LICENSE("GPL");

View File

@ -1,94 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* UWB Multi-interface Controller device management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include "include/umc.h"
static void umc_device_release(struct device *dev)
{
struct umc_dev *umc = to_umc_dev(dev);
kfree(umc);
}
/**
* umc_device_create - allocate a child UMC device
* @parent: parent of the new UMC device.
* @n: index of the new device.
*
* The new UMC device will have a bus ID of the parent with '-n'
* appended.
*/
struct umc_dev *umc_device_create(struct device *parent, int n)
{
struct umc_dev *umc;
umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL);
if (umc) {
dev_set_name(&umc->dev, "%s-%d", dev_name(parent), n);
umc->dev.parent = parent;
umc->dev.bus = &umc_bus_type;
umc->dev.release = umc_device_release;
umc->dev.dma_mask = parent->dma_mask;
}
return umc;
}
EXPORT_SYMBOL_GPL(umc_device_create);
/**
* umc_device_register - register a UMC device
* @umc: pointer to the UMC device
*
* The memory resource for the UMC device is acquired and the device
* registered with the system.
*/
int umc_device_register(struct umc_dev *umc)
{
int err;
err = request_resource(umc->resource.parent, &umc->resource);
if (err < 0) {
dev_err(&umc->dev, "can't allocate resource range %pR: %d\n",
&umc->resource, err);
goto error_request_resource;
}
err = device_register(&umc->dev);
if (err < 0)
goto error_device_register;
return 0;
error_device_register:
put_device(&umc->dev);
release_resource(&umc->resource);
error_request_resource:
return err;
}
EXPORT_SYMBOL_GPL(umc_device_register);
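/*
 * Illustrative sketch (not part of the original file): how a parent
 * driver (the WHCI PCI driver in the original tree) creates and
 * registers one capability device.  my_add_cap() and the field values
 * are placeholders; error unwinding is elided in this sketch.
 */
static struct umc_dev *my_add_cap(struct device *parent, int n,
				  struct resource *cap_res)
{
	struct umc_dev *umc;

	umc = umc_device_create(parent, n);
	if (umc == NULL)
		return NULL;
	umc->cap_id = 0x01;		/* placeholder capability id */
	umc->version = 0x0100;		/* placeholder capability version */
	umc->resource = *cap_res;	/* MMIO window of this capability */
	if (umc_device_register(umc) < 0)
		return NULL;
	return umc;
}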
/**
* umc_device_unregister - unregister a UMC device
* @umc: pointer to the UMC device
*
* First we unregister the device, make sure the driver can do its
* resource release thing and then we try to release any left-over
* resources. We take a ref to the device, to make sure it doesn't
* disappear under our feet.
*/
void umc_device_unregister(struct umc_dev *umc)
{
struct device *dev;
if (!umc)
return;
dev = get_device(&umc->dev);
device_unregister(&umc->dev);
release_resource(&umc->resource);
put_device(dev);
}
EXPORT_SYMBOL_GPL(umc_device_unregister);

View File

@ -1,31 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* UWB Multi-interface Controller driver management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include "include/umc.h"
int __umc_driver_register(struct umc_driver *umc_drv, struct module *module,
const char *mod_name)
{
umc_drv->driver.name = umc_drv->name;
umc_drv->driver.owner = module;
umc_drv->driver.mod_name = mod_name;
umc_drv->driver.bus = &umc_bus_type;
return driver_register(&umc_drv->driver);
}
EXPORT_SYMBOL_GPL(__umc_driver_register);
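/*
 * Illustrative sketch (not part of the original file): a minimal UMC
 * capability driver.  my_umc_probe()/my_umc_remove() and the cap_id
 * value are placeholders, and umc_driver_register() is assumed to be
 * the convenience wrapper around __umc_driver_register() declared in
 * include/umc.h.
 */
static int my_umc_probe(struct umc_dev *umc)
{
	/* map umc->resource, request umc->irq, etc. */
	return 0;
}

static void my_umc_remove(struct umc_dev *umc)
{
}

static struct umc_driver my_umc_driver = {
	.name	= "my_umc",
	.cap_id	= 0x01,		/* placeholder capability id */
	.probe	= my_umc_probe,
	.remove	= my_umc_remove,
};

static int my_umc_driver_setup(void)
{
	return umc_driver_register(&my_umc_driver);
}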
/**
* umc_driver_unregister - unregister a UMC capability driver.
* @umc_drv: pointer to the driver.
*/
void umc_driver_unregister(struct umc_driver *umc_drv)
{
driver_unregister(&umc_drv->driver);
}
EXPORT_SYMBOL_GPL(umc_driver_unregister);

View File

@ -1,354 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band
* Debug support
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*
* FIXME: doc
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include "include/debug-cmd.h"
#include "uwb-internal.h"
/*
* Debug interface
*
* Per radio controller debugfs files (in uwb/uwbN/):
*
* command: Flexible command interface (see <linux/uwb/debug-cmd.h>).
*
* reservations: information on reservations.
*
* accept: Set to true (Y or 1) to accept reservation requests from
* peers.
*
* drp_avail: DRP availability information.
*/
struct uwb_dbg {
struct uwb_pal pal;
bool accept;
struct list_head rsvs;
struct dentry *root_d;
struct dentry *command_f;
struct dentry *reservations_f;
struct dentry *accept_f;
struct dentry *drp_avail_f;
spinlock_t list_lock;
};
static struct dentry *root_dir;
static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv)
{
struct uwb_dbg *dbg = rsv->pal_priv;
uwb_rsv_dump("debug", rsv);
if (rsv->state == UWB_RSV_STATE_NONE) {
spin_lock(&dbg->list_lock);
list_del(&rsv->pal_node);
spin_unlock(&dbg->list_lock);
uwb_rsv_destroy(rsv);
}
}
static int cmd_rsv_establish(struct uwb_rc *rc,
struct uwb_dbg_cmd_rsv_establish *cmd)
{
struct uwb_mac_addr macaddr;
struct uwb_rsv *rsv;
struct uwb_dev *target;
int ret;
memcpy(&macaddr, cmd->target, sizeof(macaddr));
target = uwb_dev_get_by_macaddr(rc, &macaddr);
if (target == NULL)
return -ENODEV;
rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, rc->dbg);
if (rsv == NULL) {
uwb_dev_put(target);
return -ENOMEM;
}
rsv->target.type = UWB_RSV_TARGET_DEV;
rsv->target.dev = target;
rsv->type = cmd->type;
rsv->max_mas = cmd->max_mas;
rsv->min_mas = cmd->min_mas;
rsv->max_interval = cmd->max_interval;
ret = uwb_rsv_establish(rsv);
if (ret)
uwb_rsv_destroy(rsv);
else {
spin_lock(&(rc->dbg)->list_lock);
list_add_tail(&rsv->pal_node, &rc->dbg->rsvs);
spin_unlock(&(rc->dbg)->list_lock);
}
return ret;
}
static int cmd_rsv_terminate(struct uwb_rc *rc,
struct uwb_dbg_cmd_rsv_terminate *cmd)
{
struct uwb_rsv *rsv, *found = NULL;
int i = 0;
spin_lock(&(rc->dbg)->list_lock);
list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) {
if (i == cmd->index) {
found = rsv;
uwb_rsv_get(found);
break;
}
i++;
}
spin_unlock(&(rc->dbg)->list_lock);
if (!found)
return -EINVAL;
uwb_rsv_terminate(found);
uwb_rsv_put(found);
return 0;
}
static int cmd_ie_add(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_add)
{
return uwb_rc_ie_add(rc,
(const struct uwb_ie_hdr *) ie_to_add->data,
ie_to_add->len);
}
static int cmd_ie_rm(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_rm)
{
return uwb_rc_ie_rm(rc, ie_to_rm->data[0]);
}
static ssize_t command_write(struct file *file, const char __user *buf,
size_t len, loff_t *off)
{
struct uwb_rc *rc = file->private_data;
struct uwb_dbg_cmd cmd;
int ret = 0;
if (len != sizeof(struct uwb_dbg_cmd))
return -EINVAL;
if (copy_from_user(&cmd, buf, len) != 0)
return -EFAULT;
switch (cmd.type) {
case UWB_DBG_CMD_RSV_ESTABLISH:
ret = cmd_rsv_establish(rc, &cmd.rsv_establish);
break;
case UWB_DBG_CMD_RSV_TERMINATE:
ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate);
break;
case UWB_DBG_CMD_IE_ADD:
ret = cmd_ie_add(rc, &cmd.ie_add);
break;
case UWB_DBG_CMD_IE_RM:
ret = cmd_ie_rm(rc, &cmd.ie_rm);
break;
case UWB_DBG_CMD_RADIO_START:
ret = uwb_radio_start(&rc->dbg->pal);
break;
case UWB_DBG_CMD_RADIO_STOP:
uwb_radio_stop(&rc->dbg->pal);
break;
default:
return -EINVAL;
}
return ret < 0 ? ret : len;
}
static const struct file_operations command_fops = {
.open = simple_open,
.write = command_write,
.read = NULL,
.llseek = no_llseek,
.owner = THIS_MODULE,
};
static int reservations_show(struct seq_file *s, void *p)
{
struct uwb_rc *rc = s->private;
struct uwb_rsv *rsv;
mutex_lock(&rc->rsvs_mutex);
list_for_each_entry(rsv, &rc->reservations, rc_node) {
struct uwb_dev_addr devaddr;
char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
bool is_owner;
uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
if (rsv->target.type == UWB_RSV_TARGET_DEV) {
devaddr = rsv->target.dev->dev_addr;
is_owner = &rc->uwb_dev == rsv->owner;
} else {
devaddr = rsv->target.devaddr;
is_owner = true;
}
uwb_dev_addr_print(target, sizeof(target), &devaddr);
seq_printf(s, "%c %s -> %s: %s\n",
is_owner ? 'O' : 'T',
owner, target, uwb_rsv_state_str(rsv->state));
seq_printf(s, " stream: %d type: %s\n",
rsv->stream, uwb_rsv_type_str(rsv->type));
seq_printf(s, " %*pb\n", UWB_NUM_MAS, rsv->mas.bm);
}
mutex_unlock(&rc->rsvs_mutex);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(reservations);
static int drp_avail_show(struct seq_file *s, void *p)
{
struct uwb_rc *rc = s->private;
seq_printf(s, "global: %*pb\n", UWB_NUM_MAS, rc->drp_avail.global);
seq_printf(s, "local: %*pb\n", UWB_NUM_MAS, rc->drp_avail.local);
seq_printf(s, "pending: %*pb\n", UWB_NUM_MAS, rc->drp_avail.pending);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(drp_avail);
static void uwb_dbg_channel_changed(struct uwb_pal *pal, int channel)
{
struct device *dev = &pal->rc->uwb_dev.dev;
if (channel > 0)
dev_info(dev, "debug: channel %d started\n", channel);
else
dev_info(dev, "debug: channel stopped\n");
}
static void uwb_dbg_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv)
{
struct uwb_dbg *dbg = container_of(pal, struct uwb_dbg, pal);
if (dbg->accept) {
spin_lock(&dbg->list_lock);
list_add_tail(&rsv->pal_node, &dbg->rsvs);
spin_unlock(&dbg->list_lock);
uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, dbg);
}
}
/**
* uwb_dbg_add_rc - add a debug interface for a radio controller
* @rc: the radio controller
*/
void uwb_dbg_add_rc(struct uwb_rc *rc)
{
rc->dbg = kzalloc(sizeof(struct uwb_dbg), GFP_KERNEL);
if (rc->dbg == NULL)
return;
INIT_LIST_HEAD(&rc->dbg->rsvs);
spin_lock_init(&(rc->dbg)->list_lock);
uwb_pal_init(&rc->dbg->pal);
rc->dbg->pal.rc = rc;
rc->dbg->pal.channel_changed = uwb_dbg_channel_changed;
rc->dbg->pal.new_rsv = uwb_dbg_new_rsv;
uwb_pal_register(&rc->dbg->pal);
if (root_dir) {
rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev),
root_dir);
rc->dbg->command_f = debugfs_create_file("command", 0200,
rc->dbg->root_d, rc,
&command_fops);
rc->dbg->reservations_f = debugfs_create_file("reservations", 0444,
rc->dbg->root_d, rc,
&reservations_fops);
rc->dbg->accept_f = debugfs_create_bool("accept", 0644,
rc->dbg->root_d,
&rc->dbg->accept);
rc->dbg->drp_avail_f = debugfs_create_file("drp_avail", 0444,
rc->dbg->root_d, rc,
&drp_avail_fops);
}
}
/**
* uwb_dbg_del_rc - remove a radio controller's debug interface
* @rc: the radio controller
*/
void uwb_dbg_del_rc(struct uwb_rc *rc)
{
struct uwb_rsv *rsv, *t;
if (rc->dbg == NULL)
return;
list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) {
uwb_rsv_terminate(rsv);
}
uwb_pal_unregister(&rc->dbg->pal);
if (root_dir) {
debugfs_remove(rc->dbg->drp_avail_f);
debugfs_remove(rc->dbg->accept_f);
debugfs_remove(rc->dbg->reservations_f);
debugfs_remove(rc->dbg->command_f);
debugfs_remove(rc->dbg->root_d);
}
}
/**
* uwb_dbg_init - initialize the debug interface sub-module
*/
void uwb_dbg_init(void)
{
root_dir = debugfs_create_dir("uwb", NULL);
}
/**
* uwb_dbg_exit - clean-up the debug interface sub-module
*/
void uwb_dbg_exit(void)
{
debugfs_remove(root_dir);
}
/**
* uwb_dbg_create_pal_dir - create a debugfs directory for a PAL
* @pal: The PAL.
*/
struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal)
{
struct uwb_rc *rc = pal->rc;
if (root_dir && rc->dbg && rc->dbg->root_d && pal->name)
return debugfs_create_dir(pal->name, rc->dbg->root_d);
return NULL;
}
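/*
 * Illustrative sketch (not part of the original file): a userspace
 * program driving the per-controller debugfs "command" file described
 * above.  The debugfs mount point, the controller name ("uwb0") and
 * the header location are assumptions; the command structures live in
 * include/debug-cmd.h in this staging tree.
 */
#include <fcntl.h>
#include <unistd.h>
#include "debug-cmd.h"		/* struct uwb_dbg_cmd, UWB_DBG_CMD_* */

int main(void)
{
	struct uwb_dbg_cmd cmd = { .type = UWB_DBG_CMD_RADIO_START };
	int fd, ret = 0;

	fd = open("/sys/kernel/debug/uwb/uwb0/command", O_WRONLY);
	if (fd < 0)
		return 1;
	/* the write must be exactly sizeof(struct uwb_dbg_cmd) */
	if (write(fd, &cmd, sizeof(cmd)) != sizeof(cmd))
		ret = 1;
	close(fd);
	return ret;
}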

View File

@ -1,366 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Ultra Wide Band
* UWB internal API
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This contains most of the internal API for UWB. This is stuff used
* across the stack that of course, is of no interest to the rest.
*
* Some parts might end up going public (like uwb_rc_*())...
*/
#ifndef __UWB_INTERNAL_H__
#define __UWB_INTERNAL_H__
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include "uwb.h"
struct uwb_beca_e;
/* General device API */
extern void uwb_dev_init(struct uwb_dev *uwb_dev);
extern int __uwb_dev_offair(struct uwb_dev *, struct uwb_rc *);
extern int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev,
struct uwb_rc *parent_rc);
extern void uwb_dev_rm(struct uwb_dev *uwb_dev);
extern void uwbd_dev_onair(struct uwb_rc *, struct uwb_beca_e *);
extern void uwbd_dev_offair(struct uwb_beca_e *);
void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event);
/* General UWB Radio Controller Internal API */
extern struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *);
static inline struct uwb_rc *__uwb_rc_get(struct uwb_rc *rc)
{
uwb_dev_get(&rc->uwb_dev);
return rc;
}
static inline void __uwb_rc_put(struct uwb_rc *rc)
{
if (rc)
uwb_dev_put(&rc->uwb_dev);
}
extern int uwb_rc_reset(struct uwb_rc *rc);
extern int uwb_rc_beacon(struct uwb_rc *rc,
int channel, unsigned bpst_offset);
extern int uwb_rc_scan(struct uwb_rc *rc,
unsigned channel, enum uwb_scan_type type,
unsigned bpst_offset);
extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc);
void uwb_rc_ie_init(struct uwb_rc *);
int uwb_rc_ie_setup(struct uwb_rc *);
void uwb_rc_ie_release(struct uwb_rc *);
int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len,
char *buf, size_t size);
int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *);
extern const char *uwb_rc_strerror(unsigned code);
/*
* Time to wait for a response to an RC command.
*
* Some commands can take a long time to complete. For example, START_BEACON
* may scan for several superframes before joining an existing beacon
* group and this can take around 600 ms.
*/
#define UWB_RC_CMD_TIMEOUT_MS 1000 /* ms */
/*
* Notification/Event Handlers
*/
struct uwb_rc_neh;
extern int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
struct uwb_rccb *cmd, size_t cmd_size,
u8 expected_type, u16 expected_event,
uwb_rc_cmd_cb_f cb, void *arg);
void uwb_rc_neh_create(struct uwb_rc *rc);
void uwb_rc_neh_destroy(struct uwb_rc *rc);
struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
u8 expected_type, u16 expected_event,
uwb_rc_cmd_cb_f cb, void *arg);
void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh);
void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh);
void uwb_rc_neh_put(struct uwb_rc_neh *neh);
/* Event size tables */
extern int uwb_est_create(void);
extern void uwb_est_destroy(void);
/*
* UWB conflicting alien reservations
*/
struct uwb_cnflt_alien {
struct uwb_rc *rc;
struct list_head rc_node;
struct uwb_mas_bm mas;
struct timer_list timer;
struct work_struct cnflt_update_work;
};
enum uwb_uwb_rsv_alloc_result {
UWB_RSV_ALLOC_FOUND = 0,
UWB_RSV_ALLOC_NOT_FOUND,
};
enum uwb_rsv_mas_status {
UWB_RSV_MAS_NOT_AVAIL = 1,
UWB_RSV_MAS_SAFE,
UWB_RSV_MAS_UNSAFE,
};
struct uwb_rsv_col_set_info {
unsigned char start_col;
unsigned char interval;
unsigned char safe_mas_per_col;
unsigned char unsafe_mas_per_col;
};
struct uwb_rsv_col_info {
unsigned char max_avail_safe;
unsigned char max_avail_unsafe;
unsigned char highest_mas[UWB_MAS_PER_ZONE];
struct uwb_rsv_col_set_info csi;
};
struct uwb_rsv_row_info {
unsigned char avail[UWB_MAS_PER_ZONE];
unsigned char free_rows;
unsigned char used_rows;
};
/*
* UWB find allocation
*/
struct uwb_rsv_alloc_info {
unsigned char bm[UWB_MAS_PER_ZONE * UWB_NUM_ZONES];
struct uwb_rsv_col_info ci[UWB_NUM_ZONES];
struct uwb_rsv_row_info ri;
struct uwb_mas_bm *not_available;
struct uwb_mas_bm *result;
int min_mas;
int max_mas;
int max_interval;
int total_allocated_mases;
int safe_allocated_mases;
int unsafe_allocated_mases;
int interval;
};
int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv,
struct uwb_mas_bm *available,
struct uwb_mas_bm *result);
void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc);
/*
* UWB Events & management daemon
*/
/**
* enum uwb_event_type - types of UWB management daemon events
*
* The UWB management daemon (uwbd) can receive two types of events:
* UWB_EVT_TYPE_NOTIF - notification from the radio controller.
* UWB_EVT_TYPE_MSG - a simple message.
*/
enum uwb_event_type {
UWB_EVT_TYPE_NOTIF,
UWB_EVT_TYPE_MSG,
};
/**
* struct uwb_event_notif - an event for a radio controller notification
* @size: Size of the buffer (ie: Guaranteed to contain at least
* a full 'struct uwb_rceb')
* @rceb: Pointer to a kmalloced() event payload
*/
struct uwb_event_notif {
size_t size;
struct uwb_rceb *rceb;
};
/**
* enum uwb_event_message - an event for a message for asynchronous processing
*
* UWB_EVT_MSG_RESET - reset the radio controller and all PAL hardware.
*/
enum uwb_event_message {
UWB_EVT_MSG_RESET,
};
/**
* UWB Event
* @rc: Radio controller that emitted the event (referenced)
* @ts_jiffies: Timestamp, when was it received
* @type: This event's type.
*/
struct uwb_event {
struct list_head list_node;
struct uwb_rc *rc;
unsigned long ts_jiffies;
enum uwb_event_type type;
union {
struct uwb_event_notif notif;
enum uwb_event_message message;
};
};
extern void uwbd_start(struct uwb_rc *rc);
extern void uwbd_stop(struct uwb_rc *rc);
extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask);
extern void uwbd_event_queue(struct uwb_event *);
void uwbd_flush(struct uwb_rc *rc);
/* UWB event handlers */
extern int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *);
extern int uwbd_evt_handle_rc_beacon(struct uwb_event *);
extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *);
extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *);
extern int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *);
extern int uwbd_evt_handle_rc_drp(struct uwb_event *);
extern int uwbd_evt_handle_rc_drp_avail(struct uwb_event *);
int uwbd_msg_handle_reset(struct uwb_event *evt);
/*
* Address management
*/
int uwb_rc_dev_addr_assign(struct uwb_rc *rc);
int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt);
/*
* UWB Beacon Cache
*
* Each beacon we receive is kept in a cache--when we see the same
* beacon consistently, it means there is a new device that we have
* to add to the system.
*/
extern unsigned long beacon_timeout_ms;
/**
* Beacon cache entry
*
* @jiffies_refresh: last time a beacon was received that refreshed
* this cache entry.
* @uwb_dev: device connected to this beacon. This pointer is not
* safe, you need to get it with uwb_dev_try_get()
*
* @hits: how many times we have seen this beacon since last time we
* cleared it
*/
struct uwb_beca_e {
struct mutex mutex;
struct kref refcnt;
struct list_head node;
struct uwb_mac_addr *mac_addr;
struct uwb_dev_addr dev_addr;
u8 hits;
unsigned long ts_jiffies;
struct uwb_dev *uwb_dev;
struct uwb_rc_evt_beacon *be;
struct stats lqe_stats, rssi_stats; /* radio statistics */
};
struct uwb_beacon_frame;
extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *,
char *, size_t);
extern void uwb_bce_kfree(struct kref *_bce);
static inline void uwb_bce_get(struct uwb_beca_e *bce)
{
kref_get(&bce->refcnt);
}
static inline void uwb_bce_put(struct uwb_beca_e *bce)
{
kref_put(&bce->refcnt, uwb_bce_kfree);
}
extern void uwb_beca_purge(struct uwb_rc *rc);
extern void uwb_beca_release(struct uwb_rc *rc);
struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
const struct uwb_dev_addr *devaddr);
struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
const struct uwb_mac_addr *macaddr);
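/*
 * Illustration only: looking up a neighbour by DevAddr.  This sketch
 * assumes the getters above return a referenced device that the caller
 * must drop with uwb_dev_put(); my_peer_present is a hypothetical name.
 */
static bool my_peer_present(struct uwb_rc *rc, const struct uwb_dev_addr *devaddr)
{
	struct uwb_dev *uwb_dev = uwb_dev_get_by_devaddr(rc, devaddr);

	if (uwb_dev == NULL)
		return false;
	/* a real caller would inspect uwb_dev->mac_addr, beacon slot, etc. */
	uwb_dev_put(uwb_dev);		/* drop the lookup reference */
	return true;
}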
int uwb_radio_setup(struct uwb_rc *rc);
void uwb_radio_reset_state(struct uwb_rc *rc);
void uwb_radio_shutdown(struct uwb_rc *rc);
int uwb_radio_force_channel(struct uwb_rc *rc, int channel);
/* -- UWB Sysfs representation */
extern struct class uwb_rc_class;
extern struct bus_type uwb_bus_type;
extern struct device_attribute dev_attr_mac_address;
extern struct device_attribute dev_attr_beacon;
extern struct device_attribute dev_attr_scan;
/* -- DRP Bandwidth allocator: bandwidth allocations, reservations, DRP */
void uwb_rsv_init(struct uwb_rc *rc);
int uwb_rsv_setup(struct uwb_rc *rc);
void uwb_rsv_cleanup(struct uwb_rc *rc);
void uwb_rsv_remove_all(struct uwb_rc *rc);
void uwb_rsv_get(struct uwb_rsv *rsv);
void uwb_rsv_put(struct uwb_rsv *rsv);
bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv);
void uwb_rsv_dump(char *text, struct uwb_rsv *rsv);
int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available);
void uwb_rsv_backoff_win_timer(struct timer_list *t);
void uwb_rsv_backoff_win_increment(struct uwb_rc *rc);
int uwb_rsv_status(struct uwb_rsv *rsv);
int uwb_rsv_companion_status(struct uwb_rsv *rsv);
void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state);
void uwb_rsv_remove(struct uwb_rsv *rsv);
struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
struct uwb_ie_drp *drp_ie);
void uwb_rsv_sched_update(struct uwb_rc *rc);
void uwb_rsv_queue_update(struct uwb_rc *rc);
int uwb_drp_ie_update(struct uwb_rsv *rsv);
void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie);
void uwb_drp_avail_init(struct uwb_rc *rc);
void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail);
int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas);
void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas);
void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas);
void uwb_drp_avail_ie_update(struct uwb_rc *rc);
/* -- PAL support */
void uwb_rc_pal_init(struct uwb_rc *rc);
/* -- Misc */
extern ssize_t uwb_mac_frame_hdr_print(char *, size_t,
const struct uwb_mac_frame_hdr *);
/* -- Debug interface */
void uwb_dbg_init(void);
void uwb_dbg_exit(void);
void uwb_dbg_add_rc(struct uwb_rc *rc);
void uwb_dbg_del_rc(struct uwb_rc *rc);
struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal);
static inline void uwb_dev_lock(struct uwb_dev *uwb_dev)
{
device_lock(&uwb_dev->dev);
}
static inline void uwb_dev_unlock(struct uwb_dev *uwb_dev)
{
device_unlock(&uwb_dev->dev);
}
#endif /* #ifndef __UWB_INTERNAL_H__ */

View File

@ -1,817 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Ultra Wide Band
* UWB API
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* FIXME: doc: overview of the API, different parts and pointers
*/
#ifndef __LINUX__UWB_H__
#define __LINUX__UWB_H__
#include <linux/limits.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <asm/page.h>
#include "include/spec.h"
struct uwb_dev;
struct uwb_beca_e;
struct uwb_rc;
struct uwb_rsv;
struct uwb_dbg;
/**
* struct uwb_dev - a UWB Device
* @rc: UWB Radio Controller that discovered the device (kind of its
* parent).
* @bce: a beacon cache entry for this device; or NULL if the device
* is a local radio controller.
* @mac_addr: the EUI-48 address of this device.
* @dev_addr: the current DevAddr used by this device.
* @beacon_slot: the slot number the beacon is using.
* @streams: bitmap of streams allocated to reservations targeted at
* this device. For an RC, this is the streams allocated for
* reservations targeted at DevAddrs.
*
* A UWB device may either be a neighbor or part of a local radio
* controller.
*/
struct uwb_dev {
struct mutex mutex;
struct list_head list_node;
struct device dev;
struct uwb_rc *rc; /* radio controller */
struct uwb_beca_e *bce; /* Beacon Cache Entry */
struct uwb_mac_addr mac_addr;
struct uwb_dev_addr dev_addr;
int beacon_slot;
DECLARE_BITMAP(streams, UWB_NUM_STREAMS);
DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS);
};
#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev)
/**
* UWB HWA/WHCI Radio Control {Command|Event} Block context IDs
*
* RC[CE]Bs have a 'context ID' field that matches the command with
* the event received to confirm it.
*
* Maximum number of context IDs
*/
enum { UWB_RC_CTX_MAX = 256 };
/** Notification chain head for UWB generated events to listeners */
struct uwb_notifs_chain {
struct list_head list;
struct mutex mutex;
};
/* Beacon cache list */
struct uwb_beca {
struct list_head list;
size_t entries;
struct mutex mutex;
};
/* Event handling thread. */
struct uwbd {
int pid;
struct task_struct *task;
wait_queue_head_t wq;
struct list_head event_list;
spinlock_t event_list_lock;
};
/**
* struct uwb_mas_bm - a bitmap of all MAS in a superframe
* @bm: a bitmap of length #UWB_NUM_MAS
*/
struct uwb_mas_bm {
DECLARE_BITMAP(bm, UWB_NUM_MAS);
DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS);
int safe;
int unsafe;
};
/**
* uwb_rsv_state - UWB Reservation state.
*
* NONE - reservation is not active (no DRP IE being transmitted).
*
* Owner reservation states:
*
* INITIATED - owner has sent an initial DRP request.
* PENDING - target responded with pending Reason Code.
* MODIFIED - reservation manager is modifying an established
* reservation with a different MAS allocation.
* ESTABLISHED - the reservation has been successfully negotiated.
*
* Target reservation states:
*
* DENIED - request is denied.
* ACCEPTED - request is accepted.
* PENDING - PAL has yet to make a decision whether to accept or
* deny.
*
* FIXME: further target states TBD.
*/
enum uwb_rsv_state {
UWB_RSV_STATE_NONE = 0,
UWB_RSV_STATE_O_INITIATED,
UWB_RSV_STATE_O_PENDING,
UWB_RSV_STATE_O_MODIFIED,
UWB_RSV_STATE_O_ESTABLISHED,
UWB_RSV_STATE_O_TO_BE_MOVED,
UWB_RSV_STATE_O_MOVE_EXPANDING,
UWB_RSV_STATE_O_MOVE_COMBINING,
UWB_RSV_STATE_O_MOVE_REDUCING,
UWB_RSV_STATE_T_ACCEPTED,
UWB_RSV_STATE_T_DENIED,
UWB_RSV_STATE_T_CONFLICT,
UWB_RSV_STATE_T_PENDING,
UWB_RSV_STATE_T_EXPANDING_ACCEPTED,
UWB_RSV_STATE_T_EXPANDING_CONFLICT,
UWB_RSV_STATE_T_EXPANDING_PENDING,
UWB_RSV_STATE_T_EXPANDING_DENIED,
UWB_RSV_STATE_T_RESIZED,
UWB_RSV_STATE_LAST,
};
enum uwb_rsv_target_type {
UWB_RSV_TARGET_DEV,
UWB_RSV_TARGET_DEVADDR,
};
/**
* struct uwb_rsv_target - the target of a reservation.
*
* Reservations are either unicast and targeted at a single device
* (UWB_RSV_TARGET_DEV), or (e.g., in the case of WUSB) targeted at a
* specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR).
*/
struct uwb_rsv_target {
enum uwb_rsv_target_type type;
union {
struct uwb_dev *dev;
struct uwb_dev_addr devaddr;
};
};
struct uwb_rsv_move {
struct uwb_mas_bm final_mas;
struct uwb_ie_drp *companion_drp_ie;
struct uwb_mas_bm companion_mas;
};
/*
* Number of streams reserved for reservations targeted at DevAddrs.
*/
#define UWB_NUM_GLOBAL_STREAMS 1
typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv);
/**
* struct uwb_rsv - a DRP reservation
*
* Data structure management:
*
* @rc: the radio controller this reservation is for
* (as target or owner)
* @rc_node: a list node for the RC
* @pal_node: a list node for the PAL
*
* Owner and target parameters:
*
* @owner: the UWB device owning this reservation
* @target: the target UWB device
* @type: reservation type
*
* Owner parameters:
*
* @max_mas: maximum number of MAS
* @min_mas: minimum number of MAS
* @sparsity: owner selected sparsity
* @is_multicast: true iff multicast
*
* @callback: callback function when the reservation completes
* @pal_priv: private data for the PAL making the reservation
*
* Reservation status:
*
* @status: negotiation status
* @stream: stream index allocated for this reservation
* @tiebreaker: conflict tiebreaker for this reservation
* @mas: reserved MAS
* @drp_ie: the DRP IE
* @ie_valid: true iff the DRP IE matches the reservation parameters
*
* DRP reservations are uniquely identified by the owner, target and
* stream index. However, when using a DevAddr as a target (e.g., for
* a WUSB cluster reservation) the responses may be received from
* devices with different DevAddrs. In this case, reservations are
* uniquely identified by just the stream index. A number of stream
* indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this.
*/
struct uwb_rsv {
struct uwb_rc *rc;
struct list_head rc_node;
struct list_head pal_node;
struct kref kref;
struct uwb_dev *owner;
struct uwb_rsv_target target;
enum uwb_drp_type type;
int max_mas;
int min_mas;
int max_interval;
bool is_multicast;
uwb_rsv_cb_f callback;
void *pal_priv;
enum uwb_rsv_state state;
bool needs_release_companion_mas;
u8 stream;
u8 tiebreaker;
struct uwb_mas_bm mas;
struct uwb_ie_drp *drp_ie;
struct uwb_rsv_move mv;
bool ie_valid;
struct timer_list timer;
struct work_struct handle_timeout_work;
};
static const
struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } };
static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas)
{
bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS);
}
/**
* struct uwb_drp_avail - a radio controller's view of MAS usage
* @global: MAS unused by neighbors (excluding reservations targeted
* or owned by the local radio controller) or the beacon period
* @local: MAS unused by local established reservations
* @pending: MAS unused by local pending reservations
* @ie: DRP Availability IE to be included in the beacon
* @ie_valid: true iff @ie is valid and does not need to be regenerated from
* @global and @local
*
* Each radio controller maintains a view of MAS usage or
* availability. MAS available for a new reservation are determined
* from the intersection of @global, @local, and @pending.
*
* The radio controller must transmit a DRP Availability IE that's the
* intersection of @global and @local.
*
* A set bit indicates the MAS is unused and available.
*
* rc->rsvs_mutex should be held before accessing this data structure.
*
* [ECMA-368] section 17.4.3.
*/
struct uwb_drp_avail {
DECLARE_BITMAP(global, UWB_NUM_MAS);
DECLARE_BITMAP(local, UWB_NUM_MAS);
DECLARE_BITMAP(pending, UWB_NUM_MAS);
struct uwb_ie_drp_avail ie;
bool ie_valid;
};
struct uwb_drp_backoff_win {
u8 window;
u8 n;
int total_expired;
struct timer_list timer;
bool can_reserve_extra_mases;
};
const char *uwb_rsv_state_str(enum uwb_rsv_state state);
const char *uwb_rsv_type_str(enum uwb_drp_type type);
struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb,
void *pal_priv);
void uwb_rsv_destroy(struct uwb_rsv *rsv);
int uwb_rsv_establish(struct uwb_rsv *rsv);
int uwb_rsv_modify(struct uwb_rsv *rsv,
int max_mas, int min_mas, int sparsity);
void uwb_rsv_terminate(struct uwb_rsv *rsv);
void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv);
void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas);
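/*
 * A hedged sketch (not part of this header) of how a PAL might have
 * negotiated a DRP reservation with the API above.  my_rsv_cb and the
 * UWB_DRP_TYPE_HARD value are assumptions taken from the spec header,
 * and the MAS numbers are arbitrary.
 */
static void my_rsv_cb(struct uwb_rsv *rsv)
{
	/* called whenever negotiation completes or the state changes */
	pr_info("reservation state: %s\n", uwb_rsv_state_str(rsv->state));
}

static int my_reserve_bandwidth(struct uwb_rc *rc, struct uwb_dev *peer)
{
	struct uwb_rsv *rsv = uwb_rsv_create(rc, my_rsv_cb, NULL);

	if (rsv == NULL)
		return -ENOMEM;
	rsv->target.type  = UWB_RSV_TARGET_DEV;
	rsv->target.dev   = peer;
	rsv->type         = UWB_DRP_TYPE_HARD;	/* assumed spec constant */
	rsv->max_mas      = 256;
	rsv->min_mas      = 16;
	rsv->max_interval = 2;
	return uwb_rsv_establish(rsv);	/* undo later with uwb_rsv_terminate() */
}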
/**
* Radio Control Interface instance
*
*
* Life cycle rules: those of the UWB Device.
*
* @index: an index number for this radio controller, as used in the
* device name.
* @version: version of protocol supported by this device
* @priv: Backend implementation; rw with uwb_dev.dev.sem taken.
* @cmd: Backend implementation to execute commands; rw and call
* only with uwb_dev.dev.sem taken.
* @reset: Hardware reset of radio controller and any PAL controllers.
* @filter: Backend implementation to manipulate data to and from device
* to be compliant to specification assumed by driver (WHCI
* 0.95).
*
* uwb_dev.dev.mutex is used to execute commands and update
* the corresponding structures; can't use a spinlock
* because rc->cmd() can sleep.
* @ies: This is a dynamically allocated array caching the
* IEs (settable by the host) that the beacon of this
* radio controller is currently sending.
*
* In reality, we store here the full command we set to
* the radio controller (which is basically a command
* prefix followed by all the IEs the beacon currently
* contains). This way we don't have to realloc and
* memcpy when setting it.
*
* We set this up in uwb_rc_ie_setup(), where we alloc
* this struct, call get_ie() [so we know which IEs are
* currently being sent, if any].
*
* @ies_capacity: Amount of space (in bytes) allocated in @ies. The
* amount used is given by sizeof(*ies) plus ies->wIELength
* (which is a little endian quantity all the time).
* @ies_mutex: protect the IE cache
* @dbg: information for the debug interface
*/
struct uwb_rc {
struct uwb_dev uwb_dev;
int index;
u16 version;
struct module *owner;
void *priv;
int (*start)(struct uwb_rc *rc);
void (*stop)(struct uwb_rc *rc);
int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t);
int (*reset)(struct uwb_rc *rc);
int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *);
int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t,
size_t *, size_t *);
spinlock_t neh_lock; /* protects neh_* and ctx_* */
struct list_head neh_list; /* Open NE handles */
unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)];
u8 ctx_roll;
int beaconing; /* Beaconing state [channel number] */
int beaconing_forced;
int scanning;
enum uwb_scan_type scan_type:3;
unsigned ready:1;
struct uwb_notifs_chain notifs_chain;
struct uwb_beca uwb_beca;
struct uwbd uwbd;
struct uwb_drp_backoff_win bow;
struct uwb_drp_avail drp_avail;
struct list_head reservations;
struct list_head cnflt_alien_list;
struct uwb_mas_bm cnflt_alien_bitmap;
struct mutex rsvs_mutex;
spinlock_t rsvs_lock;
struct workqueue_struct *rsv_workq;
struct delayed_work rsv_update_work;
struct delayed_work rsv_alien_bp_work;
int set_drp_ie_pending;
struct mutex ies_mutex;
struct uwb_rc_cmd_set_ie *ies;
size_t ies_capacity;
struct list_head pals;
int active_pals;
struct uwb_dbg *dbg;
};
/**
* struct uwb_pal - a UWB PAL
* @name: descriptive name for this PAL (wusbhc, wlp, etc.).
* @device: a device for the PAL. Used to link the PAL and the radio
* controller in sysfs.
* @rc: the radio controller the PAL uses.
* @channel_changed: called when the channel used by the radio changes.
* A channel of -1 means the channel has been stopped.
* @new_rsv: called when a peer requests a reservation (may be NULL if
* the PAL cannot accept reservation requests).
* @channel: channel being used by the PAL; 0 if the PAL isn't using
* the radio; -1 if the PAL wishes to use the radio but
* cannot.
* @debugfs_dir: a debugfs directory which the PAL can use for its own
* debugfs files.
*
* A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB
* radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP).
*
* The PALs using a radio controller must register themselves to
* permit the UWB stack to coordinate usage of the radio between the
* various PALs or to allow PALs to respond to certain requests from
* peers.
*
* A struct uwb_pal should be embedded in a containing structure
* belonging to the PAL and initialized with uwb_pal_init(). Fields
* should be set appropriately by the PAL before registering the PAL
* with uwb_pal_register().
*/
struct uwb_pal {
struct list_head node;
const char *name;
struct device *device;
struct uwb_rc *rc;
void (*channel_changed)(struct uwb_pal *pal, int channel);
void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv);
int channel;
struct dentry *debugfs_dir;
};
void uwb_pal_init(struct uwb_pal *pal);
int uwb_pal_register(struct uwb_pal *pal);
void uwb_pal_unregister(struct uwb_pal *pal);
int uwb_radio_start(struct uwb_pal *pal);
void uwb_radio_stop(struct uwb_pal *pal);
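/*
 * Illustrative registration flow for the PAL API above; struct my_pal,
 * my_channel_changed and my_pal_attach are hypothetical names.
 */
struct my_pal {
	struct uwb_pal pal;
	/* PAL-private state would live here */
};

static void my_channel_changed(struct uwb_pal *pal, int channel)
{
	if (channel < 0)
		pr_info("my-pal: radio stopped\n");
}

static int my_pal_attach(struct my_pal *mp, struct uwb_rc *rc, struct device *dev)
{
	uwb_pal_init(&mp->pal);
	mp->pal.name = "my-pal";
	mp->pal.rc = rc;
	mp->pal.device = dev;
	mp->pal.channel_changed = my_channel_changed;
	return uwb_pal_register(&mp->pal);	/* pair with uwb_pal_unregister() */
}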
/*
* General public API
*
* This API can be used by UWB device drivers or by those implementing
* UWB Radio Controllers
*/
struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
const struct uwb_dev_addr *devaddr);
struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *);
static inline void uwb_dev_get(struct uwb_dev *uwb_dev)
{
get_device(&uwb_dev->dev);
}
static inline void uwb_dev_put(struct uwb_dev *uwb_dev)
{
put_device(&uwb_dev->dev);
}
struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev);
/**
* Callback function for 'uwb_{dev,rc}_foreach()'.
*
* @dev: Linux device instance
* 'uwb_dev = container_of(dev, struct uwb_dev, dev)'
* @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'.
*
* @returns: 0 to continue the iterations, any other val to stop
* iterating and return the value to the caller of
* _foreach().
*/
typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv);
int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv);
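/*
 * Example of the iterator contract above (hypothetical names; the
 * counting logic is only for illustration).
 */
static int my_count_cb(struct device *dev, void *priv)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
	int *count = priv;

	(*count)++;
	(void)uwb_dev;	/* a real callback would inspect the device */
	return 0;	/* 0 == keep iterating */
}

static int my_count_devices(struct uwb_rc *rc)
{
	int count = 0;

	uwb_dev_for_each(rc, my_count_cb, &count);
	return count;
}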
struct uwb_rc *uwb_rc_alloc(void);
struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *);
struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *);
void uwb_rc_put(struct uwb_rc *rc);
typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg,
struct uwb_rceb *reply, ssize_t reply_size);
int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
struct uwb_rccb *cmd, size_t cmd_size,
u8 expected_type, u16 expected_event,
uwb_rc_cmd_cb_f cb, void *arg);
ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
struct uwb_rccb *cmd, size_t cmd_size,
struct uwb_rceb *reply, size_t reply_size);
ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
struct uwb_rccb *cmd, size_t cmd_size,
u8 expected_type, u16 expected_event,
struct uwb_rceb **preply);
size_t __uwb_addr_print(char *, size_t, const unsigned char *, int);
int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *);
int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *);
int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *);
int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *);
int __uwb_mac_addr_assigned_check(struct device *, void *);
int __uwb_dev_addr_assigned_check(struct device *, void *);
/* Print in @buf a pretty repr of @addr */
static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size,
const struct uwb_dev_addr *addr)
{
return __uwb_addr_print(buf, buf_size, addr->data, 0);
}
/* Print in @buf a pretty repr of @addr */
static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size,
const struct uwb_mac_addr *addr)
{
return __uwb_addr_print(buf, buf_size, addr->data, 1);
}
/* @returns 0 if device addresses @addr2 and @addr1 are equal */
static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1,
const struct uwb_dev_addr *addr2)
{
return memcmp(addr1, addr2, sizeof(*addr1));
}
/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */
static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1,
const struct uwb_mac_addr *addr2)
{
return memcmp(addr1, addr2, sizeof(*addr1));
}
/* @returns !0 if a MAC @addr is a broadcast address */
static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr)
{
struct uwb_mac_addr bcast = {
.data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
};
return !uwb_mac_addr_cmp(addr, &bcast);
}
/* @returns !0 if a MAC @addr is all zeroes*/
static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr)
{
struct uwb_mac_addr unset = {
.data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
};
return !uwb_mac_addr_cmp(addr, &unset);
}
/* @returns !0 if the address is in use. */
static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc,
struct uwb_dev_addr *addr)
{
return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr);
}
/*
* UWB Radio Controller API
*
* This API is used (in addition to the general API) to implement UWB
* Radio Controllers.
*/
void uwb_rc_init(struct uwb_rc *);
int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv);
void uwb_rc_rm(struct uwb_rc *);
void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t);
void uwb_rc_neh_error(struct uwb_rc *, int);
void uwb_rc_reset_all(struct uwb_rc *rc);
void uwb_rc_pre_reset(struct uwb_rc *rc);
int uwb_rc_post_reset(struct uwb_rc *rc);
/**
* uwb_rsv_is_owner - is the owner of this reservation the RC?
* @rsv: the reservation
*/
static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv)
{
return rsv->owner == &rsv->rc->uwb_dev;
}
/**
* enum uwb_notifs - UWB events that can be passed to any listeners
* @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group.
* @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group.
*
* Higher layers can register callback functions with the radio
* controller using uwb_notifs_register(). The radio controller
* maintains a list of all registered handlers and will notify all
* nodes when an event occurs.
*/
enum uwb_notifs {
UWB_NOTIF_ONAIR,
UWB_NOTIF_OFFAIR,
};
/* Callback function registered with UWB */
struct uwb_notifs_handler {
struct list_head list_node;
void (*cb)(void *, struct uwb_dev *, enum uwb_notifs);
void *data;
};
int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *);
int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *);
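/*
 * A sketch of how a listener could hook these notifications; the
 * handler below and its callback body are hypothetical.
 */
static void my_notif_cb(void *data, struct uwb_dev *uwb_dev, enum uwb_notifs event)
{
	if (event == UWB_NOTIF_ONAIR)
		dev_info(&uwb_dev->dev, "neighbour joined the beacon group\n");
	else if (event == UWB_NOTIF_OFFAIR)
		dev_info(&uwb_dev->dev, "neighbour left the beacon group\n");
}

static struct uwb_notifs_handler my_notifs = {
	.cb = my_notif_cb,
	/* .data would carry listener-private state */
};

/* register with uwb_notifs_register(rc, &my_notifs) and undo with
 * uwb_notifs_deregister(rc, &my_notifs) */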
/**
* UWB radio controller Event Size Entry (for creating entry tables)
*
* WUSB and WHCI define events and notifications, and they might have
* fixed or variable size.
*
* Each event/notification has a size which is not necessarily known
* in advance based on the event code. As well, vendor specific
* events/notifications will have a size impossible to determine
* unless we know about the device's specific details.
*
* It was way too smart of the spec writers not to think that it would
* be impossible for a generic driver to skip over vendor specific
* events/notifications if there are no LENGTH fields in the HEADER of
* each message...the transaction size cannot be counted on as the
* spec does not forbid packing more than one event in a single
* transaction.
*
* Thus, we guess sizes with tables (or for events, when you know the
* size ahead of time you can use uwb_rc_neh_extra_size*()). We
* register tables with the known events and their sizes, and then we
* traverse those tables. For those with variable length, we provide a
* way to lookup the size inside the event/notification's
* payload. This allows device-specific event size tables to be
* registered.
*
* @size: Size of the payload
*
* @offset: if != 0, at offset @offset-1 starts a field with a length
* that has to be added to @size. The format of the field is
* given by @type.
*
* @type: Type and length of the offset field. Most common is LE 16
* bits (that's why that is zero); others are there mostly to
* cover for bugs and weirdos.
*/
struct uwb_est_entry {
size_t size;
unsigned offset;
enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type;
};
int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product,
const struct uwb_est_entry *, size_t entries);
int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product,
const struct uwb_est_entry *, size_t entries);
ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
size_t len);
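/*
 * For illustration, a vendor-specific event size table could be
 * registered roughly like this.  The table contents and the type,
 * code_high, vendor and product values are placeholders, not real
 * identifiers.
 */
static const struct uwb_est_entry my_est[] = {
	/* event 0: fixed 8 byte payload */
	[0] = { .size = 8 },
	/* event 1: 6 byte header plus a 16-bit LE length field at offset 4 */
	[1] = { .size = 6, .offset = 5, .type = UWB_EST_16 },
};

static int my_register_est(void)
{
	return uwb_est_register(0xfe, 0, 0x1234, 0x5678,
				my_est, ARRAY_SIZE(my_est));
}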
/* -- Misc */
enum {
EDC_MAX_ERRORS = 10,
EDC_ERROR_TIMEFRAME = HZ,
};
/* error density counter */
struct edc {
unsigned long timestart;
u16 errorcount;
};
static inline
void edc_init(struct edc *edc)
{
edc->timestart = jiffies;
}
/* Called when an error occurred.
* This is way to determine if the number of acceptable errors per time
* period has been exceeded. It is not accurate as there are cases in which
* this scheme will not work, for example if there are periodic occurrences
* of errors that straddle updates to the start time. This scheme is
* sufficient for our usage.
*
* @returns 1 if maximum acceptable errors per timeframe has been exceeded.
*/
static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe)
{
unsigned long now;
now = jiffies;
if (now - err_hist->timestart > timeframe) {
err_hist->errorcount = 1;
err_hist->timestart = now;
} else if (++err_hist->errorcount > max_err) {
err_hist->errorcount = 0;
err_hist->timestart = now;
return 1;
}
return 0;
}
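/*
 * Typical call-site sketch for the error density counter, e.g. from a
 * completion or interrupt path (the surrounding names are hypothetical;
 * edc_init() is assumed to have been called at setup time).
 */
static struct edc my_err_hist;

static void my_handle_hw_error(struct device *dev)
{
	if (edc_inc(&my_err_hist, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
		dev_err(dev, "too many errors in one timeframe, resetting\n");
}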
/* Information Element handling */
struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len);
int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size);
int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id);
/*
* Transmission statistics
*
* UWB uses LQI and RSSI (one byte values) for reporting radio signal
* strength and line quality indication. We do quick and dirty
* averages of those. They are signed values, btw.
*
* For 8 bit quantities, we keep the min, the max, an accumulator
* (@sigma) and a # of samples. When @samples gets to 255, we compute
* the average (@sigma / @samples), place it in @sigma and reset
* @samples to 1 (so we use it as the first sample).
*
* Now, statistically speaking, probably I am kicking the kidneys of
* some books I have in my shelves collecting dust, but I just want to
* get an approx, not the Nobel.
*
* LOCKING: there is no locking per se, but we try to keep a lockless
* schema. Only _add_samples() modifies the values--as long as you
* have other locking on top that makes sure that no two calls of
* _add_sample() happen at the same time, then we are fine. Now, for
* resetting the values we just set @samples to 0 and that makes the
* next _add_sample() to start with defaults. Reading the values in
* _show() currently can race, so you need to make sure the calls are
* under the same lock that protects calls to _add_sample(). FIXME:
* currently unlocked (It is not ultraprecise but does the trick. Bite
* me).
*/
struct stats {
s8 min, max;
s16 sigma;
atomic_t samples;
};
static inline
void stats_init(struct stats *stats)
{
atomic_set(&stats->samples, 0);
wmb();
}
static inline
void stats_add_sample(struct stats *stats, s8 sample)
{
s8 min, max;
s16 sigma;
unsigned samples = atomic_read(&stats->samples);
if (samples == 0) { /* it was zero before, so we initialize */
min = 127;
max = -128;
sigma = 0;
} else {
min = stats->min;
max = stats->max;
sigma = stats->sigma;
}
if (sample < min) /* compute new values */
min = sample;
else if (sample > max)
max = sample;
sigma += sample;
stats->min = min; /* commit */
stats->max = max;
stats->sigma = sigma;
if (atomic_add_return(1, &stats->samples) > 255) {
/* wrapped around! reset */
stats->sigma = sigma / 256;
atomic_set(&stats->samples, 1);
}
}
static inline ssize_t stats_show(struct stats *stats, char *buf)
{
int min, max, avg;
int samples = atomic_read(&stats->samples);
if (samples == 0)
min = max = avg = 0;
else {
min = stats->min;
max = stats->max;
avg = stats->sigma / samples;
}
return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg);
}
static inline ssize_t stats_store(struct stats *stats, const char *buf,
size_t size)
{
stats_init(stats);
return size;
}
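/*
 * A sketch of how the statistics helpers tie together with a sysfs
 * attribute; names are illustrative.  In the stack itself, fields such
 * as uwb_beca_e's rssi_stats were fed one sample per received frame
 * with stats_add_sample() and dumped with stats_show().
 */
static struct stats my_rssi_stats;	/* stats_init() at setup time */

static ssize_t my_rssi_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return stats_show(&my_rssi_stats, buf);	/* prints "min max avg\n" */
}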
#endif /* #ifndef __LINUX__UWB_H__ */

View File

@ -1,356 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ultra Wide Band
* Neighborhood Management Daemon
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This daemon takes care of maintaining information that describes the
* UWB neighborhood that the radios in this machine can see. It also
* keeps a tab of which devices are visible, makes sure each HC sits
* on a different channel to avoid interfering, etc.
*
* Different drivers (radio controller, device, any API in general)
* communicate with this daemon through an event queue. Daemon wakes
* up, takes a list of events and handles them one by one; handling
* function is extracted from a table based on the event's type and
* subtype. Events are freed only if the handling function says so.
*
* . Lock protecting the event list has to be a spinlock and locked
* with IRQSAVE because it might be called from an interrupt
* context (ie: when events arrive and the notification drops
* down from the ISR).
*
* . UWB radio controller drivers queue events to the daemon using
* uwbd_event_queue(). They just get the event, chew it to make it
* look like UWBD likes it and pass it in a buffer allocated with
* uwb_event_alloc().
*
* EVENTS
*
* Events have a type, a subtype, a length, some other stuff and the
* data blob, which depends on the event. The header is 'struct
* uwb_event'; for payloads, see 'struct uwbd_evt_*'.
*
* EVENT HANDLER TABLES
*
* To find a handling function for an event, the type is used to index
* a subtype-table in the type-table. The subtype-table is indexed
* with the subtype to get the function that handles the event. Start
* with the main type-table 'uwbd_evt_type_handler'.
*
* DEVICES
*
* Devices are created when a bunch of beacons have been received and
* it is established that the device has stable radio presence. CREATED
* only, not configured. Devices are ONLY configured when an
* Application-Specific IE Probe is received, in which the device
* declares which Protocol ID it groks. Then the device is CONFIGURED
* (and the driver->probe() stuff of the device model is invoked).
*
* Devices are considered disconnected when a certain number of
* beacons are not received in an amount of time.
*
* Handler functions are called normally uwbd_evt_handle_*().
*/
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/freezer.h>
#include "uwb-internal.h"
/*
* UWBD Event handler function signature
*
* Return !0 if the event should not be freed (i.e., the handler
* takes/took care of it). 0 means the daemon code will free the
* event.
*
* @evt->rc is already referenced and guaranteed to exist. See
* uwb_evt_handle().
*/
typedef int (*uwbd_evt_handler_f)(struct uwb_event *);
/**
* Properties of a UWBD event
*
* @handler: the function that will handle this event
* @name: text name of event
*/
struct uwbd_event {
uwbd_evt_handler_f handler;
const char *name;
};
/* Table of handlers for and properties of the UWBD Radio Control Events */
static struct uwbd_event uwbd_urc_events[] = {
[UWB_RC_EVT_IE_RCV] = {
.handler = uwbd_evt_handle_rc_ie_rcv,
.name = "IE_RECEIVED"
},
[UWB_RC_EVT_BEACON] = {
.handler = uwbd_evt_handle_rc_beacon,
.name = "BEACON_RECEIVED"
},
[UWB_RC_EVT_BEACON_SIZE] = {
.handler = uwbd_evt_handle_rc_beacon_size,
.name = "BEACON_SIZE_CHANGE"
},
[UWB_RC_EVT_BPOIE_CHANGE] = {
.handler = uwbd_evt_handle_rc_bpoie_change,
.name = "BPOIE_CHANGE"
},
[UWB_RC_EVT_BP_SLOT_CHANGE] = {
.handler = uwbd_evt_handle_rc_bp_slot_change,
.name = "BP_SLOT_CHANGE"
},
[UWB_RC_EVT_DRP_AVAIL] = {
.handler = uwbd_evt_handle_rc_drp_avail,
.name = "DRP_AVAILABILITY_CHANGE"
},
[UWB_RC_EVT_DRP] = {
.handler = uwbd_evt_handle_rc_drp,
.name = "DRP"
},
[UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
.handler = uwbd_evt_handle_rc_dev_addr_conflict,
.name = "DEV_ADDR_CONFLICT",
},
};
struct uwbd_evt_type_handler {
const char *name;
struct uwbd_event *uwbd_events;
size_t size;
};
/* Table of handlers for each UWBD Event type. */
static struct uwbd_evt_type_handler uwbd_urc_evt_type_handlers[] = {
[UWB_RC_CET_GENERAL] = {
.name = "URC",
.uwbd_events = uwbd_urc_events,
.size = ARRAY_SIZE(uwbd_urc_events),
},
};
static const struct uwbd_event uwbd_message_handlers[] = {
[UWB_EVT_MSG_RESET] = {
.handler = uwbd_msg_handle_reset,
.name = "reset",
},
};
/*
* Handle an URC event passed to the UWB Daemon
*
* @evt: the event to handle
* @returns: 0 if the event can be kfreed, !0 on the contrary
* (somebody else took ownership) [coincidentally, returning
* a <0 errno code will free it :)].
*
* Looks up the two indirection tables (one for the type, one for the
* subtype) to decide which function handles it and then calls the
* handler.
*
* The event structure passed to the event handler has the radio
* controller in @evt->rc referenced. The reference will be dropped
* once the handler returns, so if it needs it for longer (async),
* it'll need to take another one.
*/
static
int uwbd_event_handle_urc(struct uwb_event *evt)
{
int result = -EINVAL;
struct uwbd_evt_type_handler *type_table;
uwbd_evt_handler_f handler;
u8 type, context;
u16 event;
type = evt->notif.rceb->bEventType;
event = le16_to_cpu(evt->notif.rceb->wEvent);
context = evt->notif.rceb->bEventContext;
if (type >= ARRAY_SIZE(uwbd_urc_evt_type_handlers))
goto out;
type_table = &uwbd_urc_evt_type_handlers[type];
if (type_table->uwbd_events == NULL)
goto out;
if (event >= type_table->size)
goto out;
handler = type_table->uwbd_events[event].handler;
if (handler == NULL)
goto out;
result = (*handler)(evt);
out:
if (result < 0)
dev_err(&evt->rc->uwb_dev.dev,
"UWBD: event 0x%02x/%04x/%02x, handling failed: %d\n",
type, event, context, result);
return result;
}
static void uwbd_event_handle_message(struct uwb_event *evt)
{
struct uwb_rc *rc;
int result;
rc = evt->rc;
if (evt->message < 0 || evt->message >= ARRAY_SIZE(uwbd_message_handlers)) {
dev_err(&rc->uwb_dev.dev, "UWBD: invalid message type %d\n", evt->message);
return;
}
result = uwbd_message_handlers[evt->message].handler(evt);
if (result < 0)
dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n",
uwbd_message_handlers[evt->message].name, result);
}
static void uwbd_event_handle(struct uwb_event *evt)
{
struct uwb_rc *rc;
int should_keep;
rc = evt->rc;
if (rc->ready) {
switch (evt->type) {
case UWB_EVT_TYPE_NOTIF:
should_keep = uwbd_event_handle_urc(evt);
if (should_keep <= 0)
kfree(evt->notif.rceb);
break;
case UWB_EVT_TYPE_MSG:
uwbd_event_handle_message(evt);
break;
default:
dev_err(&rc->uwb_dev.dev, "UWBD: invalid event type %d\n", evt->type);
break;
}
}
__uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */
}
/**
* UWB Daemon
*
* Listens to all UWB notifications and takes care to track the state
* of the UWB neighbourhood for the kernel. When we do a run, we
* spinlock, move the list to a private copy and release the
* lock. Hold it as little as possible. Not a conflict: it is
* guaranteed we own the events in the private list.
*
* FIXME: should change so we don't have a 1HZ timer all the time, but
* only if there are devices.
*/
static int uwbd(void *param)
{
struct uwb_rc *rc = param;
unsigned long flags;
struct uwb_event *evt;
int should_stop = 0;
while (1) {
wait_event_interruptible_timeout(
rc->uwbd.wq,
!list_empty(&rc->uwbd.event_list)
|| (should_stop = kthread_should_stop()),
HZ);
if (should_stop)
break;
spin_lock_irqsave(&rc->uwbd.event_list_lock, flags);
if (!list_empty(&rc->uwbd.event_list)) {
evt = list_first_entry(&rc->uwbd.event_list, struct uwb_event, list_node);
list_del(&evt->list_node);
} else
evt = NULL;
spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags);
if (evt) {
uwbd_event_handle(evt);
kfree(evt);
}
uwb_beca_purge(rc); /* Purge devices that left */
}
return 0;
}
/** Start the UWB daemon */
void uwbd_start(struct uwb_rc *rc)
{
struct task_struct *task = kthread_run(uwbd, rc, "uwbd");
if (IS_ERR(task)) {
rc->uwbd.task = NULL;
printk(KERN_ERR "UWB: Cannot start management daemon; "
"UWB won't work\n");
} else {
rc->uwbd.task = task;
rc->uwbd.pid = rc->uwbd.task->pid;
}
}
/* Stop the UWB daemon and free any unprocessed events */
void uwbd_stop(struct uwb_rc *rc)
{
if (rc->uwbd.task)
kthread_stop(rc->uwbd.task);
uwbd_flush(rc);
}
/*
* Queue an event for the management daemon
*
* When some lower layer receives an event, it uses this function to
* push it forward to the UWB daemon.
*
* Once you pass the event, you don't own it any more, but the daemon
* does. It will uwb_event_free() it when done, so make sure you
* uwb_event_alloc()ed it or bad things will happen.
*
* If the daemon is not running, we just free the event.
*/
void uwbd_event_queue(struct uwb_event *evt)
{
struct uwb_rc *rc = evt->rc;
unsigned long flags;
spin_lock_irqsave(&rc->uwbd.event_list_lock, flags);
if (rc->uwbd.pid != 0) {
list_add(&evt->list_node, &rc->uwbd.event_list);
wake_up_all(&rc->uwbd.wq);
} else {
__uwb_rc_put(evt->rc);
if (evt->type == UWB_EVT_TYPE_NOTIF)
kfree(evt->notif.rceb);
kfree(evt);
}
spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags);
return;
}
void uwbd_flush(struct uwb_rc *rc)
{
struct uwb_event *evt, *nxt;
spin_lock_irq(&rc->uwbd.event_list_lock);
list_for_each_entry_safe(evt, nxt, &rc->uwbd.event_list, list_node) {
if (evt->rc == rc) {
__uwb_rc_put(rc);
list_del(&evt->list_node);
if (evt->type == UWB_EVT_TYPE_NOTIF)
kfree(evt->notif.rceb);
kfree(evt);
}
}
spin_unlock_irq(&rc->uwbd.event_list_lock);
}

View File

@ -1,467 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Wireless Host Controller: Radio Control Interface (WHCI v0.95[2.3])
* Radio Control command/event transport to the UWB stack
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* Initialize and hook up the Radio Control interface.
*
* For each device probed, creates a 'struct whcrc' which contains
* just the representation of the UWB Radio Controller, and the logic
* for reading notifications and passing them to the UWB Core.
*
* So we initialize all of those, register the UWB Radio Controller
* and setup the notification/event handle to pipe the notifications
* to the UWB management Daemon.
*
* Once uwb_rc_add() is called, the UWB stack takes control, resets
* the radio and readies the device to take commands from the UWB
* API/user-space.
*
* Note this driver is just a transport driver; the commands are
* formed at the UWB stack and given to this driver, which will deliver
* them to the hw and transfer the replies/notifications back to the
* UWB stack through the UWB daemon (UWBD).
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "uwb.h"
#include "include/whci.h"
#include "include/umc.h"
#include "uwb-internal.h"
/**
* Descriptor for an instance of the UWB Radio Control Driver that
* attaches to the URC interface of the WHCI PCI card.
*
* Unless there is a lock specific to the 'data members', all access
* is protected by uwb_rc->mutex.
*/
struct whcrc {
struct umc_dev *umc_dev;
struct uwb_rc *uwb_rc; /* UWB host controller */
unsigned long area;
void __iomem *rc_base;
size_t rc_len;
spinlock_t irq_lock;
void *evt_buf, *cmd_buf;
dma_addr_t evt_dma_buf, cmd_dma_buf;
wait_queue_head_t cmd_wq;
struct work_struct event_work;
};
/**
* Execute a UWB RC command on WHCI/RC
*
* @rc: Instance of a Radio Controller that is a whcrc
* @cmd: Buffer containing the RCCB and payload to execute
* @cmd_size: Size of the command buffer.
*
* We copy the command into whcrc->cmd_buf (as it is pretty and
* aligned and physically contiguous) and then press the right keys in
* the controller's URCCMD register to get it to read it. We might
* have to wait for the cmd_sem to be open to us.
*
* NOTE: rc's mutex has to be locked
*/
static int whcrc_cmd(struct uwb_rc *uwb_rc,
const struct uwb_rccb *cmd, size_t cmd_size)
{
int result = 0;
struct whcrc *whcrc = uwb_rc->priv;
struct device *dev = &whcrc->umc_dev->dev;
u32 urccmd;
if (cmd_size >= 4096)
return -EINVAL;
/*
* If the URC is halted, then the hardware has reset itself.
* Attempt to recover by restarting the device and then return
* an error as it's likely that the current command isn't
* valid for a newly started RC.
*/
if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) {
dev_err(dev, "requesting reset of halted radio controller\n");
uwb_rc_reset_all(uwb_rc);
return -EIO;
}
result = wait_event_timeout(whcrc->cmd_wq,
!(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2);
if (result == 0) {
dev_err(dev, "device is not ready to execute commands\n");
return -ETIMEDOUT;
}
memmove(whcrc->cmd_buf, cmd, cmd_size);
le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR);
spin_lock(&whcrc->irq_lock);
urccmd = le_readl(whcrc->rc_base + URCCMD);
urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK);
le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size,
whcrc->rc_base + URCCMD);
spin_unlock(&whcrc->irq_lock);
return 0;
}
static int whcrc_reset(struct uwb_rc *rc)
{
struct whcrc *whcrc = rc->priv;
return umc_controller_reset(whcrc->umc_dev);
}
/**
* Reset event reception mechanism and tell hw we are ready to get more
*
* We have read all the events in the event buffer, so we are ready to
* reset it to the beginning.
*
* This is only called during initialization or after an event buffer
* has been retired. This means we can be sure that event processing
* is disabled and it's safe to update the URCEVTADDR register.
*
* There's no need to wait for the event processing to start as the
* URC will not clear URCCMD_ACTIVE until (internal) event buffer
* space is available.
*/
static
void whcrc_enable_events(struct whcrc *whcrc)
{
u32 urccmd;
le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR);
spin_lock(&whcrc->irq_lock);
urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE;
le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD);
spin_unlock(&whcrc->irq_lock);
}
static void whcrc_event_work(struct work_struct *work)
{
struct whcrc *whcrc = container_of(work, struct whcrc, event_work);
size_t size;
u64 urcevtaddr;
urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR);
size = urcevtaddr & URCEVTADDR_OFFSET_MASK;
uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size);
whcrc_enable_events(whcrc);
}
/**
* Catch interrupts?
*
* We ack immediately (and expect the hw to do the right thing and
* raise another IRQ if things have changed :)
*/
static
irqreturn_t whcrc_irq_cb(int irq, void *_whcrc)
{
struct whcrc *whcrc = _whcrc;
struct device *dev = &whcrc->umc_dev->dev;
u32 urcsts;
urcsts = le_readl(whcrc->rc_base + URCSTS);
if (!(urcsts & URCSTS_INT_MASK))
return IRQ_NONE;
le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS);
if (urcsts & URCSTS_HSE) {
dev_err(dev, "host system error -- hardware halted\n");
/* FIXME: do something sensible here */
goto out;
}
if (urcsts & URCSTS_ER)
schedule_work(&whcrc->event_work);
if (urcsts & URCSTS_RCI)
wake_up_all(&whcrc->cmd_wq);
out:
return IRQ_HANDLED;
}
/**
* Initialize a UMC RC interface: map regions, get (shared) IRQ
*/
static
int whcrc_setup_rc_umc(struct whcrc *whcrc)
{
int result = 0;
struct device *dev = &whcrc->umc_dev->dev;
struct umc_dev *umc_dev = whcrc->umc_dev;
whcrc->area = umc_dev->resource.start;
whcrc->rc_len = resource_size(&umc_dev->resource);
result = -EBUSY;
if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) {
dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n",
whcrc->rc_len, whcrc->area, result);
goto error_request_region;
}
whcrc->rc_base = ioremap(whcrc->area, whcrc->rc_len);
if (whcrc->rc_base == NULL) {
dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n",
whcrc->rc_len, whcrc->area, result);
goto error_ioremap;
}
result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED,
KBUILD_MODNAME, whcrc);
if (result < 0) {
dev_err(dev, "can't allocate IRQ %d: %d\n",
umc_dev->irq, result);
goto error_request_irq;
}
result = -ENOMEM;
whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
&whcrc->cmd_dma_buf, GFP_KERNEL);
if (whcrc->cmd_buf == NULL) {
dev_err(dev, "Can't allocate cmd transfer buffer\n");
goto error_cmd_buffer;
}
whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
&whcrc->evt_dma_buf, GFP_KERNEL);
if (whcrc->evt_buf == NULL) {
dev_err(dev, "Can't allocate evt transfer buffer\n");
goto error_evt_buffer;
}
return 0;
error_evt_buffer:
dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
whcrc->cmd_dma_buf);
error_cmd_buffer:
free_irq(umc_dev->irq, whcrc);
error_request_irq:
iounmap(whcrc->rc_base);
error_ioremap:
release_mem_region(whcrc->area, whcrc->rc_len);
error_request_region:
return result;
}
/**
* Release RC's UMC resources
*/
static
void whcrc_release_rc_umc(struct whcrc *whcrc)
{
struct umc_dev *umc_dev = whcrc->umc_dev;
dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf,
whcrc->evt_dma_buf);
dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
whcrc->cmd_dma_buf);
free_irq(umc_dev->irq, whcrc);
iounmap(whcrc->rc_base);
release_mem_region(whcrc->area, whcrc->rc_len);
}
/**
* whcrc_start_rc - start a WHCI radio controller
* @whcrc: the radio controller to start
*
* Reset the UMC device, start the radio controller, enable events and
* finally enable interrupts.
*/
static int whcrc_start_rc(struct uwb_rc *rc)
{
struct whcrc *whcrc = rc->priv;
struct device *dev = &whcrc->umc_dev->dev;
/* Reset the thing */
le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD);
if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0,
5000, "hardware reset") < 0)
return -EBUSY;
/* Set the event buffer, start the controller (enable IRQs later) */
le_writel(0, whcrc->rc_base + URCINTR);
le_writel(URCCMD_RS, whcrc->rc_base + URCCMD);
if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0,
5000, "radio controller start") < 0)
return -ETIMEDOUT;
whcrc_enable_events(whcrc);
le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR);
return 0;
}
/**
* whcrc_stop_rc - stop a WHCI radio controller
* @whcrc: the radio controller to stop
*
* Disable interrupts and cancel any pending event processing work
* before clearing the Run/Stop bit.
*/
static
void whcrc_stop_rc(struct uwb_rc *rc)
{
struct whcrc *whcrc = rc->priv;
struct umc_dev *umc_dev = whcrc->umc_dev;
le_writel(0, whcrc->rc_base + URCINTR);
cancel_work_sync(&whcrc->event_work);
le_writel(0, whcrc->rc_base + URCCMD);
whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS,
URCSTS_HALTED, URCSTS_HALTED, 100, "radio controller stop");
}
static void whcrc_init(struct whcrc *whcrc)
{
spin_lock_init(&whcrc->irq_lock);
init_waitqueue_head(&whcrc->cmd_wq);
INIT_WORK(&whcrc->event_work, whcrc_event_work);
}
/**
* Initialize the radio controller.
*
* NOTE: we setup whcrc->uwb_rc before calling uwb_rc_add(); in the
* IRQ handler we use that to determine if the hw is ready to
* handle events. Looks like a race condition, but it really is
* not.
*/
static
int whcrc_probe(struct umc_dev *umc_dev)
{
int result;
struct uwb_rc *uwb_rc;
struct whcrc *whcrc;
struct device *dev = &umc_dev->dev;
result = -ENOMEM;
uwb_rc = uwb_rc_alloc();
if (uwb_rc == NULL) {
dev_err(dev, "unable to allocate RC instance\n");
goto error_rc_alloc;
}
whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL);
if (whcrc == NULL) {
dev_err(dev, "unable to allocate WHC-RC instance\n");
goto error_alloc;
}
whcrc_init(whcrc);
whcrc->umc_dev = umc_dev;
result = whcrc_setup_rc_umc(whcrc);
if (result < 0) {
dev_err(dev, "Can't setup RC UMC interface: %d\n", result);
goto error_setup_rc_umc;
}
whcrc->uwb_rc = uwb_rc;
uwb_rc->owner = THIS_MODULE;
uwb_rc->cmd = whcrc_cmd;
uwb_rc->reset = whcrc_reset;
uwb_rc->start = whcrc_start_rc;
uwb_rc->stop = whcrc_stop_rc;
result = uwb_rc_add(uwb_rc, dev, whcrc);
if (result < 0)
goto error_rc_add;
umc_set_drvdata(umc_dev, whcrc);
return 0;
error_rc_add:
whcrc_release_rc_umc(whcrc);
error_setup_rc_umc:
kfree(whcrc);
error_alloc:
uwb_rc_put(uwb_rc);
error_rc_alloc:
return result;
}
/**
* Clean up the radio control resources
*
* When we up the command semaphore, everybody possibly blocked trying to
* execute a command should be granted entry and then they'll see the
* host is quiescing and up it (so it will chain to the next waiter).
* This should not happen (in any case), as we can only remove when
* there are no handles open...
*/
static void whcrc_remove(struct umc_dev *umc_dev)
{
struct whcrc *whcrc = umc_get_drvdata(umc_dev);
struct uwb_rc *uwb_rc = whcrc->uwb_rc;
umc_set_drvdata(umc_dev, NULL);
uwb_rc_rm(uwb_rc);
whcrc_release_rc_umc(whcrc);
kfree(whcrc);
uwb_rc_put(uwb_rc);
}
static int whcrc_pre_reset(struct umc_dev *umc)
{
struct whcrc *whcrc = umc_get_drvdata(umc);
struct uwb_rc *uwb_rc = whcrc->uwb_rc;
uwb_rc_pre_reset(uwb_rc);
return 0;
}
static int whcrc_post_reset(struct umc_dev *umc)
{
struct whcrc *whcrc = umc_get_drvdata(umc);
struct uwb_rc *uwb_rc = whcrc->uwb_rc;
return uwb_rc_post_reset(uwb_rc);
}
/* PCI device ID's that we handle [so it gets loaded] */
static struct pci_device_id __used whcrc_id_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
{ /* empty last entry */ }
};
MODULE_DEVICE_TABLE(pci, whcrc_id_table);
static struct umc_driver whcrc_driver = {
.name = "whc-rc",
.cap_id = UMC_CAP_ID_WHCI_RC,
.probe = whcrc_probe,
.remove = whcrc_remove,
.pre_reset = whcrc_pre_reset,
.post_reset = whcrc_post_reset,
};
static int __init whcrc_driver_init(void)
{
return umc_driver_register(&whcrc_driver);
}
module_init(whcrc_driver_init);
static void __exit whcrc_driver_exit(void)
{
umc_driver_unregister(&whcrc_driver);
}
module_exit(whcrc_driver_exit);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Wireless Host Controller Radio Control Driver");
MODULE_LICENSE("GPL");

View File

@ -1,257 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* WHCI UWB Multi-interface Controller enumerator.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include "include/whci.h"
#include "include/umc.h"
struct whci_card {
struct pci_dev *pci;
void __iomem *uwbbase;
u8 n_caps;
struct umc_dev *devs[0];
};
/* Fix faulty HW :( */
static
u64 whci_capdata_quirks(struct whci_card *card, u64 capdata)
{
u64 capdata_orig = capdata;
struct pci_dev *pci_dev = card->pci;
if (pci_dev->vendor == PCI_VENDOR_ID_INTEL
&& (pci_dev->device == 0x0c3b || pci_dev->device == 0004)
&& pci_dev->class == 0x0d1010) {
switch (UWBCAPDATA_TO_CAP_ID(capdata)) {
/* WLP capability has 0x100 bytes of aperture */
case 0x80:
capdata |= 0x40 << 8; break;
/* WUSB capability has 0x80 bytes of aperture
* and ID is 1 */
case 0x02:
capdata &= ~0xffff;
capdata |= 0x2001;
break;
}
}
if (capdata_orig != capdata)
dev_warn(&pci_dev->dev,
"PCI v%04x d%04x c%06x#%02x: "
"corrected capdata from %016Lx to %016Lx\n",
pci_dev->vendor, pci_dev->device, pci_dev->class,
(unsigned)UWBCAPDATA_TO_CAP_ID(capdata),
(unsigned long long)capdata_orig,
(unsigned long long)capdata);
return capdata;
}
/**
* whci_wait_for - wait for a WHCI register to be set
*
* Polls (for at most @max_ms ms) until '*@reg & @mask == @result'.
*/
int whci_wait_for(struct device *dev, u32 __iomem *reg, u32 mask, u32 result,
unsigned long max_ms, const char *tag)
{
unsigned t = 0;
u32 val;
for (;;) {
val = le_readl(reg);
if ((val & mask) == result)
break;
if (t >= max_ms) {
dev_err(dev, "%s timed out\n", tag);
return -ETIMEDOUT;
}
msleep(10);
t += 10;
}
return 0;
}
EXPORT_SYMBOL_GPL(whci_wait_for);
/*
* NOTE: the capinfo and capdata registers are slightly different
* (size and cap-id fields). So for cap #0, we need to fill
* in. Size comes from the size of the register block
* (statically calculated); cap_id comes from nowhere, we use
* zero, that is reserved, for the radio controller, because
* none was defined at the spec level.
*/
static int whci_add_cap(struct whci_card *card, int n)
{
struct umc_dev *umc;
u64 capdata;
int bar, err;
umc = umc_device_create(&card->pci->dev, n);
if (umc == NULL)
return -ENOMEM;
capdata = le_readq(card->uwbbase + UWBCAPDATA(n));
bar = UWBCAPDATA_TO_BAR(capdata) << 1;
capdata = whci_capdata_quirks(card, capdata);
/* Capability 0 is the radio controller. Its size is 32
* bytes (WHCI0.95[2.3, T2-9]). */
umc->version = UWBCAPDATA_TO_VERSION(capdata);
umc->cap_id = n == 0 ? 0 : UWBCAPDATA_TO_CAP_ID(capdata);
umc->bar = bar;
umc->resource.start = pci_resource_start(card->pci, bar)
+ UWBCAPDATA_TO_OFFSET(capdata);
umc->resource.end = umc->resource.start
+ (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1;
umc->resource.name = dev_name(&umc->dev);
umc->resource.flags = card->pci->resource[bar].flags;
umc->resource.parent = &card->pci->resource[bar];
umc->irq = card->pci->irq;
err = umc_device_register(umc);
if (err < 0)
goto error;
card->devs[n] = umc;
return 0;
error:
kfree(umc);
return err;
}
static void whci_del_cap(struct whci_card *card, int n)
{
struct umc_dev *umc = card->devs[n];
umc_device_unregister(umc);
}
static int whci_n_caps(struct pci_dev *pci)
{
void __iomem *uwbbase;
u64 capinfo;
uwbbase = pci_iomap(pci, 0, 8);
if (!uwbbase)
return -ENOMEM;
capinfo = le_readq(uwbbase + UWBCAPINFO);
pci_iounmap(pci, uwbbase);
return UWBCAPINFO_TO_N_CAPS(capinfo);
}
static int whci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct whci_card *card;
int err, n_caps, n;
err = pci_enable_device(pci);
if (err < 0)
goto error;
pci_enable_msi(pci);
pci_set_master(pci);
err = -ENXIO;
if (!pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
else if (!pci_set_dma_mask(pci, DMA_BIT_MASK(32)))
pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
else
goto error_dma;
err = n_caps = whci_n_caps(pci);
if (n_caps < 0)
goto error_ncaps;
err = -ENOMEM;
card = kzalloc(sizeof(struct whci_card)
+ sizeof(struct umc_dev *) * (n_caps + 1),
GFP_KERNEL);
if (card == NULL)
goto error_kzalloc;
card->pci = pci;
card->n_caps = n_caps;
err = -EBUSY;
if (!request_mem_region(pci_resource_start(pci, 0),
UWBCAPDATA_SIZE(card->n_caps),
"whci (capability data)"))
goto error_request_memregion;
err = -ENOMEM;
card->uwbbase = pci_iomap(pci, 0, UWBCAPDATA_SIZE(card->n_caps));
if (!card->uwbbase)
goto error_iomap;
/* Add each capability. */
for (n = 0; n <= card->n_caps; n++) {
err = whci_add_cap(card, n);
if (err < 0 && n == 0) {
dev_err(&pci->dev, "cannot bind UWB radio controller:"
" %d\n", err);
goto error_bind;
}
if (err < 0)
dev_warn(&pci->dev, "warning: cannot bind capability "
"#%u: %d\n", n, err);
}
pci_set_drvdata(pci, card);
return 0;
error_bind:
pci_iounmap(pci, card->uwbbase);
error_iomap:
release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps));
error_request_memregion:
kfree(card);
error_kzalloc:
error_ncaps:
error_dma:
pci_disable_msi(pci);
pci_disable_device(pci);
error:
return err;
}
static void whci_remove(struct pci_dev *pci)
{
struct whci_card *card = pci_get_drvdata(pci);
int n;
pci_set_drvdata(pci, NULL);
/* Unregister each capability in reverse (so the master device
* is unregistered last). */
for (n = card->n_caps; n >= 0 ; n--)
whci_del_cap(card, n);
pci_iounmap(pci, card->uwbbase);
release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps));
kfree(card);
pci_disable_msi(pci);
pci_disable_device(pci);
}
static struct pci_device_id whci_id_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, whci_id_table);
static struct pci_driver whci_driver = {
.name = "whci",
.id_table = whci_id_table,
.probe = whci_probe,
.remove = whci_remove,
};
module_pci_driver(whci_driver);
MODULE_DESCRIPTION("WHCI UWB Multi-interface Controller enumerator");
MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
MODULE_LICENSE("GPL");

View File

@ -1,130 +0,0 @@
#! /bin/bash
#
set -e
progname=$(basename $0)
function help
{
cat <<EOF
Usage: $progname COMMAND DEVICEs [ARGS]
Command for manipulating the pairing/authentication credentials of a
Wireless USB device that supports wired-mode Cable-Based-Association.
Works in conjunction with the wusb-cba.ko driver from http://linuxuwb.org.
DEVICE
sysfs path to the device to authenticate; for example, both of these
refer to the same device:
/sys/devices/pci0000:00/0000:00:1d.7/usb1/1-4/1-4.4/1-4.4:1.1
/sys/bus/usb/drivers/wusb-cbaf/1-4.4:1.1
COMMAND/ARGS are
start
Start a WUSB host controller (by setting up a CHID)
set-chid DEVICE HOST-CHID HOST-BANDGROUP HOST-NAME
Sets host information in the device; after this you can call
get-cdid to see how this device reports itself to us.
get-cdid DEVICE
Get the device ID associated to the HOST-CHID we sent with
'set-chid'. We might not know about it.
set-cc DEVICE
If we allow the device to connect, set a random new CDID and CK
(connection key). The device saves them for the next time it wants
to connect wirelessly. We save them too, so we can authenticate the
device (by the CDID it uses to identify itself) and use the CK to
talk to it securely.
CHID is always 16 hex bytes in 'XX YY ZZ...' form
BANDGROUP is almost always 0001
Examples:
You can default most arguments to '' to get a sane value:
$ $progname set-chid '' '' '' "My host name"
A full sequence:
$ $progname set-chid '' '' '' "My host name"
$ $progname get-cdid ''
$ $progname set-cc ''
EOF
}
# Defaults
# FIXME: CHID should come from a database :), band group from the host
host_CHID="00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff"
host_band_group="0001"
host_name=$(hostname)
devs="$(echo /sys/bus/usb/drivers/wusb-cbaf/[0-9]*)"
hdevs="$(for h in /sys/class/uwb_rc/*/wusbhc; do readlink -f $h; done)"
result=0
case $1 in
start)
for dev in ${2:-$hdevs}
do
echo $host_CHID > $dev/wusb_chid
echo I: started host $(basename $dev) >&2
done
;;
stop)
for dev in ${2:-$hdevs}
do
echo 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 > $dev/wusb_chid
echo I: stopped host $(basename $dev) >&2
done
;;
set-chid)
shift
for dev in ${1:-$devs}; do
echo "${4:-$host_name}" > $dev/wusb_host_name
echo "${3:-$host_band_group}" > $dev/wusb_host_band_groups
echo ${2:-$host_CHID} > $dev/wusb_chid
done
;;
get-cdid)
for dev in ${2:-$devs}
do
cat $dev/wusb_cdid
done
;;
set-cc)
for dev in ${2:-$devs}; do
shift
CDID="$(head --bytes=16 /dev/urandom | od -tx1 -An)"
CK="$(head --bytes=16 /dev/urandom | od -tx1 -An)"
echo "$CDID" > $dev/wusb_cdid
echo "$CK" > $dev/wusb_ck
echo I: CC set >&2
echo "CHID: $(cat $dev/wusb_chid)"
echo "CDID:$CDID"
echo "CK: $CK"
done
;;
help|h|--help|-h)
help
;;
*)
echo "E: Unknown usage" 1>&2
help 1>&2
result=1
esac
exit $result

View File

@ -1,457 +0,0 @@
================================
Linux UWB + Wireless USB + WiNET
================================
Copyright (C) 2005-2006 Intel Corporation
Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License version
2 as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
Please visit http://bughost.org/thewiki/Design-overview.txt-1.8 for
updated content.
* Design-overview.txt-1.8
This code implements an Ultra Wide Band stack for Linux, as well as
drivers for the USB based UWB radio controllers defined in the
Wireless USB 1.0 specification (including Wireless USB host controller
and an Intel WiNET controller).
.. Contents
1. Introduction
   1. HWA: Host Wire adapters, your Wireless USB dongle
   2. DWA: Device Wired Adaptor, a Wireless USB hub for wired devices
   3. WHCI: Wireless Host Controller Interface, the PCI WUSB host adapter
2. The UWB stack
   1. Devices and hosts: the basic structure
   2. Host Controller life cycle
   3. On the air: beacons and enumerating the radio neighborhood
   4. Device lists
   5. Bandwidth allocation
3. Wireless USB Host Controller drivers
4. Glossary
Introduction
============
UWB is a wide-band communication protocol that is also meant to serve as the
low-level protocol for others (much like TCP sits on IP). Currently
these others are Wireless USB and TCP/IP, but it seems Bluetooth and
Firewire/1394 are coming along.
UWB uses a band from roughly 3 to 10 GHz, transmitting at a max of
~-41dB (or 0.074 uW/MHz--geography specific data is still being
negotiated w/ regulators, so watch for changes). That band is divided in
a bunch of ~1.5 GHz wide channels (or band groups) composed of three
subbands/subchannels (528 MHz each). Each channel is independent of the
others, so you could consider them different "busses". Initially this
driver considers them all a single one.
Radio time is divided into 65536 us long /superframes/, each one divided
into 256 /MASs/ (Media Allocation Slots) of 256 us each, which are the basic
time/media allocation units for transferring data. At the beginning of
each superframe there is a Beacon Period (BP), where every device
transmits its beacon on a single MAS. The length of the BP depends on how
many devices are present and the length of their beacons.
Devices have a MAC address (fixed, 48 bit) and a device address (changeable,
16 bit) and send periodic beacons to advertise themselves and pass
info on what they are and do. They advertise their capabilities and a
bunch of other stuff.
The different logical parts of this driver are:
*
*UWB*: the Ultra-Wide-Band stack -- manages the radio and
associated spectrum to allow for devices sharing it. It allows
control of bandwidth assignment, beaconing, scanning, etc.
*
*WUSB*: the layer that sits on top of UWB to provide Wireless USB.
The Wireless USB spec defines means to control a UWB radio and to
do the actual WUSB.
HWA: Host Wire adapters, your Wireless USB dongle
-------------------------------------------------
WUSB also defines a device called a Host Wire Adaptor (HWA), which in
simple terms is a USB dongle that enables your PC to have UWB and Wireless
USB. The Wireless USB Host Controller in a HWA looks to the host like a
[Wireless] USB controller connected via USB (!)
The HWA itself is broken into two or three main interfaces:
*
*RC*: Radio control -- this implements an interface to the
Ultra-Wide-Band radio controller. The driver for this implements a
USB-based UWB Radio Controller to the UWB stack.
*
*HC*: the wireless USB host controller. It looks like a USB host
whose root port is the radio and the WUSB devices connect to it.
To the system it looks like a separate USB host. The driver (will)
implement a USB host controller (similar to UHCI, OHCI or EHCI)
for which the root hub is the radio...To reiterate: it is a USB
controller that is connected via USB instead of PCI.
*
*WINET*: some HW provides a WiNET interface (IP over UWB). This
package provides a driver for it (it looks like a network
interface, winetX). The driver detects when there is a link up for
its type and kicks into gear.
DWA: Device Wired Adaptor, a Wireless USB hub for wired devices
---------------------------------------------------------------
These are the complement to HWAs. A DWA is a USB host for connecting
wired devices, but it is connected to your PC via Wireless
USB. To the system it looks like yet another USB host. To the untrained
eye, it looks like a hub that connects upstream wirelessly.
We still offer no support for this; however, it should share a lot of
code with the HWA-RC driver; there is a bunch of factorization work that
has been done to support that in upcoming releases.
WHCI: Wireless Host Controller Interface, the PCI WUSB host adapter
-------------------------------------------------------------------
This is your usual PCI device that implements WHCI. Similar in concept
to EHCI, it allows your wireless USB devices (including DWAs) to connect
to your host via a PCI interface. As in the case of the HWA, it has a
Radio Control interface and the WUSB Host Controller interface per se.
There is still no driver support for this, but it will be in upcoming
releases.
The UWB stack
=============
The main mission of the UWB stack is to keep a tally of which devices
are in radio proximity to allow drivers to connect to them. As well, it
provides an API for controlling the local radio controllers (RCs from
now on), such as to start/stop beaconing, scan, allocate bandwidth, etc.
Devices and hosts: the basic structure
--------------------------------------
The main building block here is the UWB device (struct uwb_dev). For
each device that pops up in radio presence (ie: the UWB host receives a
beacon from it) you get a struct uwb_dev that will show up in
/sys/bus/uwb/devices.
For each RC that is detected, a new struct uwb_rc and struct uwb_dev are
created. An entry is also created in /sys/class/uwb_rc for each RC.
Each RC driver is implemented by a separate driver that plugs into the
interface that the UWB stack provides through a struct uwb_rc_ops. The
spec creators have been nice enough to make the message format the same
for HWA and WHCI RCs, so the driver is really a very thin transport that
moves the requests from the UWB API to the device [/uwb_rc_ops->cmd()/]
and sends the replies and notifications back to the API
[/uwb_rc_neh_grok()/]. Notifications are handed to the UWB daemon, which
is chartered, among other things, to keep tabs on how the UWB radio
neighborhood looks, creating and destroying devices as they show up or
disappear.
Command execution is very simple: a command block is sent and an event
block or reply is expected back. For sending/receiving command/events, a
handle called /neh/ (Notification/Event Handle) is opened with
/uwb_rc_neh_open()/.
The HWA-RC (USB dongle) driver (drivers/uwb/hwa-rc.c) does this job for
the USB connected HWA. Eventually, drivers/whci-rc.c will do the same
for the PCI connected WHCI controller.
Host Controller life cycle
--------------------------
So let's say we connect a dongle to the system: it is detected and
firmware uploaded if needed [for Intel's i1480
/drivers/uwb/ptc/usb.c:ptc_usb_probe()/] and then it is reenumerated.
Now we have a real HWA device connected and
/drivers/uwb/hwa-rc.c:hwarc_probe()/ picks it up, which will set up the
Wire-Adaptor environment and then suck it into the UWB stack's vision of
the world [/drivers/uwb/lc-rc.c:uwb_rc_add()/].
*
[*] The stack should put a new RC to scan for devices
[/uwb_rc_scan()/] so it finds what's available around and tries to
connect to them, but this is policy stuff and should be driven
from user space. As of now, the operator is expected to do it
manually; see the release notes for documentation on the procedure.
When a dongle is disconnected, /drivers/uwb/hwa-rc.c:hwarc_disconnect()/
takes care of tearing everything down safely (or not...).
On the air: beacons and enumerating the radio neighborhood
----------------------------------------------------------
So assuming we have devices and we have agreed for a channel to connect
on (let's say 9), we put the new RC to beacon:
*
$ echo 9 0 > /sys/class/uwb_rc/uwb0/beacon
Now it is visible. If there were other devices in the same radio channel
and beacon group (that's what the zero is for), the dongle's radio
control interface will send beacon notifications on its
notification/event endpoint (NEEP). The beacon notifications are part of
the event stream that is funneled into the API with
/drivers/uwb/neh.c:uwb_rc_neh_grok()/ and delivered to the UWBD, the UWB
daemon through a notification list.
UWBD wakes up and scans the event list; finds a beacon and adds it to
the BEACON CACHE (/uwb_beca/). If it receives a number of beacons from
the same device, it considers it to be 'onair' and creates a new device
[/drivers/uwb/lc-dev.c:uwbd_dev_onair()/]. Similarly, when no beacons
are received in some time, the device is considered gone and wiped out
[uwbd periodically calls /uwb/beacon.c:uwb_beca_purge()/, which purges
the beacon cache of dead devices].
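As a sketch of the manual flow described above (assuming a radio controller
registered as uwb0, as in the beaconing example), an operator would run:
$ echo 9 0 > /sys/class/uwb_rc/uwb0/beacon
$ ls /sys/bus/uwb/devices
The first command starts beaconing on channel 9, beacon group 0; once UWBD
has seen enough beacons from a neighbor, the new device shows up under
/sys/bus/uwb/devices.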
Device lists
------------
All UWB devices are kept in the list of the struct bus_type uwb_bus_type.
Bandwidth allocation
--------------------
The UWB stack maintains a local copy of DRP availability through
processing of incoming *DRP Availability Change* notifications. This
local copy is currently used to present the current bandwidth
availability to the user through the sysfs file
/sys/class/uwb_rc/uwbx/bw_avail. In the future the bandwidth
availability information will be used by the bandwidth reservation
routines.
The bandwidth reservation routines are in progress and are thus not
present in the current release. When completed they will enable a user
to initiate DRP reservation requests through interaction with sysfs. DRP
reservation requests from remote UWB devices will also be handled. The
bandwidth management done by the UWB stack will include callbacks to the
higher layers that will enable them to use the reservations upon
completion. [Note: The bandwidth reservation work is in progress and
subject to change.]
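As a sketch (assuming a radio controller registered as uwb0), the current
availability exposed through that sysfs file can be read with:
$ cat /sys/class/uwb_rc/uwb0/bw_avail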
Wireless USB Host Controller drivers
====================================
*WARNING* This section needs a lot of work!
As explained above, there are three different types of HCs in the WUSB
world: HWA-HC, DWA-HC and WHCI-HC.
HWA-HC and DWA-HC share that they are Wire-Adapters (USB or WUSB
connected controllers), and their transfer management system is almost
identical. So is their notification delivery system.
HWA-HC and WHCI-HC share that they are both WUSB host controllers, so
they have to deal with the WUSB device life cycle and maintenance, and
with the wireless root hub.
HWA exposes a Host Controller interface (HWA-HC 0xe0/02/02). This has
three endpoints (Notifications, Data Transfer In and Data Transfer
Out--known as NEP, DTI and DTO in the code).
We reserve UWB bandwidth for our Wireless USB Cluster, create a Cluster
ID and tell the HC to use all that. Then we start it. This means the HC
starts sending MMCs.
*
The MMCs are blocks of data defined somewhere in the WUSB1.0 spec
that define a stream in the UWB channel time allocated for sending
WUSB IEs (host to device commands/notifications) and Device
Notifications (device initiated to host). Each host defines a
unique Wireless USB cluster through MMCs. Devices can connect to a
single cluster at a time. The IEs are Information Elements, and
among them are the bandwidth allocations that tell each device
when it can transmit or receive.
Now it all depends on external stimuli.
New device connection
---------------------
A new device pops up and scans the radio looking for MMCs that give out
the existence of Wireless USB channels. Once one (or more) are found, it
selects which one to connect to and sends a /DN_Connect/ (device
notification connect) during the DNTS (Device Notification Time
Slot--announced in the MMCs).
HC picks the /DN_Connect/ out (nep module sends to notif.c for delivery
into /devconnect/). This process starts the authentication process for
the device. First we allocate a /fake port/ and assign an
unauthenticated address (128 to 255--what we really do is
0x80 | fake_port_idx). We fiddle with the fake port status and /hub_wq/
sees a new connection, so it moves on to enable the fake port with a reset.
So now we are in the reset path -- we know we have a not-yet-enumerated
device with an unauthorized address; we ask user space to authenticate
(FIXME: not yet done, similar to bluetooth pairing), then we do the key
exchange (FIXME: not yet done) and issue a /set address 0/ to bring the
device to the default state. Device is authenticated.
From here, the USB stack takes control through the usb_hcd ops. hub_wq
has seen the port status changes, as we have been toggling them. It will
start enumerating and doing transfers through usb_hcd->urb_enqueue() to
read descriptors and move our data.
Device life cycle and keep alives
---------------------------------
Every time there is a successful transfer to/from a device, we update a
per-device activity timestamp. If not, every now and then we check and
if the activity timestamp gets old, we ping the device by sending it a
Keep Alive IE; it responds with a /DN_Alive/ pong during the DNTS (this
arrives to us as a notification through
devconnect.c:wusb_handle_dn_alive()). If a device times out, we
disconnect it from the system (cleaning up internal information and
toggling the bits in the fake hub port, which kicks hub_wq into removing
the rest of the stuff).
This is done through devconnect:__wusb_check_devs(), which will scan the
device list looking for devices that need refreshing.
If the device wants to disconnect, it will either die (ugly) or send a
/DN_Disconnect/ that will prompt a disconnection from the system.
Sending and receiving data
--------------------------
Data is sent and received through /Remote Pipes/ (rpipes). An rpipe is
/aimed/ at an endpoint in a WUSB device. This is the same for HWAs and
DWAs.
Each HC has a number of rpipes and buffers that can be assigned to them;
when doing a data transfer (xfer), first the rpipe has to be aimed and
prepared (buffers assigned), then we can start queueing requests for
data in or out.
Data buffers have to be segmented out before sending--so we send first a
header (segment request) and then if there is any data, a data buffer
immediately after to the DTI interface (yep, even the request). If our
buffer is bigger than the max segment size, then we just do multiple
requests.
[This sucks, because doing USB scatter-gather in Linux is resource
intensive, if any...not that the current approach is not. It just has to
be cleaned up a lot :)].
If reading, we don't send data buffers, just the segment headers saying
we want to read segments.
When the xfer is executed, we receive a notification that says data is
ready in the DTI endpoint (handled through
xfer.c:wa_handle_notif_xfer()). In there we read from the DTI endpoint a
descriptor that gives us the status of the transfer, its identification
(given when we issued it) and the segment number. If it was a data read,
we issue another URB to read into the destination buffer the chunk of
data coming out of the remote endpoint. Done, wait for the next guy. The
callbacks for the URBs issued from here are the ones that will declare
the xfer complete at some point and call its callback.
Seems simple, but the implementation is not trivial.
*
*WARNING* Old!!
The main xfer descriptor, wa_xfer (equivalent to a URB) contains an
array of segments, tallies on segments and buffers, and callback
information. Buried in there are a lot of URBs for executing the segments
and buffer transfers.
For OUT xfers, there is an array of segments, one URB for each, and another
array of buffer URBs. When submitting, we submit URBs for segment request
1, buffer 1, segment 2, buffer 2...etc. Then we wait on the DTI for xfer
result data; when all the segments are complete, we call the callback to
finalize the transfer.
For IN xfers, we only issue URBs for the segments we want to read and
then wait for the xfer result data.
URB mapping into xfers
^^^^^^^^^^^^^^^^^^^^^^
This is done by hwahc_op_urb_[en|de]queue(). In enqueue() we aim an
rpipe to the endpoint where we have to transmit, create a transfer
context (wa_xfer) and submit it. When the xfer is done, our callback is
called and we assign the status bits and release the xfer resources.
In dequeue() we are basically cancelling/aborting the transfer. We issue
a xfer abort request to the HC, cancel all the URBs we had submitted
and not yet done and when all that is done, the xfer callback will be
called--this will call the URB callback.
Glossary
========
*DWA* -- Device Wire Adapter
USB host, wired for downstream devices, upstream connects wirelessly
with Wireless USB.
*EVENT* -- Response to a command on the NEEP
*HWA* -- Host Wire Adapter / USB dongle for UWB and Wireless USB
*NEH* -- Notification/Event Handle
Handle/file descriptor for receiving notifications or events. The WA
code requires you to get one of these to listen for notifications or
events on the NEEP.
*NEEP* -- Notification/Event EndPoint
Stuff related to the management of the first endpoint of a HWA USB
dongle that is used to deliver a stream of events and notifications to
the host.
*NOTIFICATION* -- Message coming in the NEEP as response to something.
*RC* -- Radio Control
Design-overview.txt-1.8 (last edited 2006-11-04 12:22:24 by
InakyPerezGonzalez)

View File

@ -1,39 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# Wireless USB Core configuration
#
config USB_WUSB
tristate "Enable Wireless USB extensions"
depends on UWB && USB
select CRYPTO
select CRYPTO_AES
select CRYPTO_CCM
help
Enable the host-side support for Wireless USB.
To compile this support select Y (built in). It is safe to
select even if you don't have the hardware.
config USB_WUSB_CBAF
tristate "Support WUSB Cable Based Association (CBA)"
depends on USB
help
Some WUSB devices support Cable Based Association. It's used to
enable the secure communication between the host and the
device.
Enable this option if your WUSB device must be connected
via wired USB before establishing a wireless link.
It is safe to select even if you don't have compatible
hardware.
config USB_WUSB_CBAF_DEBUG
bool "Enable CBA debug messages"
depends on USB_WUSB_CBAF
help
Say Y here if you want the CBA to produce a bunch of debug messages
to the system log. Select this if you are having a problem with
CBA support and want to see more of what is going on.
source "drivers/staging/wusbcore/host/Kconfig"

View File

@ -1,28 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-$(CONFIG_USB_WUSB_CBAF_DEBUG) := -DDEBUG
obj-$(CONFIG_USB_WUSB) += wusbcore.o
obj-$(CONFIG_USB_HWA_HCD) += wusb-wa.o
obj-$(CONFIG_USB_WUSB_CBAF) += wusb-cbaf.o
wusbcore-y := \
crypto.o \
devconnect.o \
dev-sysfs.o \
mmc.o \
pal.o \
rh.o \
reservation.o \
security.o \
wusbhc.o
wusb-cbaf-y := cbaf.o
wusb-wa-y := \
wa-hc.o \
wa-nep.o \
wa-rpipe.o \
wa-xfer.o
obj-y += host/

View File

@ -1,8 +0,0 @@
TODO: Remove in late 2019 unless there are users
There do not seem to be any real wireless USB devices anywhere in the wild
anymore. It turned out to be a failed technology :(
This will be removed from the tree if no one objects.
Greg Kroah-Hartman <gregkh@linuxfoundation.org>

View File

@ -1,645 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB - Cable Based Association
*
*
* Copyright (C) 2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*
* WUSB devices have to be paired (associated in WUSB lingo) so
* that they can connect to the system.
*
* One way of pairing is using CBA-Cable Based Association. The first
* time you plug in the device with a cable, association is done between
* host and device; on subsequent times, you can connect wirelessly
* without having to associate again. That's the idea.
*
* This driver does nothing Earth shattering. It just provides an
* interface to chat with the wire-connected device so we can get a
* CDID (device ID) that might have been previously associated to a
* CHID (host ID) and to set up a new <CHID,CDID,CK> triplet
* (connection context), with the CK being the secret, or connection
* key. This is the pairing data.
*
* When a device with the CBA capability connects, the probe routine
* just creates a bunch of sysfs files that a user space enumeration
* manager uses to allow it to connect wirelessly to the system or not.
*
* The process goes like this:
*
* 1. Device plugs, cbaf is loaded, notifications happen.
*
* 2. The connection manager (CM) sees a device with CBAF capability
* (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE).
*
* 3. The CM writes the host name, supported band groups, and the CHID
* (host ID) into the wusb_host_name, wusb_host_band_groups and
* wusb_chid files. These get sent to the device and the CDID (if
* any) for this host is requested.
*
* 4. The CM can verify that the device's supported band groups
* (wusb_device_band_groups) are compatible with the host.
*
* 5. The CM reads the wusb_cdid file.
*
* 6. The CM looks up its database
*
* 6.1 If it has a matching CHID,CDID entry, the device has been
* authorized before (paired) and nothing further needs to be
* done.
*
* 6.2 If the CDID is zero (or the CM doesn't find a matching CDID in
* its database), the device is assumed to be not known. The CM
* may associate the host with device by: writing a randomly
* generated CDID to wusb_cdid and then a random CK to wusb_ck
* (this uploads the new CC to the device).
*
* The CM may choose to prompt the user before associating with a new
* device.
*
* 7. Device is unplugged.
*
* When the device tries to connect wirelessly, it will present its
* CDID to the WUSB host controller. The CM will query the
* database. If the CHID/CDID pair is found, it will (with a 4-way
* handshake) challenge the device to demonstrate it has the CK secret
* key (from our database) without actually exchanging it. Once
* satisfied, crypto keys are derived from the CK, the device is
* connected and all communication is encrypted.
*
* References:
* [WUSB-AM] Association Models Supplement to the Certified Wireless
* Universal Serial Bus Specification, version 1.0.
*/
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/usb.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include "../uwb/uwb.h"
#include "include/wusb.h"
#include "include/association.h"
#define CBA_NAME_LEN 0x40 /* [WUSB-AM] table 4-7 */
/* An instance of a Cable-Based-Association-Framework device */
struct cbaf {
struct usb_device *usb_dev;
struct usb_interface *usb_iface;
void *buffer;
size_t buffer_size;
struct wusb_ckhdid chid;
char host_name[CBA_NAME_LEN];
u16 host_band_groups;
struct wusb_ckhdid cdid;
char device_name[CBA_NAME_LEN];
u16 device_band_groups;
struct wusb_ckhdid ck;
};
/*
* Verify that a CBAF USB-interface has what we need
*
* According to [WUSB-AM], CBA devices should provide at least two
* interfaces:
* - RETRIEVE_HOST_INFO
* - ASSOCIATE
*
* If the device doesn't provide these interfaces, we do not know how
* to deal with it.
*/
static int cbaf_check(struct cbaf *cbaf)
{
int result;
struct device *dev = &cbaf->usb_iface->dev;
struct wusb_cbaf_assoc_info *assoc_info;
struct wusb_cbaf_assoc_request *assoc_request;
size_t assoc_size;
void *itr, *top;
int ar_rhi = 0, ar_assoc = 0;
result = usb_control_msg(
cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
CBAF_REQ_GET_ASSOCIATION_INFORMATION,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
cbaf->buffer, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Cannot get available association types: %d\n",
result);
return result;
}
assoc_info = cbaf->buffer;
if (result < sizeof(*assoc_info)) {
dev_err(dev, "Not enough data to decode association info "
"header (%zu vs %zu bytes required)\n",
(size_t)result, sizeof(*assoc_info));
return result;
}
assoc_size = le16_to_cpu(assoc_info->Length);
if (result < assoc_size) {
dev_err(dev, "Not enough data to decode association info "
"(%zu vs %zu bytes required)\n",
(size_t)assoc_size, sizeof(*assoc_info));
return result;
}
/*
* From now on, we just verify, but won't error out unless we
* don't find the AR_TYPE_WUSB_{RETRIEVE_HOST_INFO,ASSOCIATE}
* types.
*/
itr = cbaf->buffer + sizeof(*assoc_info);
top = cbaf->buffer + assoc_size;
dev_dbg(dev, "Found %u association requests (%zu bytes)\n",
assoc_info->NumAssociationRequests, assoc_size);
while (itr < top) {
u16 ar_type, ar_subtype;
u32 ar_size;
const char *ar_name;
assoc_request = itr;
if (top - itr < sizeof(*assoc_request)) {
dev_err(dev, "Not enough data to decode association "
"request (%zu vs %zu bytes needed)\n",
top - itr, sizeof(*assoc_request));
break;
}
ar_type = le16_to_cpu(assoc_request->AssociationTypeId);
ar_subtype = le16_to_cpu(assoc_request->AssociationSubTypeId);
ar_size = le32_to_cpu(assoc_request->AssociationTypeInfoSize);
ar_name = "unknown";
switch (ar_type) {
case AR_TYPE_WUSB:
/* Verify we have what is mandated by [WUSB-AM]. */
switch (ar_subtype) {
case AR_TYPE_WUSB_RETRIEVE_HOST_INFO:
ar_name = "RETRIEVE_HOST_INFO";
ar_rhi = 1;
break;
case AR_TYPE_WUSB_ASSOCIATE:
/* send assoc data */
ar_name = "ASSOCIATE";
ar_assoc = 1;
break;
}
break;
}
dev_dbg(dev, "Association request #%02u: 0x%04x/%04x "
"(%zu bytes): %s\n",
assoc_request->AssociationDataIndex, ar_type,
ar_subtype, (size_t)ar_size, ar_name);
itr += sizeof(*assoc_request);
}
if (!ar_rhi) {
dev_err(dev, "Missing RETRIEVE_HOST_INFO association "
"request\n");
return -EINVAL;
}
if (!ar_assoc) {
dev_err(dev, "Missing ASSOCIATE association request\n");
return -EINVAL;
}
return 0;
}
static const struct wusb_cbaf_host_info cbaf_host_info_defaults = {
.AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
.AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
.AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO),
.CHID_hdr = WUSB_AR_CHID,
.LangID_hdr = WUSB_AR_LangID,
.HostFriendlyName_hdr = WUSB_AR_HostFriendlyName,
};
/* Send WUSB host information (CHID and name) to a CBAF device */
static int cbaf_send_host_info(struct cbaf *cbaf)
{
struct wusb_cbaf_host_info *hi;
size_t name_len;
size_t hi_size;
hi = cbaf->buffer;
memset(hi, 0, sizeof(*hi));
*hi = cbaf_host_info_defaults;
hi->CHID = cbaf->chid;
hi->LangID = 0; /* FIXME: I guess... */
strlcpy(hi->HostFriendlyName, cbaf->host_name, CBA_NAME_LEN);
name_len = strlen(cbaf->host_name);
hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len);
hi_size = sizeof(*hi) + name_len;
return usb_control_msg(cbaf->usb_dev,
usb_sndctrlpipe(cbaf->usb_dev, 0),
CBAF_REQ_SET_ASSOCIATION_RESPONSE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x0101,
cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
hi, hi_size, USB_CTRL_SET_TIMEOUT);
}
/*
* Get device's information (CDID) associated to CHID
*
* The device will return its information (CDID, name, bandgroups)
* associated to the CHID we have set before, or 0 CDID and default
* name and bandgroup if no CHID set or unknown.
*/
static int cbaf_cdid_get(struct cbaf *cbaf)
{
int result;
struct device *dev = &cbaf->usb_iface->dev;
struct wusb_cbaf_device_info *di;
size_t needed;
di = cbaf->buffer;
result = usb_control_msg(
cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
CBAF_REQ_GET_ASSOCIATION_REQUEST,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
di, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Cannot request device information: %d\n",
result);
return result;
}
needed = result < sizeof(*di) ? sizeof(*di) : le32_to_cpu(di->Length);
if (result < needed) {
dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs "
"%zu bytes needed)\n", (size_t)result, needed);
return -ENOENT;
}
strlcpy(cbaf->device_name, di->DeviceFriendlyName, CBA_NAME_LEN);
cbaf->cdid = di->CDID;
cbaf->device_band_groups = le16_to_cpu(di->BandGroups);
return 0;
}
static ssize_t cbaf_wusb_chid_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct usb_interface *iface = to_usb_interface(dev);
struct cbaf *cbaf = usb_get_intfdata(iface);
return sprintf(buf, "%16ph\n", cbaf->chid.data);
}
static ssize_t cbaf_wusb_chid_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
ssize_t result;
struct usb_interface *iface = to_usb_interface(dev);
struct cbaf *cbaf = usb_get_intfdata(iface);
result = sscanf(buf,
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx",
&cbaf->chid.data[0] , &cbaf->chid.data[1],
&cbaf->chid.data[2] , &cbaf->chid.data[3],
&cbaf->chid.data[4] , &cbaf->chid.data[5],
&cbaf->chid.data[6] , &cbaf->chid.data[7],
&cbaf->chid.data[8] , &cbaf->chid.data[9],
&cbaf->chid.data[10], &cbaf->chid.data[11],
&cbaf->chid.data[12], &cbaf->chid.data[13],
&cbaf->chid.data[14], &cbaf->chid.data[15]);
if (result != 16)
return -EINVAL;
result = cbaf_send_host_info(cbaf);
if (result < 0)
return result;
result = cbaf_cdid_get(cbaf);
if (result < 0)
return result;
return size;
}
static DEVICE_ATTR(wusb_chid, 0600, cbaf_wusb_chid_show, cbaf_wusb_chid_store);
static ssize_t cbaf_wusb_host_name_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct usb_interface *iface = to_usb_interface(dev);
struct cbaf *cbaf = usb_get_intfdata(iface);
return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->host_name);
}
static ssize_t cbaf_wusb_host_name_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
ssize_t result;
struct usb_interface *iface = to_usb_interface(dev);
struct cbaf *cbaf = usb_get_intfdata(iface);
result = sscanf(buf, "%63s", cbaf->host_name);
if (result != 1)
return -EINVAL;
return size;
}
static DEVICE_ATTR(wusb_host_name, 0600, cbaf_wusb_host_name_show,
cbaf_wusb_host_name_store);
static ssize_t cbaf_wusb_host_band_groups_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct usb_interface *iface = to_usb_interface(dev);
struct cbaf *cbaf = usb_get_intfdata(iface);
return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->host_band_groups);
}
static ssize_t cbaf_wusb_host_band_groups_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
ssize_t result;
struct usb_interface *iface = to_usb_interface(dev);
struct cbaf *cbaf = usb_get_intfdata(iface);
u16 band_groups = 0;
result = sscanf(buf, "%04hx", &band_groups);
if (result != 1)
return -EINVAL;
cbaf->host_band_groups = band_groups;
return size;
}
static DEVICE_ATTR(wusb_host_band_groups, 0600,
cbaf_wusb_host_band_groups_show,
cbaf_wusb_host_band_groups_store);
static const struct wusb_cbaf_device_info cbaf_device_info_defaults = {
.Length_hdr = WUSB_AR_Length,
.CDID_hdr = WUSB_AR_CDID,
.BandGroups_hdr = WUSB_AR_BandGroups,
.LangID_hdr = WUSB_AR_LangID,
.DeviceFriendlyName_hdr = WUSB_AR_DeviceFriendlyName,
};
static ssize_t cbaf_wusb_cdid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *iface = to_usb_interface(dev);
struct cbaf *cbaf = usb_get_intfdata(iface);
return sprintf(buf, "%16ph\n", cbaf->cdid.data);
}
static ssize_t cbaf_wusb_cdid_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
ssize_t result;
struct usb_interface *iface = to_usb_interface(dev);
struct cbaf *cbaf = usb_get_intfdata(iface);
struct wusb_ckhdid cdid;
result = sscanf(buf,
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx",
&cdid.data[0] , &cdid.data[1],
&cdid.data[2] , &cdid.data[3],
&cdid.data[4] , &cdid.data[5],
&cdid.data[6] , &cdid.data[7],
&cdid.data[8] , &cdid.data[9],
&cdid.data[10], &cdid.data[11],
&cdid.data[12], &cdid.data[13],
&cdid.data[14], &cdid.data[15]);
if (result != 16)
return -EINVAL;
cbaf->cdid = cdid;
return size;
}
static DEVICE_ATTR(wusb_cdid, 0600, cbaf_wusb_cdid_show, cbaf_wusb_cdid_store);
static ssize_t cbaf_wusb_device_band_groups_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct usb_interface *iface = to_usb_interface(dev);
struct cbaf *cbaf = usb_get_intfdata(iface);
return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->device_band_groups);
}
static DEVICE_ATTR(wusb_device_band_groups, 0600,
cbaf_wusb_device_band_groups_show,
NULL);
static ssize_t cbaf_wusb_device_name_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct usb_interface *iface = to_usb_interface(dev);
struct cbaf *cbaf = usb_get_intfdata(iface);
return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->device_name);
}
static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL);
static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = {
.AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
.AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
.AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE),
.Length_hdr = WUSB_AR_Length,
.Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
.ConnectionContext_hdr = WUSB_AR_ConnectionContext,
.BandGroups_hdr = WUSB_AR_BandGroups,
};
static const struct wusb_cbaf_cc_data_fail cbaf_cc_data_fail_defaults = {
.AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
.Length_hdr = WUSB_AR_Length,
.AssociationStatus_hdr = WUSB_AR_AssociationStatus,
};
/*
* Send a new CC to the device.
*/
static int cbaf_cc_upload(struct cbaf *cbaf)
{
int result;
struct device *dev = &cbaf->usb_iface->dev;
struct wusb_cbaf_cc_data *ccd;
ccd = cbaf->buffer;
*ccd = cbaf_cc_data_defaults;
ccd->CHID = cbaf->chid;
ccd->CDID = cbaf->cdid;
ccd->CK = cbaf->ck;
ccd->BandGroups = cpu_to_le16(cbaf->host_band_groups);
dev_dbg(dev, "Trying to upload CC:\n");
dev_dbg(dev, " CHID %16ph\n", ccd->CHID.data);
dev_dbg(dev, " CDID %16ph\n", ccd->CDID.data);
dev_dbg(dev, " Bandgroups 0x%04x\n", cbaf->host_band_groups);
result = usb_control_msg(
cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
CBAF_REQ_SET_ASSOCIATION_RESPONSE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
ccd, sizeof(*ccd), USB_CTRL_SET_TIMEOUT);
return result;
}
static ssize_t cbaf_wusb_ck_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
ssize_t result;
struct usb_interface *iface = to_usb_interface(dev);
struct cbaf *cbaf = usb_get_intfdata(iface);
result = sscanf(buf,
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx",
&cbaf->ck.data[0] , &cbaf->ck.data[1],
&cbaf->ck.data[2] , &cbaf->ck.data[3],
&cbaf->ck.data[4] , &cbaf->ck.data[5],
&cbaf->ck.data[6] , &cbaf->ck.data[7],
&cbaf->ck.data[8] , &cbaf->ck.data[9],
&cbaf->ck.data[10], &cbaf->ck.data[11],
&cbaf->ck.data[12], &cbaf->ck.data[13],
&cbaf->ck.data[14], &cbaf->ck.data[15]);
if (result != 16)
return -EINVAL;
result = cbaf_cc_upload(cbaf);
if (result < 0)
return result;
return size;
}
static DEVICE_ATTR(wusb_ck, 0600, NULL, cbaf_wusb_ck_store);
static struct attribute *cbaf_dev_attrs[] = {
&dev_attr_wusb_host_name.attr,
&dev_attr_wusb_host_band_groups.attr,
&dev_attr_wusb_chid.attr,
&dev_attr_wusb_cdid.attr,
&dev_attr_wusb_device_name.attr,
&dev_attr_wusb_device_band_groups.attr,
&dev_attr_wusb_ck.attr,
NULL,
};
static const struct attribute_group cbaf_dev_attr_group = {
.name = NULL, /* we want them in the same directory */
.attrs = cbaf_dev_attrs,
};
static int cbaf_probe(struct usb_interface *iface,
const struct usb_device_id *id)
{
struct cbaf *cbaf;
struct device *dev = &iface->dev;
int result = -ENOMEM;
cbaf = kzalloc(sizeof(*cbaf), GFP_KERNEL);
if (cbaf == NULL)
goto error_kzalloc;
cbaf->buffer = kmalloc(512, GFP_KERNEL);
if (cbaf->buffer == NULL)
goto error_kmalloc_buffer;
cbaf->buffer_size = 512;
cbaf->usb_dev = usb_get_dev(interface_to_usbdev(iface));
cbaf->usb_iface = usb_get_intf(iface);
result = cbaf_check(cbaf);
if (result < 0) {
dev_err(dev, "This device is not WUSB-CBAF compliant and is not supported yet.\n");
goto error_check;
}
result = sysfs_create_group(&dev->kobj, &cbaf_dev_attr_group);
if (result < 0) {
dev_err(dev, "Can't register sysfs attr group: %d\n", result);
goto error_create_group;
}
usb_set_intfdata(iface, cbaf);
return 0;
error_create_group:
error_check:
usb_put_intf(iface);
usb_put_dev(cbaf->usb_dev);
kfree(cbaf->buffer);
error_kmalloc_buffer:
kfree(cbaf);
error_kzalloc:
return result;
}
static void cbaf_disconnect(struct usb_interface *iface)
{
struct cbaf *cbaf = usb_get_intfdata(iface);
struct device *dev = &iface->dev;
sysfs_remove_group(&dev->kobj, &cbaf_dev_attr_group);
usb_set_intfdata(iface, NULL);
usb_put_intf(iface);
usb_put_dev(cbaf->usb_dev);
kfree(cbaf->buffer);
/* paranoia: clean up crypto keys */
kzfree(cbaf);
}
static const struct usb_device_id cbaf_id_table[] = {
{ USB_INTERFACE_INFO(0xef, 0x03, 0x01), },
{ },
};
MODULE_DEVICE_TABLE(usb, cbaf_id_table);
static struct usb_driver cbaf_driver = {
.name = "wusb-cbaf",
.id_table = cbaf_id_table,
.probe = cbaf_probe,
.disconnect = cbaf_disconnect,
};
module_usb_driver(cbaf_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Wireless USB Cable Based Association");
MODULE_LICENSE("GPL");

View File

@ -1,441 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Ultra Wide Band
* AES-128 CCM Encryption
*
* Copyright (C) 2007 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* We don't do any encryption here; we use the Linux Kernel's AES-128
* crypto modules to construct keys and payload blocks in a way
* defined by WUSB1.0[6]. Check the errata, as typos are patched
* there.
*
* Thanks a zillion to John Keys for his help and clarifications over
* the designed-by-a-committee text.
*
* So the idea is that there is this basic Pseudo-Random-Function
* defined in WUSB1.0[6.5] which is the core of everything. It works
* by tweaking some blocks, AES crypting them and then xoring
* something else with them (this seems to be called CBC(AES) -- can
* you tell I know jack about crypto?). So we just funnel it into the
* Linux Crypto API.
*
* We leave a crypto test module so we can verify that vectors match,
* every now and then.
*
* Block size: 16 bytes -- AES seems to do things in 'block sizes'. I
* am learning a lot...
*
* Conveniently, some data structures that need to be
* funneled through AES are...16 bytes in size!
*/
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include "../uwb/uwb.h"
#include "include/wusb.h"
static int debug_crypto_verify;
module_param(debug_crypto_verify, int, 0);
MODULE_PARM_DESC(debug_crypto_verify, "verify the key generation algorithms");
static void wusb_key_dump(const void *buf, size_t len)
{
print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_OFFSET, 16, 1,
buf, len, 0);
}
/*
* Block of data, as understood by AES-CCM
*
* The code assumes this structure is nothing but a 16 byte array
* (packed in a struct to avoid common mess ups that I usually do with
* arrays and enforcing type checking).
*/
struct aes_ccm_block {
u8 data[16];
} __attribute__((packed));
/*
* Counter-mode Blocks (WUSB1.0[6.4])
*
* According to CCM (or so it seems), for the purpose of calculating
* the MIC, the message is broken in N counter-mode blocks, B0, B1,
* ... BN.
*
* B0 contains flags, the CCM nonce and l(m).
*
* B1 contains l(a), the MAC header, the encryption offset and padding.
*
* If EO is nonzero, additional blocks are built from payload bytes
* until EO is exhausted (FIXME: padding to 16 bytes, I guess). The
* padding is not xmitted.
*/
/* WUSB1.0[T6.4] */
struct aes_ccm_b0 {
u8 flags; /* 0x59, per CCM spec */
struct aes_ccm_nonce ccm_nonce;
__be16 lm;
} __attribute__((packed));
/* WUSB1.0[T6.5] */
struct aes_ccm_b1 {
__be16 la;
u8 mac_header[10];
__le16 eo;
u8 security_reserved; /* This is always zero */
u8 padding; /* 0 */
} __attribute__((packed));
/*
* Encryption Blocks (WUSB1.0[6.4.4])
*
* CCM uses Ax blocks to generate a keystream with which the MIC and
* the message's payload are encoded. A0 always encrypts/decrypts the
* MIC. Ax (x>0) are used for the successive payload blocks.
*
* The x is the counter, and is increased for each block.
*/
struct aes_ccm_a {
u8 flags; /* 0x01, per CCM spec */
struct aes_ccm_nonce ccm_nonce;
__be16 counter; /* Value of x */
} __attribute__((packed));
/* Scratch space for MAC calculations. */
struct wusb_mac_scratch {
struct aes_ccm_b0 b0;
struct aes_ccm_b1 b1;
struct aes_ccm_a ax;
};
/*
* CC-MAC function WUSB1.0[6.5]
*
* Take a data string and produce the encrypted CBC Counter-mode MIC
*
* Note the names for most function arguments are made to (more or
* less) match those used in the pseudo-function definition given in
* WUSB1.0[6.5].
*
* @tfm_cbc: CBC(AES) blkcipher handle (initialized)
*
* @tfm_aes: AES cipher handle (initialized)
*
* @mic: buffer for placing the computed MIC (Message Integrity
* Code). This is exactly 8 bytes, and we expect the buffer to
* be at least eight bytes in length.
*
* @key: 128 bit symmetric key
*
* @n: CCM nonce
*
* @a: ASCII string, 14 bytes long (I guess zero padded if needed;
* we use exactly 14 bytes).
*
* @b: data stream to be processed
*
* @blen: size of b...
*
* Still not very clear how this is done, but looks like this: we
* create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with
* @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we
* take the payload and divide it in blocks (16 bytes), xor them with
* the previous crypto result (16 bytes) and crypt it, repeat the next
* block with the output of the previous one, rinse wash. So we use
* the CBC-MAC(AES) shash, that does precisely that. The IV (Initial
* Vector) is 16 bytes and is set to zero, so
*
* (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and
* using the 14 bytes of @a to fill up
* b1.{mac_header,e0,security_reserved,padding}.
*
* NOTE: The definition of l(a) in WUSB1.0[6.5] vs the definition of
* l(m) is orthogonal, they bear no relationship, so it is not
* in conflict with the parameter's relation that
* WUSB1.0[6.4.2]) defines.
*
* NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in
* first errata released on 2005/07.
*
* NOTE: we need to clean IV to zero at each invocation to make sure
* we start with a fresh empty Initial Vector, so that the CBC
* works ok.
*
* NOTE: blen is not aligned to a block size, we'll pad zeros, that's
* what sg[4] is for. Maybe there is a smarter way to do this.
*/
static int wusb_ccm_mac(struct crypto_shash *tfm_cbcmac,
struct wusb_mac_scratch *scratch,
void *mic,
const struct aes_ccm_nonce *n,
const struct aes_ccm_label *a, const void *b,
size_t blen)
{
SHASH_DESC_ON_STACK(desc, tfm_cbcmac);
u8 iv[AES_BLOCK_SIZE];
/*
* These checks should be compile time optimized out
* ensure @a fills b1's mac_header and following fields
*/
BUILD_BUG_ON(sizeof(*a) != sizeof(scratch->b1) - sizeof(scratch->b1.la));
BUILD_BUG_ON(sizeof(scratch->b0) != sizeof(struct aes_ccm_block));
BUILD_BUG_ON(sizeof(scratch->b1) != sizeof(struct aes_ccm_block));
BUILD_BUG_ON(sizeof(scratch->ax) != sizeof(struct aes_ccm_block));
/* Setup B0 */
scratch->b0.flags = 0x59; /* Format B0 */
scratch->b0.ccm_nonce = *n;
scratch->b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
/* Setup B1
*
* The WUSB spec is anything but clear! WUSB1.0[6.5]
* says that to initialize B1 from A with 'l(a) = blen +
* 14'--after clarification, it means to use A's contents
* for MAC Header, EO, sec reserved and padding.
*/
scratch->b1.la = cpu_to_be16(blen + 14);
memcpy(&scratch->b1.mac_header, a, sizeof(*a));
desc->tfm = tfm_cbcmac;
crypto_shash_init(desc);
crypto_shash_update(desc, (u8 *)&scratch->b0, sizeof(scratch->b0) +
sizeof(scratch->b1));
crypto_shash_finup(desc, b, blen, iv);
/* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5]
* The procedure is to AES crypt the A0 block and XOR the MIC
* Tag against it; we only do the first 8 bytes and place it
* directly in the destination buffer.
*/
scratch->ax.flags = 0x01; /* as per WUSB 1.0 spec */
scratch->ax.ccm_nonce = *n;
scratch->ax.counter = 0;
/* reuse the CBC-MAC transform to perform the single block encryption */
crypto_shash_digest(desc, (u8 *)&scratch->ax, sizeof(scratch->ax),
(u8 *)&scratch->ax);
crypto_xor_cpy(mic, (u8 *)&scratch->ax, iv, 8);
return 8;
}
/*
* WUSB Pseudo Random Function (WUSB1.0[6.5])
*
* @b: buffer to the source data; cannot be a global or const local
* (will confuse the scatterlists)
*/
ssize_t wusb_prf(void *out, size_t out_size,
const u8 key[16], const struct aes_ccm_nonce *_n,
const struct aes_ccm_label *a,
const void *b, size_t blen, size_t len)
{
ssize_t result, bytes = 0, bitr;
struct aes_ccm_nonce n = *_n;
struct crypto_shash *tfm_cbcmac;
struct wusb_mac_scratch scratch;
u64 sfn = 0;
__le64 sfn_le;
tfm_cbcmac = crypto_alloc_shash("cbcmac(aes)", 0, 0);
if (IS_ERR(tfm_cbcmac)) {
result = PTR_ERR(tfm_cbcmac);
printk(KERN_ERR "E: can't load CBCMAC-AES: %d\n", (int)result);
goto error_alloc_cbcmac;
}
result = crypto_shash_setkey(tfm_cbcmac, key, AES_BLOCK_SIZE);
if (result < 0) {
printk(KERN_ERR "E: can't set CBCMAC-AES key: %d\n", (int)result);
goto error_setkey_cbcmac;
}
for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
sfn_le = cpu_to_le64(sfn++);
memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */
result = wusb_ccm_mac(tfm_cbcmac, &scratch, out + bytes,
&n, a, b, blen);
if (result < 0)
goto error_ccm_mac;
bytes += result;
}
result = bytes;
error_ccm_mac:
error_setkey_cbcmac:
crypto_free_shash(tfm_cbcmac);
error_alloc_cbcmac:
return result;
}
/* WUSB1.0[A.2] test vectors */
static const u8 stv_hsmic_key[16] = {
0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
};
static const struct aes_ccm_nonce stv_hsmic_n = {
.sfn = { 0 },
.tkid = { 0x76, 0x98, 0x01, },
.dest_addr = { .data = { 0xbe, 0x00 } },
.src_addr = { .data = { 0x76, 0x98 } },
};
/*
* Out-of-band MIC Generation verification code
*
*/
static int wusb_oob_mic_verify(void)
{
int result;
u8 mic[8];
/* WUSB1.0[A.2] test vectors */
static const struct usb_handshake stv_hsmic_hs = {
.bMessageNumber = 2,
.bStatus = 00,
.tTKID = { 0x76, 0x98, 0x01 },
.bReserved = 00,
.CDID = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
0x3c, 0x3d, 0x3e, 0x3f },
.nonce = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
0x2c, 0x2d, 0x2e, 0x2f },
.MIC = { 0x75, 0x6a, 0x97, 0x51, 0x0c, 0x8c,
0x14, 0x7b },
};
size_t hs_size;
result = wusb_oob_mic(mic, stv_hsmic_key, &stv_hsmic_n, &stv_hsmic_hs);
if (result < 0)
printk(KERN_ERR "E: WUSB OOB MIC test: failed: %d\n", result);
else if (memcmp(stv_hsmic_hs.MIC, mic, sizeof(mic))) {
printk(KERN_ERR "E: OOB MIC test: "
"mismatch between MIC result and WUSB1.0[A2]\n");
hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC);
printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size);
wusb_key_dump(&stv_hsmic_hs, hs_size);
printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n",
sizeof(stv_hsmic_n));
wusb_key_dump(&stv_hsmic_n, sizeof(stv_hsmic_n));
printk(KERN_ERR "E: MIC out:\n");
wusb_key_dump(mic, sizeof(mic));
printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n");
wusb_key_dump(stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC));
result = -EINVAL;
} else
result = 0;
return result;
}
/*
* Test vectors for Key derivation
*
* These come from WUSB1.0[6.5.1], the vectors in WUSB1.0[A.1]
* (errata corrected in 2005/07).
*/
static const u8 stv_key_a1[16] __attribute__ ((__aligned__(4))) = {
0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87,
0x78, 0x69, 0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f
};
static const struct aes_ccm_nonce stv_keydvt_n_a1 = {
.sfn = { 0 },
.tkid = { 0x76, 0x98, 0x01, },
.dest_addr = { .data = { 0xbe, 0x00 } },
.src_addr = { .data = { 0x76, 0x98 } },
};
static const struct wusb_keydvt_out stv_keydvt_out_a1 = {
.kck = {
0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
},
.ptk = {
0xc8, 0x70, 0x62, 0x82, 0xb6, 0x7c, 0xe9, 0x06,
0x7b, 0xc5, 0x25, 0x69, 0xf2, 0x36, 0x61, 0x2d
}
};
/*
* Perform a test to make sure we match the vectors defined in
* WUSB1.0[A.1](Errata2006/12)
*/
static int wusb_key_derive_verify(void)
{
int result = 0;
struct wusb_keydvt_out keydvt_out;
/* These come from WUSB1.0[A.1] + 2006/12 errata */
static const struct wusb_keydvt_in stv_keydvt_in_a1 = {
.hnonce = {
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
},
.dnonce = {
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f
}
};
result = wusb_key_derive(&keydvt_out, stv_key_a1, &stv_keydvt_n_a1,
&stv_keydvt_in_a1);
if (result < 0)
printk(KERN_ERR "E: WUSB key derivation test: "
"derivation failed: %d\n", result);
if (memcmp(&stv_keydvt_out_a1, &keydvt_out, sizeof(keydvt_out))) {
printk(KERN_ERR "E: WUSB key derivation test: "
"mismatch between key derivation result "
"and WUSB1.0[A1] Errata 2006/12\n");
printk(KERN_ERR "E: keydvt in: key\n");
wusb_key_dump(stv_key_a1, sizeof(stv_key_a1));
printk(KERN_ERR "E: keydvt in: nonce\n");
wusb_key_dump(&stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1));
printk(KERN_ERR "E: keydvt in: hnonce & dnonce\n");
wusb_key_dump(&stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1));
printk(KERN_ERR "E: keydvt out: KCK\n");
wusb_key_dump(&keydvt_out.kck, sizeof(keydvt_out.kck));
printk(KERN_ERR "E: keydvt out: PTK\n");
wusb_key_dump(&keydvt_out.ptk, sizeof(keydvt_out.ptk));
result = -EINVAL;
} else
result = 0;
return result;
}
/*
* Initialize crypto system
*
* FIXME: we do nothing now, other than verifying. Later on we'll
* cache the encryption stuff, so that's why we have a separate init.
*/
int wusb_crypto_init(void)
{
int result;
if (debug_crypto_verify) {
result = wusb_key_derive_verify();
if (result < 0)
return result;
return wusb_oob_mic_verify();
}
return 0;
}
void wusb_crypto_exit(void)
{
/* FIXME: free cached crypto transforms */
}

View File

@ -1,124 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* WUSB devices
* sysfs bindings
*
* Copyright (C) 2007 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* Get them out of the way...
*/
#include <linux/jiffies.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include "wusbhc.h"
static ssize_t wusb_disconnect_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct usb_device *usb_dev;
struct wusbhc *wusbhc;
unsigned command;
u8 port_idx;
if (sscanf(buf, "%u", &command) != 1)
return -EINVAL;
if (command == 0)
return size;
usb_dev = to_usb_device(dev);
wusbhc = wusbhc_get_by_usb_dev(usb_dev);
if (wusbhc == NULL)
return -ENODEV;
mutex_lock(&wusbhc->mutex);
port_idx = wusb_port_no_to_idx(usb_dev->portnum);
__wusbhc_dev_disable(wusbhc, port_idx);
mutex_unlock(&wusbhc->mutex);
wusbhc_put(wusbhc);
return size;
}
static DEVICE_ATTR_WO(wusb_disconnect);
static ssize_t wusb_cdid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t result;
struct wusb_dev *wusb_dev;
wusb_dev = wusb_dev_get_by_usb_dev(to_usb_device(dev));
if (wusb_dev == NULL)
return -ENODEV;
result = sprintf(buf, "%16ph\n", wusb_dev->cdid.data);
wusb_dev_put(wusb_dev);
return result;
}
static DEVICE_ATTR_RO(wusb_cdid);
static ssize_t wusb_ck_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int result;
struct usb_device *usb_dev;
struct wusbhc *wusbhc;
struct wusb_ckhdid ck;
result = sscanf(buf,
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx\n",
&ck.data[0] , &ck.data[1],
&ck.data[2] , &ck.data[3],
&ck.data[4] , &ck.data[5],
&ck.data[6] , &ck.data[7],
&ck.data[8] , &ck.data[9],
&ck.data[10], &ck.data[11],
&ck.data[12], &ck.data[13],
&ck.data[14], &ck.data[15]);
if (result != 16)
return -EINVAL;
usb_dev = to_usb_device(dev);
wusbhc = wusbhc_get_by_usb_dev(usb_dev);
if (wusbhc == NULL)
return -ENODEV;
result = wusb_dev_4way_handshake(wusbhc, usb_dev->wusb_dev, &ck);
memzero_explicit(&ck, sizeof(ck));
wusbhc_put(wusbhc);
return result < 0 ? result : size;
}
static DEVICE_ATTR_WO(wusb_ck);
static struct attribute *wusb_dev_attrs[] = {
&dev_attr_wusb_disconnect.attr,
&dev_attr_wusb_cdid.attr,
&dev_attr_wusb_ck.attr,
NULL,
};
static const struct attribute_group wusb_dev_attr_group = {
.name = NULL, /* we want them in the same directory */
.attrs = wusb_dev_attrs,
};
int wusb_dev_sysfs_add(struct wusbhc *wusbhc, struct usb_device *usb_dev,
struct wusb_dev *wusb_dev)
{
int result = sysfs_create_group(&usb_dev->dev.kobj,
&wusb_dev_attr_group);
struct device *dev = &usb_dev->dev;
if (result < 0)
dev_err(dev, "Cannot register WUSB-dev attributes: %d\n",
result);
return result;
}
void wusb_dev_sysfs_rm(struct wusb_dev *wusb_dev)
{
struct usb_device *usb_dev = wusb_dev->usb_dev;
if (usb_dev)
sysfs_remove_group(&usb_dev->dev.kobj, &wusb_dev_attr_group);
}

File diff suppressed because it is too large

View File

@ -1,28 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
config USB_WHCI_HCD
tristate "Wireless USB Host Controller Interface (WHCI) driver"
depends on USB_PCI && USB && UWB
select USB_WUSB
select UWB_WHCI
help
A driver for PCI-based Wireless USB Host Controllers that are
compliant with the WHCI specification.
To compile this driver as a module, choose M here: the module
will be called "whci-hcd".
config USB_HWA_HCD
tristate "Host Wire Adapter (HWA) driver"
depends on USB && UWB
select USB_WUSB
select UWB_HWA
help
This driver enables you to connect Wireless USB devices to
your system using a Host Wire Adapter USB dongle. This is a
UWB Radio Controller and WUSB Host Controller connected to
your machine via USB (specified in WUSB1.0).
To compile this driver as a module, choose M here: the module
will be called "hwa-hc".

View File

@ -1,3 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_USB_WHCI_HCD) += whci/
obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o

View File

@ -1,875 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Host Wire Adapter:
* Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* The HWA driver is a simple layer that forwards requests to the WAHC
* (Wire Adapter Host Controller) or WUSBHC (Wireless USB Host
* Controller) layers.
*
* Host Wire Adapter is the 'WUSB 1.0 standard' name for Wireless-USB
* Host Controller that is connected to your system via USB (a USB
* dongle that implements a USB host...). There is also a Device Wire
* Adapter, DWA (a Wireless USB hub), that uses the same mechanism for
* transferring data (it is, after all, a USB host connected via
* Wireless USB), so we have a common layer called Wire Adapter Host
* Controller that does all the hard work. The WUSBHC (Wireless USB
* Host Controller) is the part common to WUSB Host Controllers, the
* HWA and the PCI-based one, that is implemented following the WHCI
* spec. All these layers are implemented in ../wusbcore.
*
* The main functions are hwahc_op_urb_{en,de}queue(), that pass the
* job of converting a URB to a Wire Adapter
*
* Entry points:
*
* hwahc_driver_*() Driver initialization, registration and
* teardown.
*
* hwahc_probe() New device came up, create an instance for
* it [from device enumeration].
*
* hwahc_disconnect() Remove device instance [from device
* enumeration].
*
* [__]hwahc_op_*() Host Wire Adapter specific functions for
* starting/stopping/etc (some might be made also
* DWA).
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/completion.h>
#include "../wa-hc.h"
#include "../wusbhc.h"
struct hwahc {
struct wusbhc wusbhc; /* has to be 1st */
struct wahc wa;
};
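/*
 * Illustrative sketch only (this helper is not part of the original
 * file): because 'wusbhc' is the first member, the hcd_priv area that
 * usb_create_hcd() allocates (see hcd_priv_size below) can be viewed
 * as a struct wusbhc, and the enclosing hwahc is then recovered with
 * container_of(). This is the pattern the hwahc_op_*() callbacks use.
 */
static inline struct hwahc *usb_hcd_to_hwahc(struct usb_hcd *usb_hcd)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);

	/* valid because 'wusbhc' is the first member of struct hwahc */
	return container_of(wusbhc, struct hwahc, wusbhc);
}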
/*
* FIXME should be wusbhc
*
* NOTE: we need to cache the Cluster ID because later...there is no
* way to get it :)
*/
static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id)
{
int result;
struct wusbhc *wusbhc = &hwahc->wusbhc;
struct wahc *wa = &hwahc->wa;
struct device *dev = &wa->usb_iface->dev;
result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
WUSB_REQ_SET_CLUSTER_ID,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
cluster_id,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n",
cluster_id, result);
else
wusbhc->cluster_id = cluster_id;
dev_info(dev, "Wireless USB Cluster ID set to 0x%02x\n", cluster_id);
return result;
}
static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
{
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
struct wahc *wa = &hwahc->wa;
return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
WUSB_REQ_SET_NUM_DNTS,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
interval << 8 | slots,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
NULL, 0, USB_CTRL_SET_TIMEOUT);
}
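/*
 * Illustration (values are examples only): with interval = 4 and
 * slots = 2, the wValue above is packed as (4 << 8) | 2 = 0x0402,
 * i.e. the DNTS interval rides in the high byte and the number of
 * notification slots in the low byte.
 */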
/*
* Reset a WUSB host controller and wait for it to complete doing it.
*
* @usb_hcd: Pointer to WUSB Host Controller instance.
*
*/
static int hwahc_op_reset(struct usb_hcd *usb_hcd)
{
int result;
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
struct device *dev = &hwahc->wa.usb_iface->dev;
mutex_lock(&wusbhc->mutex);
wa_nep_disarm(&hwahc->wa);
result = __wa_set_feature(&hwahc->wa, WA_RESET);
if (result < 0) {
dev_err(dev, "error commanding HC to reset: %d\n", result);
goto error_unlock;
}
result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0);
if (result < 0) {
dev_err(dev, "error waiting for HC to reset: %d\n", result);
goto error_unlock;
}
error_unlock:
mutex_unlock(&wusbhc->mutex);
return result;
}
/*
* FIXME: break this function up
*/
static int hwahc_op_start(struct usb_hcd *usb_hcd)
{
u8 addr;
int result;
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
result = -ENOSPC;
mutex_lock(&wusbhc->mutex);
addr = wusb_cluster_id_get();
if (addr == 0)
goto error_cluster_id_get;
result = __hwahc_set_cluster_id(hwahc, addr);
if (result < 0)
goto error_set_cluster_id;
usb_hcd->uses_new_polling = 1;
set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
usb_hcd->state = HC_STATE_RUNNING;
/*
* prevent USB core from suspending the root hub since
* bus_suspend and bus_resume are not yet supported.
*/
pm_runtime_get_noresume(&usb_hcd->self.root_hub->dev);
result = 0;
out:
mutex_unlock(&wusbhc->mutex);
return result;
error_set_cluster_id:
wusb_cluster_id_put(addr);
error_cluster_id_get:
goto out;
}
/*
* No need to abort pipes, as when this is called, all the children
* have been disconnected and that has done it [through
* usb_disable_interface() -> usb_disable_endpoint() ->
* hwahc_op_ep_disable() - >rpipe_ep_disable()].
*/
static void hwahc_op_stop(struct usb_hcd *usb_hcd)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
mutex_lock(&wusbhc->mutex);
wusb_cluster_id_put(wusbhc->cluster_id);
mutex_unlock(&wusbhc->mutex);
}
static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
struct wahc *wa = &hwahc->wa;
/*
* We cannot query the HWA for the WUSB time since that requires sending
* a synchronous URB and this function can be called in_interrupt.
* Instead, query the USB frame number for our parent and use that.
*/
return usb_get_current_frame_number(wa->usb_dev);
}
static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
gfp_t gfp)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
}
static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb,
int status)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
return wa_urb_dequeue(&hwahc->wa, urb, status);
}
/*
* Release resources allocated for an endpoint
*
* If there is an associated rpipe to this endpoint, go ahead and put it.
*/
static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd,
struct usb_host_endpoint *ep)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
rpipe_ep_disable(&hwahc->wa, ep);
}
static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
{
int result;
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
struct device *dev = &hwahc->wa.usb_iface->dev;
result = __wa_set_feature(&hwahc->wa, WA_ENABLE);
if (result < 0) {
dev_err(dev, "error commanding HC to start: %d\n", result);
goto error_stop;
}
result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE);
if (result < 0) {
dev_err(dev, "error waiting for HC to start: %d\n", result);
goto error_stop;
}
result = wa_nep_arm(&hwahc->wa, GFP_KERNEL);
if (result < 0) {
dev_err(dev, "cannot listen to notifications: %d\n", result);
goto error_stop;
}
/*
* If WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS is set,
* disable transfer notifications.
*/
if (hwahc->wa.quirks &
WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS) {
struct usb_host_interface *cur_altsetting =
hwahc->wa.usb_iface->cur_altsetting;
result = usb_control_msg(hwahc->wa.usb_dev,
usb_sndctrlpipe(hwahc->wa.usb_dev, 0),
WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_INTERFACE,
WA_REQ_ALEREON_FEATURE_SET,
cur_altsetting->desc.bInterfaceNumber,
NULL, 0,
USB_CTRL_SET_TIMEOUT);
/*
* If we successfully sent the control message, start DTI here
* because no transfer notifications will be received which is
* where DTI is normally started.
*/
if (result == 0)
result = wa_dti_start(&hwahc->wa);
else
result = 0; /* OK. Continue normally. */
if (result < 0) {
dev_err(dev, "cannot start DTI: %d\n", result);
goto error_dti_start;
}
}
return result;
error_dti_start:
wa_nep_disarm(&hwahc->wa);
error_stop:
__wa_clear_feature(&hwahc->wa, WA_ENABLE);
return result;
}
static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay)
{
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
struct wahc *wa = &hwahc->wa;
u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
int ret;
ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
WUSB_REQ_CHAN_STOP,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
delay * 1000,
iface_no,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (ret == 0)
msleep(delay);
wa_nep_disarm(&hwahc->wa);
__wa_stop(&hwahc->wa);
}
/*
* Set the UWB MAS allocation for the WUSB cluster
*
* @stream_index: stream to use (-1 for cancelling the allocation)
* @mas: mas bitmap to use
*/
static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
const struct uwb_mas_bm *mas)
{
int result;
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
struct wahc *wa = &hwahc->wa;
struct device *dev = &wa->usb_iface->dev;
u8 mas_le[UWB_NUM_MAS/8];
/* Set the stream index */
result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
WUSB_REQ_SET_STREAM_IDX,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
stream_index,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Cannot set WUSB stream index: %d\n", result);
goto out;
}
uwb_mas_bm_copy_le(mas_le, mas);
/* Set the MAS allocation */
result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
WUSB_REQ_SET_WUSB_MAS,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
mas_le, 32, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
out:
return result;
}
/*
* Add an IE to the host's MMC
*
* @interval: See WUSB1.0[8.5.3.1]
* @repeat_cnt: See WUSB1.0[8.5.3.1]
* @handle: See WUSB1.0[8.5.3.1]
* @wuie: Pointer to the header of the WUSB IE data to add.
* MUST BE allocated in a kmalloc buffer (no stack or
* vmalloc).
*
* NOTE: the format of the WUSB IEs for MMCs is different from the
* normal MBOA MAC IEs (IE Id + Length in MBOA MAC vs. Length +
* Id in WUSB IEs). Standards...you gotta love'em.
*/
static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval,
u8 repeat_cnt, u8 handle,
struct wuie_hdr *wuie)
{
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
struct wahc *wa = &hwahc->wa;
u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
WUSB_REQ_ADD_MMC_IE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
interval << 8 | repeat_cnt,
handle << 8 | iface_no,
wuie, wuie->bLength, USB_CTRL_SET_TIMEOUT);
}
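/*
 * Layout illustration (struct names here are examples, not taken from
 * the WUSB headers): the IE buffer handed to the control request above
 * starts with the length byte, unlike an MBOA MAC IE which starts with
 * the element ID, which is why wuie->bLength can be used directly as
 * the transfer length.
 */
struct wusb_mmc_ie_example {		/* WUSB ordering: Length + Id */
	u8 bLength;			/* total IE length, header included */
	u8 bIEIdentifier;		/* IE identifier */
	u8 data[];			/* IE-specific payload */
} __packed;

struct mboa_mac_ie_example {		/* MBOA MAC ordering: Id + Length */
	u8 element_id;			/* IE identifier first */
	u8 length;			/* length of the payload that follows */
	u8 data[];
} __packed;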
/*
* Remove an IE from the host's MMC
*
* @handle: See WUSB1.0[8.5.3.1]
*/
static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
{
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
struct wahc *wa = &hwahc->wa;
u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
WUSB_REQ_REMOVE_MMC_IE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, handle << 8 | iface_no,
NULL, 0, USB_CTRL_SET_TIMEOUT);
}
/*
* Update device information for a given fake port
*
* @port_idx: Fake port to which device is connected (wusbhc index, not
* USB port number).
*/
static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc,
struct wusb_dev *wusb_dev)
{
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
struct wahc *wa = &hwahc->wa;
u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
struct hwa_dev_info *dev_info;
int ret;
/* fill out the Device Info buffer and send it */
dev_info = kzalloc(sizeof(struct hwa_dev_info), GFP_KERNEL);
if (!dev_info)
return -ENOMEM;
uwb_mas_bm_copy_le(dev_info->bmDeviceAvailability,
&wusb_dev->availability);
dev_info->bDeviceAddress = wusb_dev->addr;
/*
* If the descriptors haven't been read yet, use a default PHY
* rate of 53.3 Mbit/s only. The correct value will be used
* when this is called again as part of the
* authentication process (which occurs after the descriptors
* have been read).
*/
if (wusb_dev->wusb_cap_descr)
dev_info->wPHYRates = wusb_dev->wusb_cap_descr->wPHYRates;
else
dev_info->wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53);
ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
WUSB_REQ_SET_DEV_INFO,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, wusb_dev->port_idx << 8 | iface_no,
dev_info, sizeof(struct hwa_dev_info),
USB_CTRL_SET_TIMEOUT);
kfree(dev_info);
return ret;
}
/*
* Set host's idea of which encryption (and key) method to use when
* talking to a device on a given port.
*
* If key is NULL, it means disable encryption for that "virtual port"
* (used when we disconnect).
*/
static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
const void *key, size_t key_size,
u8 key_idx)
{
int result = -ENOMEM;
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
struct wahc *wa = &hwahc->wa;
u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
struct usb_key_descriptor *keyd;
size_t keyd_len;
keyd_len = sizeof(*keyd) + key_size;
keyd = kzalloc(keyd_len, GFP_KERNEL);
if (keyd == NULL)
return -ENOMEM;
keyd->bLength = keyd_len;
keyd->bDescriptorType = USB_DT_KEY;
keyd->tTKID[0] = (tkid >> 0) & 0xff;
keyd->tTKID[1] = (tkid >> 8) & 0xff;
keyd->tTKID[2] = (tkid >> 16) & 0xff;
memcpy(keyd->bKeyData, key, key_size);
result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_SET_DESCRIPTOR,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
USB_DT_KEY << 8 | key_idx,
port_idx << 8 | iface_no,
keyd, keyd_len, USB_CTRL_SET_TIMEOUT);
kzfree(keyd); /* clear keys etc. */
return result;
}
/*
* Set host's idea of which encryption (and key) method to use when
* talking to a device on a given port.
*
* If key is NULL, it means disable encryption for that "virtual port"
* (used when we disconnect).
*/
static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
const void *key, size_t key_size)
{
int result = -ENOMEM;
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
struct wahc *wa = &hwahc->wa;
u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
u8 encryption_value;
/* Tell the host which key to use to talk to the device */
if (key) {
u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK,
WUSB_KEY_INDEX_ORIGINATOR_HOST);
result = __hwahc_dev_set_key(wusbhc, port_idx, tkid,
key, key_size, key_idx);
if (result < 0)
goto error_set_key;
encryption_value = wusbhc->ccm1_etd->bEncryptionValue;
} else {
/* FIXME: this should come from wusbhc->etd[UNSECURE].value */
encryption_value = 0;
}
/* Set the encryption type for communicating with the device */
result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_SET_ENCRYPTION,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
encryption_value, port_idx << 8 | iface_no,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(wusbhc->dev, "Can't set host's WUSB encryption for "
"port index %u to %s (value %d): %d\n", port_idx,
wusb_et_name(wusbhc->ccm1_etd->bEncryptionType),
wusbhc->ccm1_etd->bEncryptionValue, result);
error_set_key:
return result;
}
/*
* Set host's GTK key
*/
static int __hwahc_op_set_gtk(struct wusbhc *wusbhc, u32 tkid,
const void *key, size_t key_size)
{
u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
WUSB_KEY_INDEX_ORIGINATOR_HOST);
return __hwahc_dev_set_key(wusbhc, 0, tkid, key, key_size, key_idx);
}
/*
* Get the Wire Adapter class-specific descriptor
*
* NOTE: this descriptor comes with the big bundled configuration
* descriptor that includes the interface and endpoint descriptors, so
* we just look for it in the cached copy kept by the USB stack.
*
* NOTE2: We convert LE fields to CPU order.
*/
static int wa_fill_descr(struct wahc *wa)
{
int result;
struct device *dev = &wa->usb_iface->dev;
char *itr;
struct usb_device *usb_dev = wa->usb_dev;
struct usb_descriptor_header *hdr;
struct usb_wa_descriptor *wa_descr;
size_t itr_size, actconfig_idx;
actconfig_idx = (usb_dev->actconfig - usb_dev->config) /
sizeof(usb_dev->config[0]);
itr = usb_dev->rawdescriptors[actconfig_idx];
itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
while (itr_size >= sizeof(*hdr)) {
hdr = (struct usb_descriptor_header *) itr;
dev_dbg(dev, "Extra device descriptor: "
"type %02x/%u bytes @ %zu (%zu left)\n",
hdr->bDescriptorType, hdr->bLength,
(itr - usb_dev->rawdescriptors[actconfig_idx]),
itr_size);
if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER)
goto found;
itr += hdr->bLength;
itr_size -= hdr->bLength;
}
dev_err(dev, "cannot find Wire Adapter Class descriptor\n");
return -ENODEV;
found:
result = -EINVAL;
if (hdr->bLength > itr_size) { /* is it available? */
dev_err(dev, "incomplete Wire Adapter Class descriptor "
"(%zu bytes left, %u needed)\n",
itr_size, hdr->bLength);
goto error;
}
if (hdr->bLength < sizeof(*wa->wa_descr)) {
dev_err(dev, "short Wire Adapter Class descriptor\n");
goto error;
}
wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr;
if (le16_to_cpu(wa_descr->bcdWAVersion) > 0x0100)
dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n",
(le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00) >> 8,
le16_to_cpu(wa_descr->bcdWAVersion) & 0x00ff);
result = 0;
error:
return result;
}
static const struct hc_driver hwahc_hc_driver = {
.description = "hwa-hcd",
.product_desc = "Wireless USB HWA host controller",
.hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd),
.irq = NULL, /* FIXME */
.flags = HCD_USB25,
.reset = hwahc_op_reset,
.start = hwahc_op_start,
.stop = hwahc_op_stop,
.get_frame_number = hwahc_op_get_frame_number,
.urb_enqueue = hwahc_op_urb_enqueue,
.urb_dequeue = hwahc_op_urb_dequeue,
.endpoint_disable = hwahc_op_endpoint_disable,
.hub_status_data = wusbhc_rh_status_data,
.hub_control = wusbhc_rh_control,
.start_port_reset = wusbhc_rh_start_port_reset,
};
static int hwahc_security_create(struct hwahc *hwahc)
{
int result;
struct wusbhc *wusbhc = &hwahc->wusbhc;
struct usb_device *usb_dev = hwahc->wa.usb_dev;
struct device *dev = &usb_dev->dev;
struct usb_security_descriptor *secd;
struct usb_encryption_descriptor *etd;
void *itr, *top;
size_t itr_size, needed, bytes;
u8 index;
char buf[64];
/* Find the host's security descriptors in the config descr bundle */
index = (usb_dev->actconfig - usb_dev->config) /
sizeof(usb_dev->config[0]);
itr = usb_dev->rawdescriptors[index];
itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
top = itr + itr_size;
result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
USB_DT_SECURITY, (void **) &secd, sizeof(*secd));
if (result == -1) {
dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
return 0;
}
needed = sizeof(*secd);
if (top - (void *)secd < needed) {
dev_err(dev, "BUG? Not enough data to process security "
"descriptor header (%zu bytes left vs %zu needed)\n",
top - (void *) secd, needed);
return 0;
}
needed = le16_to_cpu(secd->wTotalLength);
if (top - (void *)secd < needed) {
dev_err(dev, "BUG? Not enough data to process security "
"descriptors (%zu bytes left vs %zu needed)\n",
top - (void *) secd, needed);
return 0;
}
/* Walk over the sec descriptors and store CCM1's on wusbhc */
itr = (void *) secd + sizeof(*secd);
top = (void *) secd + le16_to_cpu(secd->wTotalLength);
index = 0;
bytes = 0;
while (itr < top) {
etd = itr;
if (top - itr < sizeof(*etd)) {
dev_err(dev, "BUG: bad host security descriptor; "
"not enough data (%zu vs %zu left)\n",
top - itr, sizeof(*etd));
break;
}
if (etd->bLength < sizeof(*etd)) {
dev_err(dev, "BUG: bad host encryption descriptor; "
"descriptor is too short "
"(%zu vs %zu needed)\n",
(size_t)etd->bLength, sizeof(*etd));
break;
}
itr += etd->bLength;
bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
"%s (0x%02x) ",
wusb_et_name(etd->bEncryptionType),
etd->bEncryptionValue);
wusbhc->ccm1_etd = etd;
}
dev_info(dev, "supported encryption types: %s\n", buf);
if (wusbhc->ccm1_etd == NULL) {
dev_err(dev, "E: host doesn't support CCM-1 crypto\n");
return 0;
}
/* Pretty print what we support */
return 0;
}
static void hwahc_security_release(struct hwahc *hwahc)
{
/* nothing to do here so far... */
}
static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface,
kernel_ulong_t quirks)
{
int result;
struct device *dev = &iface->dev;
struct wusbhc *wusbhc = &hwahc->wusbhc;
struct wahc *wa = &hwahc->wa;
struct usb_device *usb_dev = interface_to_usbdev(iface);
wa->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */
wa->usb_iface = usb_get_intf(iface);
wusbhc->dev = dev;
/* defer getting the uwb_rc handle until it is needed since it
* may not have been registered by the hwa_rc driver yet. */
wusbhc->uwb_rc = NULL;
result = wa_fill_descr(wa); /* Get the device descriptor */
if (result < 0)
goto error_fill_descriptor;
if (wa->wa_descr->bNumPorts > USB_MAXCHILDREN) {
dev_err(dev, "FIXME: USB_MAXCHILDREN too low for WUSB "
"adapter (%u ports)\n", wa->wa_descr->bNumPorts);
wusbhc->ports_max = USB_MAXCHILDREN;
} else {
wusbhc->ports_max = wa->wa_descr->bNumPorts;
}
wusbhc->mmcies_max = wa->wa_descr->bNumMMCIEs;
wusbhc->start = __hwahc_op_wusbhc_start;
wusbhc->stop = __hwahc_op_wusbhc_stop;
wusbhc->mmcie_add = __hwahc_op_mmcie_add;
wusbhc->mmcie_rm = __hwahc_op_mmcie_rm;
wusbhc->dev_info_set = __hwahc_op_dev_info_set;
wusbhc->bwa_set = __hwahc_op_bwa_set;
wusbhc->set_num_dnts = __hwahc_op_set_num_dnts;
wusbhc->set_ptk = __hwahc_op_set_ptk;
wusbhc->set_gtk = __hwahc_op_set_gtk;
result = hwahc_security_create(hwahc);
if (result < 0) {
dev_err(dev, "Can't initialize security: %d\n", result);
goto error_security_create;
}
wa->wusb = wusbhc; /* FIXME: ugly, need to fix */
result = wusbhc_create(&hwahc->wusbhc);
if (result < 0) {
dev_err(dev, "Can't create WUSB HC structures: %d\n", result);
goto error_wusbhc_create;
}
result = wa_create(&hwahc->wa, iface, quirks);
if (result < 0)
goto error_wa_create;
return 0;
error_wa_create:
wusbhc_destroy(&hwahc->wusbhc);
error_wusbhc_create:
/* WA Descr fill allocs no resources */
error_security_create:
error_fill_descriptor:
usb_put_intf(iface);
usb_put_dev(usb_dev);
return result;
}
static void hwahc_destroy(struct hwahc *hwahc)
{
struct wusbhc *wusbhc = &hwahc->wusbhc;
mutex_lock(&wusbhc->mutex);
__wa_destroy(&hwahc->wa);
wusbhc_destroy(&hwahc->wusbhc);
hwahc_security_release(hwahc);
hwahc->wusbhc.dev = NULL;
uwb_rc_put(wusbhc->uwb_rc);
usb_put_intf(hwahc->wa.usb_iface);
usb_put_dev(hwahc->wa.usb_dev);
mutex_unlock(&wusbhc->mutex);
}
static void hwahc_init(struct hwahc *hwahc)
{
wa_init(&hwahc->wa);
}
static int hwahc_probe(struct usb_interface *usb_iface,
const struct usb_device_id *id)
{
int result;
struct usb_hcd *usb_hcd;
struct wusbhc *wusbhc;
struct hwahc *hwahc;
struct device *dev = &usb_iface->dev;
result = -ENOMEM;
usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa");
if (usb_hcd == NULL) {
dev_err(dev, "unable to allocate instance\n");
goto error_alloc;
}
usb_hcd->wireless = 1;
usb_hcd->self.sg_tablesize = ~0;
wusbhc = usb_hcd_to_wusbhc(usb_hcd);
hwahc = container_of(wusbhc, struct hwahc, wusbhc);
hwahc_init(hwahc);
result = hwahc_create(hwahc, usb_iface, id->driver_info);
if (result < 0) {
dev_err(dev, "Cannot initialize internals: %d\n", result);
goto error_hwahc_create;
}
result = usb_add_hcd(usb_hcd, 0, 0);
if (result < 0) {
dev_err(dev, "Cannot add HCD: %d\n", result);
goto error_add_hcd;
}
device_wakeup_enable(usb_hcd->self.controller);
result = wusbhc_b_create(&hwahc->wusbhc);
if (result < 0) {
dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
goto error_wusbhc_b_create;
}
return 0;
error_wusbhc_b_create:
usb_remove_hcd(usb_hcd);
error_add_hcd:
hwahc_destroy(hwahc);
error_hwahc_create:
usb_put_hcd(usb_hcd);
error_alloc:
return result;
}
static void hwahc_disconnect(struct usb_interface *usb_iface)
{
struct usb_hcd *usb_hcd;
struct wusbhc *wusbhc;
struct hwahc *hwahc;
usb_hcd = usb_get_intfdata(usb_iface);
wusbhc = usb_hcd_to_wusbhc(usb_hcd);
hwahc = container_of(wusbhc, struct hwahc, wusbhc);
wusbhc_b_destroy(&hwahc->wusbhc);
usb_remove_hcd(usb_hcd);
hwahc_destroy(hwahc);
usb_put_hcd(usb_hcd);
}
static const struct usb_device_id hwahc_id_table[] = {
/* Alereon 5310 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x02, 0x01),
.driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
/* Alereon 5611 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x02, 0x01),
.driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
/* FIXME: use class labels for this */
{ USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
{},
};
MODULE_DEVICE_TABLE(usb, hwahc_id_table);
static struct usb_driver hwahc_driver = {
.name = "hwa-hc",
.probe = hwahc_probe,
.disconnect = hwahc_disconnect,
.id_table = hwahc_id_table,
};
module_usb_driver(hwahc_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver");
MODULE_LICENSE("GPL");

View File

@ -1,14 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o
whci-hcd-y := \
asl.o \
debug.o \
hcd.o \
hw.o \
init.o \
int.o \
pzl.o \
qset.o \
wusb.o

View File

@ -1,376 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) asynchronous schedule management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
#include "../../../uwb/include/umc.h"
#include "../../wusbhc.h"
#include "whcd.h"
static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset,
struct whc_qset **next, struct whc_qset **prev)
{
struct list_head *n, *p;
BUG_ON(list_empty(&whc->async_list));
n = qset->list_node.next;
if (n == &whc->async_list)
n = n->next;
p = qset->list_node.prev;
if (p == &whc->async_list)
p = p->prev;
*next = container_of(n, struct whc_qset, list_node);
*prev = container_of(p, struct whc_qset, list_node);
}
static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset)
{
list_move(&qset->list_node, &whc->async_list);
qset->in_sw_list = true;
}
static void asl_qset_insert(struct whc *whc, struct whc_qset *qset)
{
struct whc_qset *next, *prev;
qset_clear(whc, qset);
/* Link into ASL. */
qset_get_next_prev(whc, qset, &next, &prev);
whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma);
whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma);
qset->in_hw_list = true;
}
static void asl_qset_remove(struct whc *whc, struct whc_qset *qset)
{
struct whc_qset *prev, *next;
qset_get_next_prev(whc, qset, &next, &prev);
list_move(&qset->list_node, &whc->async_removed_list);
qset->in_sw_list = false;
/*
* No more qsets in the ASL? The caller must stop the ASL as
* it's no longer valid.
*/
if (list_empty(&whc->async_list))
return;
/* Remove from ASL. */
whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma);
qset->in_hw_list = false;
}
/**
* process_qset - process any recently inactivated or halted qTDs in a
* qset.
*
* After inactive qTDs are removed, new qTDs can be added if the
* urb queue still contains URBs.
*
* Returns any additional WUSBCMD bits for the ASL sync command (i.e.,
* WUSBCMD_ASYNC_QSET_RM if a halted qset was removed).
*/
static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
{
enum whc_update update = 0;
uint32_t status = 0;
while (qset->ntds) {
struct whc_qtd *td;
td = &qset->qtd[qset->td_start];
status = le32_to_cpu(td->status);
/*
* Nothing to do with a still active qTD.
*/
if (status & QTD_STS_ACTIVE)
break;
if (status & QTD_STS_HALTED) {
/* Ug, an error. */
process_halted_qtd(whc, qset, td);
/* A halted qTD always triggers an update
because the qset was either removed or
reactivated. */
update |= WHC_UPDATE_UPDATED;
goto done;
}
/* Mmm, a completed qTD. */
process_inactive_qtd(whc, qset, td);
}
if (!qset->remove)
update |= qset_add_qtds(whc, qset);
done:
/*
* Remove this qset from the ASL if requested, but only if it has
* no qTDs.
*/
if (qset->remove && qset->ntds == 0) {
asl_qset_remove(whc, qset);
update |= WHC_UPDATE_REMOVED;
}
return update;
}
void asl_start(struct whc *whc)
{
struct whc_qset *qset;
qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR);
whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN);
whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED,
1000, "start ASL");
}
void asl_stop(struct whc *whc)
{
whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0);
whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
WUSBSTS_ASYNC_SCHED, 0,
1000, "stop ASL");
}
/**
* asl_update - request an ASL update and wait for the hardware to be synced
* @whc: the WHCI HC
* @wusbcmd: WUSBCMD value to start the update.
*
* If the WUSB HC is inactive (i.e., the ASL is stopped) then the
* update must be skipped as the hardware may not respond to update
* requests.
*/
void asl_update(struct whc *whc, uint32_t wusbcmd)
{
struct wusbhc *wusbhc = &whc->wusbhc;
long t;
mutex_lock(&wusbhc->mutex);
if (wusbhc->active) {
whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
t = wait_event_timeout(
whc->async_list_wq,
(le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0,
msecs_to_jiffies(1000));
if (t == 0)
whc_hw_error(whc, "ASL update timeout");
}
mutex_unlock(&wusbhc->mutex);
}
/**
* scan_async_work - scan the ASL for qsets to process.
*
* Process each qset in the ASL in turn and then signal the WHC that
* the ASL has been updated.
*
* Then start, stop or update the asynchronous schedule as required.
*/
void scan_async_work(struct work_struct *work)
{
struct whc *whc = container_of(work, struct whc, async_work);
struct whc_qset *qset, *t;
enum whc_update update = 0;
spin_lock_irq(&whc->lock);
/*
* Traverse the software list backwards so new qsets can be
* safely inserted into the ASL without making it non-circular.
*/
list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
if (!qset->in_hw_list) {
asl_qset_insert(whc, qset);
update |= WHC_UPDATE_ADDED;
}
update |= process_qset(whc, qset);
}
spin_unlock_irq(&whc->lock);
if (update) {
uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB;
if (update & WHC_UPDATE_REMOVED)
wusbcmd |= WUSBCMD_ASYNC_QSET_RM;
asl_update(whc, wusbcmd);
}
/*
* Now that the ASL is updated, complete the removal of any
* removed qsets.
*
* If the qset was to be reset, do so and reinsert it into the
* ASL if it has pending transfers.
*/
spin_lock_irq(&whc->lock);
list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
qset_remove_complete(whc, qset);
if (qset->reset) {
qset_reset(whc, qset);
if (!list_empty(&qset->stds)) {
asl_qset_insert_begin(whc, qset);
queue_work(whc->workqueue, &whc->async_work);
}
}
}
spin_unlock_irq(&whc->lock);
}
/**
* asl_urb_enqueue - queue an URB onto the asynchronous list (ASL).
* @whc: the WHCI host controller
* @urb: the URB to enqueue
* @mem_flags: flags for any memory allocations
*
* The qset for the endpoint is obtained and the URB queued onto it.
*
* Work is scheduled to update the hardware's view of the ASL.
*/
int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
{
struct whc_qset *qset;
int err;
unsigned long flags;
spin_lock_irqsave(&whc->lock, flags);
err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
if (err < 0) {
spin_unlock_irqrestore(&whc->lock, flags);
return err;
}
qset = get_qset(whc, urb, GFP_ATOMIC);
if (qset == NULL)
err = -ENOMEM;
else
err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
if (!err) {
if (!qset->in_sw_list && !qset->remove)
asl_qset_insert_begin(whc, qset);
} else
usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
spin_unlock_irqrestore(&whc->lock, flags);
if (!err)
queue_work(whc->workqueue, &whc->async_work);
return err;
}
/**
* asl_urb_dequeue - remove an URB (qset) from the async list.
* @whc: the WHCI host controller
* @urb: the URB to dequeue
* @status: the current status of the URB
*
* URBs that do not yet have qTDs can simply be removed from the software
* queue, otherwise the qset must be removed from the ASL so the qTDs
* can be removed.
*/
int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
{
struct whc_urb *wurb = urb->hcpriv;
struct whc_qset *qset = wurb->qset;
struct whc_std *std, *t;
bool has_qtd = false;
int ret;
unsigned long flags;
spin_lock_irqsave(&whc->lock, flags);
ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
if (ret < 0)
goto out;
list_for_each_entry_safe(std, t, &qset->stds, list_node) {
if (std->urb == urb) {
if (std->qtd)
has_qtd = true;
qset_free_std(whc, std);
} else
std->qtd = NULL; /* so this std is re-added when the qset is */
}
if (has_qtd) {
asl_qset_remove(whc, qset);
wurb->status = status;
wurb->is_async = true;
queue_work(whc->workqueue, &wurb->dequeue_work);
} else
qset_remove_urb(whc, qset, urb, status);
out:
spin_unlock_irqrestore(&whc->lock, flags);
return ret;
}
/**
* asl_qset_delete - delete a qset from the ASL
*/
void asl_qset_delete(struct whc *whc, struct whc_qset *qset)
{
qset->remove = 1;
queue_work(whc->workqueue, &whc->async_work);
qset_delete(whc, qset);
}
/**
* asl_init - initialize the asynchronous schedule list
*
* A dummy qset with no qTDs is added to the ASL to simplify removing
* qsets (no need to stop the ASL when the last qset is removed).
*/
int asl_init(struct whc *whc)
{
struct whc_qset *qset;
qset = qset_alloc(whc, GFP_KERNEL);
if (qset == NULL)
return -ENOMEM;
asl_qset_insert_begin(whc, qset);
asl_qset_insert(whc, qset);
return 0;
}
/**
* asl_clean_up - free ASL resources
*
* The ASL is stopped and empty except for the dummy qset.
*/
void asl_clean_up(struct whc *whc)
{
struct whc_qset *qset;
if (!list_empty(&whc->async_list)) {
qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
list_del(&qset->list_node);
qset_free(whc, qset);
}
}

View File

@ -1,153 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) debug.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include "../../wusbhc.h"
#include "whcd.h"
struct whc_dbg {
struct dentry *di_f;
struct dentry *asl_f;
struct dentry *pzl_f;
};
static void qset_print(struct seq_file *s, struct whc_qset *qset)
{
static const char *qh_type[] = {
"ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", };
struct whc_std *std;
struct urb *urb = NULL;
int i;
seq_printf(s, "qset %08x", (u32)qset->qset_dma);
if (&qset->list_node == qset->whc->async_list.prev) {
seq_printf(s, " (dummy)\n");
} else {
seq_printf(s, " ep%d%s-%s maxpkt: %d\n",
qset->qh.info1 & 0x0f,
(qset->qh.info1 >> 4) & 0x1 ? "in" : "out",
qh_type[(qset->qh.info1 >> 5) & 0x7],
(qset->qh.info1 >> 16) & 0xffff);
}
seq_printf(s, " -> %08x\n", (u32)qset->qh.link);
seq_printf(s, " info: %08x %08x %08x\n",
qset->qh.info1, qset->qh.info2, qset->qh.info3);
seq_printf(s, " sts: %04x errs: %d curwin: %08x\n",
qset->qh.status, qset->qh.err_count, qset->qh.cur_window);
seq_printf(s, " TD: sts: %08x opts: %08x\n",
qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
i == qset->td_start ? 'S' : ' ',
i == qset->td_end ? 'E' : ' ',
i, qset->qtd[i].status, qset->qtd[i].options,
(u32)qset->qtd[i].page_list_ptr);
}
seq_printf(s, " ntds: %d\n", qset->ntds);
list_for_each_entry(std, &qset->stds, list_node) {
if (urb != std->urb) {
urb = std->urb;
seq_printf(s, " urb %p transferred: %d bytes\n", urb,
urb->actual_length);
}
if (std->qtd)
seq_printf(s, " sTD[%td]: %zu bytes @ %08x\n",
std->qtd - &qset->qtd[0],
std->len, std->num_pointers ?
(u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
else
seq_printf(s, " sTD[-]: %zd bytes @ %08x\n",
std->len, std->num_pointers ?
(u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
}
}
static int di_show(struct seq_file *s, void *p)
{
struct whc *whc = s->private;
int d;
for (d = 0; d < whc->n_devices; d++) {
struct di_buf_entry *di = &whc->di_buf[d];
seq_printf(s, "DI[%d]\n", d);
seq_printf(s, " availability: %*pb\n",
UWB_NUM_MAS, (unsigned long *)di->availability_info);
seq_printf(s, " %c%c key idx: %d dev addr: %d\n",
(di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ',
(di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ',
(di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8,
(di->addr_sec_info & WHC_DI_DEV_ADDR_MASK));
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(di);
static int asl_show(struct seq_file *s, void *p)
{
struct whc *whc = s->private;
struct whc_qset *qset;
list_for_each_entry(qset, &whc->async_list, list_node) {
qset_print(s, qset);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(asl);
static int pzl_show(struct seq_file *s, void *p)
{
struct whc *whc = s->private;
struct whc_qset *qset;
int period;
for (period = 0; period < 5; period++) {
seq_printf(s, "Period %d\n", period);
list_for_each_entry(qset, &whc->periodic_list[period], list_node) {
qset_print(s, qset);
}
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(pzl);
void whc_dbg_init(struct whc *whc)
{
if (whc->wusbhc.pal.debugfs_dir == NULL)
return;
whc->dbg = kzalloc(sizeof(struct whc_dbg), GFP_KERNEL);
if (whc->dbg == NULL)
return;
whc->dbg->di_f = debugfs_create_file("di", 0444,
whc->wusbhc.pal.debugfs_dir, whc,
&di_fops);
whc->dbg->asl_f = debugfs_create_file("asl", 0444,
whc->wusbhc.pal.debugfs_dir, whc,
&asl_fops);
whc->dbg->pzl_f = debugfs_create_file("pzl", 0444,
whc->wusbhc.pal.debugfs_dir, whc,
&pzl_fops);
}
void whc_dbg_clean_up(struct whc *whc)
{
if (whc->dbg) {
debugfs_remove(whc->dbg->pzl_f);
debugfs_remove(whc->dbg->asl_f);
debugfs_remove(whc->dbg->di_f);
kfree(whc->dbg);
}
}

View File

@ -1,356 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) driver.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include "../../../uwb/include/umc.h"
#include "../../wusbhc.h"
#include "whcd.h"
/*
* One time initialization.
*
* Nothing to do here.
*/
static int whc_reset(struct usb_hcd *usb_hcd)
{
return 0;
}
/*
* Start the wireless host controller.
*
* Start device notification.
*
* Put hc into run state, set DNTS parameters.
*/
static int whc_start(struct usb_hcd *usb_hcd)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct whc *whc = wusbhc_to_whc(wusbhc);
u8 bcid;
int ret;
mutex_lock(&wusbhc->mutex);
le_writel(WUSBINTR_GEN_CMD_DONE
| WUSBINTR_HOST_ERR
| WUSBINTR_ASYNC_SCHED_SYNCED
| WUSBINTR_DNTS_INT
| WUSBINTR_ERR_INT
| WUSBINTR_INT,
whc->base + WUSBINTR);
/* set cluster ID */
bcid = wusb_cluster_id_get();
ret = whc_set_cluster_id(whc, bcid);
if (ret < 0)
goto out;
wusbhc->cluster_id = bcid;
/* start HC */
whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN);
usb_hcd->uses_new_polling = 1;
set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
usb_hcd->state = HC_STATE_RUNNING;
out:
mutex_unlock(&wusbhc->mutex);
return ret;
}
/*
* Stop the wireless host controller.
*
* Stop device notification.
*
* Wait for pending transfer to stop? Put hc into stop state?
*/
static void whc_stop(struct usb_hcd *usb_hcd)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct whc *whc = wusbhc_to_whc(wusbhc);
mutex_lock(&wusbhc->mutex);
/* stop HC */
le_writel(0, whc->base + WUSBINTR);
whc_write_wusbcmd(whc, WUSBCMD_RUN, 0);
whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
WUSBSTS_HCHALTED, WUSBSTS_HCHALTED,
100, "HC to halt");
wusb_cluster_id_put(wusbhc->cluster_id);
mutex_unlock(&wusbhc->mutex);
}
static int whc_get_frame_number(struct usb_hcd *usb_hcd)
{
/* Frame numbers are not applicable to WUSB. */
return -ENOSYS;
}
/*
* Queue an URB to the ASL or PZL
*/
static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
gfp_t mem_flags)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct whc *whc = wusbhc_to_whc(wusbhc);
int ret;
switch (usb_pipetype(urb->pipe)) {
case PIPE_INTERRUPT:
ret = pzl_urb_enqueue(whc, urb, mem_flags);
break;
case PIPE_ISOCHRONOUS:
dev_err(&whc->umc->dev, "isochronous transfers unsupported\n");
ret = -ENOTSUPP;
break;
case PIPE_CONTROL:
case PIPE_BULK:
default:
ret = asl_urb_enqueue(whc, urb, mem_flags);
break;
}
return ret;
}
/*
* Remove a queued URB from the ASL or PZL.
*/
static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct whc *whc = wusbhc_to_whc(wusbhc);
int ret;
switch (usb_pipetype(urb->pipe)) {
case PIPE_INTERRUPT:
ret = pzl_urb_dequeue(whc, urb, status);
break;
case PIPE_ISOCHRONOUS:
ret = -ENOTSUPP;
break;
case PIPE_CONTROL:
case PIPE_BULK:
default:
ret = asl_urb_dequeue(whc, urb, status);
break;
}
return ret;
}
/*
* Wait for all URBs to the endpoint to be completed, then delete the
* qset.
*/
static void whc_endpoint_disable(struct usb_hcd *usb_hcd,
struct usb_host_endpoint *ep)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct whc *whc = wusbhc_to_whc(wusbhc);
struct whc_qset *qset;
qset = ep->hcpriv;
if (qset) {
ep->hcpriv = NULL;
if (usb_endpoint_xfer_bulk(&ep->desc)
|| usb_endpoint_xfer_control(&ep->desc))
asl_qset_delete(whc, qset);
else
pzl_qset_delete(whc, qset);
}
}
static void whc_endpoint_reset(struct usb_hcd *usb_hcd,
struct usb_host_endpoint *ep)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct whc *whc = wusbhc_to_whc(wusbhc);
struct whc_qset *qset;
unsigned long flags;
spin_lock_irqsave(&whc->lock, flags);
qset = ep->hcpriv;
if (qset) {
qset->remove = 1;
qset->reset = 1;
if (usb_endpoint_xfer_bulk(&ep->desc)
|| usb_endpoint_xfer_control(&ep->desc))
queue_work(whc->workqueue, &whc->async_work);
else
queue_work(whc->workqueue, &whc->periodic_work);
}
spin_unlock_irqrestore(&whc->lock, flags);
}
static const struct hc_driver whc_hc_driver = {
.description = "whci-hcd",
.product_desc = "Wireless host controller",
.hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd),
.irq = whc_int_handler,
.flags = HCD_USB2,
.reset = whc_reset,
.start = whc_start,
.stop = whc_stop,
.get_frame_number = whc_get_frame_number,
.urb_enqueue = whc_urb_enqueue,
.urb_dequeue = whc_urb_dequeue,
.endpoint_disable = whc_endpoint_disable,
.endpoint_reset = whc_endpoint_reset,
.hub_status_data = wusbhc_rh_status_data,
.hub_control = wusbhc_rh_control,
.start_port_reset = wusbhc_rh_start_port_reset,
};
static int whc_probe(struct umc_dev *umc)
{
int ret;
struct usb_hcd *usb_hcd;
struct wusbhc *wusbhc;
struct whc *whc;
struct device *dev = &umc->dev;
usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci");
if (usb_hcd == NULL) {
dev_err(dev, "unable to create hcd\n");
return -ENOMEM;
}
usb_hcd->wireless = 1;
usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */
wusbhc = usb_hcd_to_wusbhc(usb_hcd);
whc = wusbhc_to_whc(wusbhc);
whc->umc = umc;
ret = whc_init(whc);
if (ret)
goto error_whc_init;
wusbhc->dev = dev;
wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent);
if (!wusbhc->uwb_rc) {
ret = -ENODEV;
dev_err(dev, "cannot get radio controller\n");
goto error_uwb_rc;
}
if (whc->n_devices > USB_MAXCHILDREN) {
dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n",
whc->n_devices);
wusbhc->ports_max = USB_MAXCHILDREN;
} else
wusbhc->ports_max = whc->n_devices;
wusbhc->mmcies_max = whc->n_mmc_ies;
wusbhc->start = whc_wusbhc_start;
wusbhc->stop = whc_wusbhc_stop;
wusbhc->mmcie_add = whc_mmcie_add;
wusbhc->mmcie_rm = whc_mmcie_rm;
wusbhc->dev_info_set = whc_dev_info_set;
wusbhc->bwa_set = whc_bwa_set;
wusbhc->set_num_dnts = whc_set_num_dnts;
wusbhc->set_ptk = whc_set_ptk;
wusbhc->set_gtk = whc_set_gtk;
ret = wusbhc_create(wusbhc);
if (ret)
goto error_wusbhc_create;
ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED);
if (ret) {
dev_err(dev, "cannot add HCD: %d\n", ret);
goto error_usb_add_hcd;
}
device_wakeup_enable(usb_hcd->self.controller);
ret = wusbhc_b_create(wusbhc);
if (ret) {
dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret);
goto error_wusbhc_b_create;
}
whc_dbg_init(whc);
return 0;
error_wusbhc_b_create:
usb_remove_hcd(usb_hcd);
error_usb_add_hcd:
wusbhc_destroy(wusbhc);
error_wusbhc_create:
uwb_rc_put(wusbhc->uwb_rc);
error_uwb_rc:
whc_clean_up(whc);
error_whc_init:
usb_put_hcd(usb_hcd);
return ret;
}
static void whc_remove(struct umc_dev *umc)
{
struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev);
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct whc *whc = wusbhc_to_whc(wusbhc);
if (usb_hcd) {
whc_dbg_clean_up(whc);
wusbhc_b_destroy(wusbhc);
usb_remove_hcd(usb_hcd);
wusbhc_destroy(wusbhc);
uwb_rc_put(wusbhc->uwb_rc);
whc_clean_up(whc);
usb_put_hcd(usb_hcd);
}
}
static struct umc_driver whci_hc_driver = {
.name = "whci-hcd",
.cap_id = UMC_CAP_ID_WHCI_WUSB_HC,
.probe = whc_probe,
.remove = whc_remove,
};
static int __init whci_hc_driver_init(void)
{
return umc_driver_register(&whci_hc_driver);
}
module_init(whci_hc_driver_init);
static void __exit whci_hc_driver_exit(void)
{
umc_driver_unregister(&whci_hc_driver);
}
module_exit(whci_hc_driver_exit);
/* PCI device ID's that we handle (so it gets loaded) */
static struct pci_device_id __used whci_hcd_id_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
{ /* empty last entry */ }
};
MODULE_DEVICE_TABLE(pci, whci_hcd_id_table);
MODULE_DESCRIPTION("WHCI Wireless USB host controller driver");
MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
MODULE_LICENSE("GPL");

View File

@ -1,93 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) hardware access helpers.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include "../../../uwb/include/umc.h"
#include "../../wusbhc.h"
#include "whcd.h"
void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val)
{
unsigned long flags;
u32 cmd;
spin_lock_irqsave(&whc->lock, flags);
cmd = le_readl(whc->base + WUSBCMD);
cmd = (cmd & ~mask) | val;
le_writel(cmd, whc->base + WUSBCMD);
spin_unlock_irqrestore(&whc->lock, flags);
}
/**
* whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register
* @whc: the WHCI HC
* @cmd: command to start.
* @params: parameters for the command (the WUSBGENCMDPARAMS register value).
* @addr: pointer to any data for the command (may be NULL).
* @len: length of the data (if any).
*/
int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len)
{
unsigned long flags;
dma_addr_t dma_addr;
int t;
int ret = 0;
mutex_lock(&whc->mutex);
/* Wait for previous command to complete. */
t = wait_event_timeout(whc->cmd_wq,
(le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0,
WHC_GENCMD_TIMEOUT_MS);
if (t == 0) {
dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n",
le_readl(whc->base + WUSBGENCMDSTS),
le_readl(whc->base + WUSBGENCMDPARAMS));
ret = -ETIMEDOUT;
goto out;
}
if (addr) {
memcpy(whc->gen_cmd_buf, addr, len);
dma_addr = whc->gen_cmd_buf_dma;
} else
dma_addr = 0;
/* Poke registers to start cmd. */
spin_lock_irqsave(&whc->lock, flags);
le_writel(params, whc->base + WUSBGENCMDPARAMS);
le_writeq(dma_addr, whc->base + WUSBGENADDR);
le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd,
whc->base + WUSBGENCMDSTS);
spin_unlock_irqrestore(&whc->lock, flags);
out:
mutex_unlock(&whc->mutex);
return ret;
}
/**
* whc_hw_error - recover from a hardware error
* @whc: the WHCI HC that broke.
* @reason: a description of the failure.
*
* Recover from broken hardware with a full reset.
*/
void whc_hw_error(struct whc *whc, const char *reason)
{
struct wusbhc *wusbhc = &whc->wusbhc;
dev_err(&whc->umc->dev, "hardware error: %s\n", reason);
wusbhc_reset_all(wusbhc);
}

View File

@ -1,177 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) initialization.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include "../../../uwb/include/umc.h"
#include "../../wusbhc.h"
#include "whcd.h"
/*
* Reset the host controller.
*/
static void whc_hw_reset(struct whc *whc)
{
le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD);
whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0,
100, "reset");
}
static void whc_hw_init_di_buf(struct whc *whc)
{
int d;
/* Disable all entries in the Device Information buffer. */
for (d = 0; d < whc->n_devices; d++)
whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE;
le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR);
}
static void whc_hw_init_dn_buf(struct whc *whc)
{
/* Clear the Device Notification buffer to ensure the V (valid)
* bits are clear. */
memset(whc->dn_buf, 0, 4096);
le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR);
}
int whc_init(struct whc *whc)
{
u32 whcsparams;
int ret, i;
resource_size_t start, len;
spin_lock_init(&whc->lock);
mutex_init(&whc->mutex);
init_waitqueue_head(&whc->cmd_wq);
init_waitqueue_head(&whc->async_list_wq);
init_waitqueue_head(&whc->periodic_list_wq);
whc->workqueue = alloc_ordered_workqueue(dev_name(&whc->umc->dev), 0);
if (whc->workqueue == NULL) {
ret = -ENOMEM;
goto error;
}
INIT_WORK(&whc->dn_work, whc_dn_work);
INIT_WORK(&whc->async_work, scan_async_work);
INIT_LIST_HEAD(&whc->async_list);
INIT_LIST_HEAD(&whc->async_removed_list);
INIT_WORK(&whc->periodic_work, scan_periodic_work);
for (i = 0; i < 5; i++)
INIT_LIST_HEAD(&whc->periodic_list[i]);
INIT_LIST_HEAD(&whc->periodic_removed_list);
/* Map HC registers. */
start = whc->umc->resource.start;
len = whc->umc->resource.end - start + 1;
if (!request_mem_region(start, len, "whci-hc")) {
dev_err(&whc->umc->dev, "can't request HC region\n");
ret = -EBUSY;
goto error;
}
whc->base_phys = start;
whc->base = ioremap(start, len);
if (!whc->base) {
dev_err(&whc->umc->dev, "ioremap\n");
ret = -ENOMEM;
goto error;
}
whc_hw_reset(whc);
/* Read maximum number of devices, keys and MMC IEs. */
whcsparams = le_readl(whc->base + WHCSPARAMS);
whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams);
whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams);
whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams);
dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n",
whc->n_devices, whc->n_keys, whc->n_mmc_ies);
whc->qset_pool = dma_pool_create("qset", &whc->umc->dev,
sizeof(struct whc_qset), 64, 0);
if (whc->qset_pool == NULL) {
ret = -ENOMEM;
goto error;
}
ret = asl_init(whc);
if (ret < 0)
goto error;
ret = pzl_init(whc);
if (ret < 0)
goto error;
/* Allocate and initialize a buffer for generic commands, the
Device Information buffer, and the Device Notification
buffer. */
whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
&whc->gen_cmd_buf_dma, GFP_KERNEL);
if (whc->gen_cmd_buf == NULL) {
ret = -ENOMEM;
goto error;
}
whc->dn_buf = dma_alloc_coherent(&whc->umc->dev,
sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
&whc->dn_buf_dma, GFP_KERNEL);
if (!whc->dn_buf) {
ret = -ENOMEM;
goto error;
}
whc_hw_init_dn_buf(whc);
whc->di_buf = dma_alloc_coherent(&whc->umc->dev,
sizeof(struct di_buf_entry) * whc->n_devices,
&whc->di_buf_dma, GFP_KERNEL);
if (!whc->di_buf) {
ret = -ENOMEM;
goto error;
}
whc_hw_init_di_buf(whc);
return 0;
error:
whc_clean_up(whc);
return ret;
}
void whc_clean_up(struct whc *whc)
{
resource_size_t len;
if (whc->di_buf)
dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices,
whc->di_buf, whc->di_buf_dma);
if (whc->dn_buf)
dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
whc->dn_buf, whc->dn_buf_dma);
if (whc->gen_cmd_buf)
dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
whc->gen_cmd_buf, whc->gen_cmd_buf_dma);
pzl_clean_up(whc);
asl_clean_up(whc);
dma_pool_destroy(whc->qset_pool);
len = resource_size(&whc->umc->resource);
if (whc->base)
iounmap(whc->base);
if (whc->base_phys)
release_mem_region(whc->base_phys, len);
if (whc->workqueue)
destroy_workqueue(whc->workqueue);
}

View File

@ -1,82 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) interrupt handling.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include "../../../uwb/include/umc.h"
#include "../../wusbhc.h"
#include "whcd.h"
static void transfer_done(struct whc *whc)
{
queue_work(whc->workqueue, &whc->async_work);
queue_work(whc->workqueue, &whc->periodic_work);
}
irqreturn_t whc_int_handler(struct usb_hcd *hcd)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd);
struct whc *whc = wusbhc_to_whc(wusbhc);
u32 sts;
sts = le_readl(whc->base + WUSBSTS);
if (!(sts & WUSBSTS_INT_MASK))
return IRQ_NONE;
le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS);
if (sts & WUSBSTS_GEN_CMD_DONE)
wake_up(&whc->cmd_wq);
if (sts & WUSBSTS_HOST_ERR)
dev_err(&whc->umc->dev, "FIXME: host system error\n");
if (sts & WUSBSTS_ASYNC_SCHED_SYNCED)
wake_up(&whc->async_list_wq);
if (sts & WUSBSTS_PERIODIC_SCHED_SYNCED)
wake_up(&whc->periodic_list_wq);
if (sts & WUSBSTS_DNTS_INT)
queue_work(whc->workqueue, &whc->dn_work);
/*
* A transfer completed (see [WHCI] section 4.7.1.2 for when
* this occurs).
*/
if (sts & (WUSBSTS_INT | WUSBSTS_ERR_INT))
transfer_done(whc);
return IRQ_HANDLED;
}
static int process_dn_buf(struct whc *whc)
{
struct wusbhc *wusbhc = &whc->wusbhc;
struct dn_buf_entry *dn;
int processed = 0;
for (dn = whc->dn_buf; dn < whc->dn_buf + WHC_N_DN_ENTRIES; dn++) {
if (dn->status & WHC_DN_STATUS_VALID) {
wusbhc_handle_dn(wusbhc, dn->src_addr,
(struct wusb_dn_hdr *)dn->dn_data,
dn->msg_size);
dn->status &= ~WHC_DN_STATUS_VALID;
processed++;
}
}
return processed;
}
void whc_dn_work(struct work_struct *work)
{
struct whc *whc = container_of(work, struct whc, dn_work);
int processed;
do {
processed = process_dn_buf(whc);
} while (processed);
}

View File

@ -1,404 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) periodic schedule management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
#include "../../../uwb/include/umc.h"
#include "../../wusbhc.h"
#include "whcd.h"
static void update_pzl_pointers(struct whc *whc, int period, u64 addr)
{
switch (period) {
case 0:
whc_qset_set_link_ptr(&whc->pz_list[0], addr);
whc_qset_set_link_ptr(&whc->pz_list[2], addr);
whc_qset_set_link_ptr(&whc->pz_list[4], addr);
whc_qset_set_link_ptr(&whc->pz_list[6], addr);
whc_qset_set_link_ptr(&whc->pz_list[8], addr);
whc_qset_set_link_ptr(&whc->pz_list[10], addr);
whc_qset_set_link_ptr(&whc->pz_list[12], addr);
whc_qset_set_link_ptr(&whc->pz_list[14], addr);
break;
case 1:
whc_qset_set_link_ptr(&whc->pz_list[1], addr);
whc_qset_set_link_ptr(&whc->pz_list[5], addr);
whc_qset_set_link_ptr(&whc->pz_list[9], addr);
whc_qset_set_link_ptr(&whc->pz_list[13], addr);
break;
case 2:
whc_qset_set_link_ptr(&whc->pz_list[3], addr);
whc_qset_set_link_ptr(&whc->pz_list[11], addr);
break;
case 3:
whc_qset_set_link_ptr(&whc->pz_list[7], addr);
break;
case 4:
whc_qset_set_link_ptr(&whc->pz_list[15], addr);
break;
}
}
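/*
 * Equivalent generic form (sketch only; the driver keeps the unrolled
 * switch above): for period p, the list heads at index i with
 * i % (1 << (p + 1)) == (1 << p) - 1 are pointed at the period-p chain.
 * Together with the chaining done in update_pzl_hw_view(), this gives a
 * binary-tree schedule in which a period-p qset is serviced once every
 * 2^p of the 16 periodic zones.
 */
static void __maybe_unused update_pzl_pointers_sketch(struct whc *whc,
						      int period, u64 addr)
{
	int i;

	for (i = (1 << period) - 1; i < 16; i += 1 << (period + 1))
		whc_qset_set_link_ptr(&whc->pz_list[i], addr);
}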
/*
* Return the 'period' to use for this qset. The minimum interval for
* the endpoint is used so that, whatever URBs are submitted, the device is
* polled often enough.
*/
static int qset_get_period(struct whc *whc, struct whc_qset *qset)
{
uint8_t bInterval = qset->ep->desc.bInterval;
if (bInterval < 6)
bInterval = 6;
if (bInterval > 10)
bInterval = 10;
return bInterval - 6;
}
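/*
 * Worked example (illustrative): an interrupt endpoint with
 * bInterval = 8 maps to period 8 - 6 = 2, so its qset lives on
 * periodic_list[2] and ends up being serviced once every 2^2 = 4
 * periodic zones. bInterval values outside 6..10 are clamped into
 * that range first.
 */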
static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset)
{
int period;
period = qset_get_period(whc, qset);
qset_clear(whc, qset);
list_move(&qset->list_node, &whc->periodic_list[period]);
qset->in_sw_list = true;
}
static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset)
{
list_move(&qset->list_node, &whc->periodic_removed_list);
qset->in_hw_list = false;
qset->in_sw_list = false;
}
/**
* pzl_process_qset - process any recently inactivated or halted qTDs
* in a qset.
*
* After inactive qTDs are removed, new qTDs can be added if the
* urb queue still contains URBs.
*
* Returns the schedule updates required.
*/
static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset)
{
enum whc_update update = 0;
uint32_t status = 0;
while (qset->ntds) {
struct whc_qtd *td;
td = &qset->qtd[qset->td_start];
status = le32_to_cpu(td->status);
/*
* Nothing to do with a still active qTD.
*/
if (status & QTD_STS_ACTIVE)
break;
if (status & QTD_STS_HALTED) {
/* Ug, an error. */
process_halted_qtd(whc, qset, td);
/* A halted qTD always triggers an update
because the qset was either removed or
reactivated. */
update |= WHC_UPDATE_UPDATED;
goto done;
}
/* Mmm, a completed qTD. */
process_inactive_qtd(whc, qset, td);
}
if (!qset->remove)
update |= qset_add_qtds(whc, qset);
done:
/*
* If there are no qTDs in this qset, remove it from the PZL.
*/
if (qset->remove && qset->ntds == 0) {
pzl_qset_remove(whc, qset);
update |= WHC_UPDATE_REMOVED;
}
return update;
}
/**
* pzl_start - start the periodic schedule
* @whc: the WHCI host controller
*
* The PZL must be valid (e.g., all entries in the list should have
* the T bit set).
*/
void pzl_start(struct whc *whc)
{
le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN);
whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED,
1000, "start PZL");
}
/**
* pzl_stop - stop the periodic schedule
* @whc: the WHCI host controller
*/
void pzl_stop(struct whc *whc)
{
whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0);
whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
WUSBSTS_PERIODIC_SCHED, 0,
1000, "stop PZL");
}
/**
* pzl_update - request a PZL update and wait for the hardware to be synced
* @whc: the WHCI HC
* @wusbcmd: WUSBCMD value to start the update.
*
* If the WUSB HC is inactive (i.e., the PZL is stopped) then the
* update must be skipped as the hardware may not respond to update
* requests.
*/
void pzl_update(struct whc *whc, uint32_t wusbcmd)
{
struct wusbhc *wusbhc = &whc->wusbhc;
long t;
mutex_lock(&wusbhc->mutex);
if (wusbhc->active) {
whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
t = wait_event_timeout(
whc->periodic_list_wq,
(le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0,
msecs_to_jiffies(1000));
if (t == 0)
whc_hw_error(whc, "PZL update timeout");
}
mutex_unlock(&wusbhc->mutex);
}
static void update_pzl_hw_view(struct whc *whc)
{
struct whc_qset *qset, *t;
int period;
u64 tmp_qh = 0;
for (period = 0; period < 5; period++) {
list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
whc_qset_set_link_ptr(&qset->qh.link, tmp_qh);
tmp_qh = qset->qset_dma;
qset->in_hw_list = true;
}
update_pzl_pointers(whc, period, tmp_qh);
}
}
/**
* scan_periodic_work - scan the PZL for qsets to process.
*
* Process each qset in the PZL in turn and then signal the WHC that
* the PZL has been updated.
*
* Then start, stop or update the periodic schedule as required.
*/
void scan_periodic_work(struct work_struct *work)
{
struct whc *whc = container_of(work, struct whc, periodic_work);
struct whc_qset *qset, *t;
enum whc_update update = 0;
int period;
spin_lock_irq(&whc->lock);
for (period = 4; period >= 0; period--) {
list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
if (!qset->in_hw_list)
update |= WHC_UPDATE_ADDED;
update |= pzl_process_qset(whc, qset);
}
}
if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED))
update_pzl_hw_view(whc);
spin_unlock_irq(&whc->lock);
if (update) {
uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB;
if (update & WHC_UPDATE_REMOVED)
wusbcmd |= WUSBCMD_PERIODIC_QSET_RM;
pzl_update(whc, wusbcmd);
}
/*
* Now that the PZL is updated, complete the removal of any
* removed qsets.
*
* If the qset was to be reset, do so and reinsert it into the
* PZL if it has pending transfers.
*/
spin_lock_irq(&whc->lock);
list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
qset_remove_complete(whc, qset);
if (qset->reset) {
qset_reset(whc, qset);
if (!list_empty(&qset->stds)) {
qset_insert_in_sw_list(whc, qset);
queue_work(whc->workqueue, &whc->periodic_work);
}
}
}
spin_unlock_irq(&whc->lock);
}
/**
* pzl_urb_enqueue - queue an URB onto the periodic list (PZL)
* @whc: the WHCI host controller
* @urb: the URB to enqueue
* @mem_flags: flags for any memory allocations
*
* The qset for the endpoint is obtained and the urb queued on to it.
*
* Work is scheduled to update the hardware's view of the PZL.
*/
int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
{
struct whc_qset *qset;
int err;
unsigned long flags;
spin_lock_irqsave(&whc->lock, flags);
err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
if (err < 0) {
spin_unlock_irqrestore(&whc->lock, flags);
return err;
}
qset = get_qset(whc, urb, GFP_ATOMIC);
if (qset == NULL)
err = -ENOMEM;
else
err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
if (!err) {
if (!qset->in_sw_list && !qset->remove)
qset_insert_in_sw_list(whc, qset);
} else
usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
spin_unlock_irqrestore(&whc->lock, flags);
if (!err)
queue_work(whc->workqueue, &whc->periodic_work);
return err;
}
/**
* pzl_urb_dequeue - remove an URB (qset) from the periodic list
* @whc: the WHCI host controller
* @urb: the URB to dequeue
* @status: the current status of the URB
*
* URBs that do not yet have qTDs can simply be removed from the software
* queue; otherwise the qset must be removed so the qTDs can be safely
* removed.
*/
int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
{
struct whc_urb *wurb = urb->hcpriv;
struct whc_qset *qset = wurb->qset;
struct whc_std *std, *t;
bool has_qtd = false;
int ret;
unsigned long flags;
spin_lock_irqsave(&whc->lock, flags);
ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
if (ret < 0)
goto out;
list_for_each_entry_safe(std, t, &qset->stds, list_node) {
if (std->urb == urb) {
if (std->qtd)
has_qtd = true;
qset_free_std(whc, std);
} else
std->qtd = NULL; /* so this std is re-added when the qset is reinserted */
}
if (has_qtd) {
pzl_qset_remove(whc, qset);
update_pzl_hw_view(whc);
wurb->status = status;
wurb->is_async = false;
queue_work(whc->workqueue, &wurb->dequeue_work);
} else
qset_remove_urb(whc, qset, urb, status);
out:
spin_unlock_irqrestore(&whc->lock, flags);
return ret;
}
/**
* pzl_qset_delete - delete a qset from the PZL
*/
void pzl_qset_delete(struct whc *whc, struct whc_qset *qset)
{
qset->remove = 1;
queue_work(whc->workqueue, &whc->periodic_work);
qset_delete(whc, qset);
}
/**
* pzl_init - initialize the periodic zone list
* @whc: the WHCI host controller
*/
int pzl_init(struct whc *whc)
{
int i;
whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16,
&whc->pz_list_dma, GFP_KERNEL);
if (whc->pz_list == NULL)
return -ENOMEM;
/* Set T bit on all elements in PZL. */
for (i = 0; i < 16; i++)
whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
return 0;
}
/**
* pzl_clean_up - free PZL resources
* @whc: the WHCI host controller
*
* The PZL is stopped and empty.
*/
void pzl_clean_up(struct whc *whc)
{
if (whc->pz_list)
dma_free_coherent(&whc->umc->dev, sizeof(u64) * 16, whc->pz_list,
whc->pz_list_dma);
}

View File

@ -1,831 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) qset management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "../../../uwb/include/umc.h"
#include "../../wusbhc.h"
#include "whcd.h"
struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
struct whc_qset *qset;
dma_addr_t dma;
qset = dma_pool_zalloc(whc->qset_pool, mem_flags, &dma);
if (qset == NULL)
return NULL;
qset->qset_dma = dma;
qset->whc = whc;
INIT_LIST_HEAD(&qset->list_node);
INIT_LIST_HEAD(&qset->stds);
return qset;
}
/**
* qset_fill_qh - fill the static endpoint state in a qset's QHead
* @qset: the qset whose QH needs initializing with static endpoint
* state
* @urb: an urb for a transfer to this endpoint
*/
static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
{
struct usb_device *usb_dev = urb->dev;
struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
struct usb_wireless_ep_comp_descriptor *epcd;
bool is_out;
uint8_t phy_rate;
is_out = usb_pipeout(urb->pipe);
qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);
epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
if (epcd) {
qset->max_seq = epcd->bMaxSequence;
qset->max_burst = epcd->bMaxBurst;
} else {
qset->max_seq = 2;
qset->max_burst = 1;
}
/*
* Initial PHY rate is 53.3 Mbit/s for control endpoints or
* the maximum supported by the device for other endpoints
* (unless limited by the user).
*/
if (usb_pipecontrol(urb->pipe))
phy_rate = UWB_PHY_RATE_53;
else {
uint16_t phy_rates;
phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
phy_rate = fls(phy_rates) - 1;
if (phy_rate > whc->wusbhc.phy_rate)
phy_rate = whc->wusbhc.phy_rate;
}
qset->qh.info1 = cpu_to_le32(
QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
| usb_pipe_to_qh_type(urb->pipe)
| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
| QH_INFO1_MAX_PKT_LEN(qset->max_packet)
);
qset->qh.info2 = cpu_to_le32(
QH_INFO2_BURST(qset->max_burst)
| QH_INFO2_DBP(0)
| QH_INFO2_MAX_COUNT(3)
| QH_INFO2_MAX_RETRY(3)
| QH_INFO2_MAX_SEQ(qset->max_seq - 1)
);
/* FIXME: where can we obtain these Tx parameters from? Why
* doesn't the chip know what Tx power to use? It knows the Rx
* strength and can presumably guess the Tx power required
* from that? */
qset->qh.info3 = cpu_to_le32(
QH_INFO3_TX_RATE(phy_rate)
| QH_INFO3_TX_PWR(0) /* 0 == max power */
);
qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}
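/*
 * A stand-alone sketch (not part of the driver) of how the "maximum
 * supported" rate above falls out of wPHYRates: bit n of the bitmap is
 * set when PHY rate index n is supported, so the highest set bit is the
 * fastest advertised rate, and fls() being 1-based explains the "- 1".
 * fls_u16() below stands in for the kernel's fls().
 */
#include <stdio.h>

static int fls_u16(unsigned int v)
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;	/* 1-based position of the highest set bit, 0 if none */
}

int main(void)
{
	unsigned int phy_rates = 0x00ff;	/* rate indices 0..7 supported */
	int phy_rate = fls_u16(phy_rates) - 1;	/* -> 7, the fastest advertised rate */

	printf("initial PHY rate index: %d\n", phy_rate);
	return 0;
}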
/**
* qset_clear - clear fields in a qset so it may be reinserted into a
* schedule.
*
* The sequence number and current window are not cleared (see
* qset_reset()).
*/
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
qset->td_start = qset->td_end = qset->ntds = 0;
qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
qset->qh.err_count = 0;
qset->qh.scratch[0] = 0;
qset->qh.scratch[1] = 0;
qset->qh.scratch[2] = 0;
memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));
init_completion(&qset->remove_complete);
}
/**
* qset_reset - reset endpoint state in a qset.
*
* Clears the sequence number and current window. This qset must not
* be in the ASL or PZL.
*/
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
qset->reset = 0;
qset->qh.status &= ~QH_STATUS_SEQ_MASK;
qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}
/**
* get_qset - get the qset for an async endpoint
*
* A new qset is created if one does not already exist.
*/
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
gfp_t mem_flags)
{
struct whc_qset *qset;
qset = urb->ep->hcpriv;
if (qset == NULL) {
qset = qset_alloc(whc, mem_flags);
if (qset == NULL)
return NULL;
qset->ep = urb->ep;
urb->ep->hcpriv = qset;
qset_fill_qh(whc, qset, urb);
}
return qset;
}
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
qset->remove = 0;
list_del_init(&qset->list_node);
complete(&qset->remove_complete);
}
/**
* qset_add_qtds - add qTDs for an URB to a qset
*
* Returns true if the list (ASL/PZL) must be updated because (for a
* WHCI 0.95 controller) an activated qTD was pointed to by iCur.
*/
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
struct whc_std *std;
enum whc_update update = 0;
list_for_each_entry(std, &qset->stds, list_node) {
struct whc_qtd *qtd;
uint32_t status;
if (qset->ntds >= WHCI_QSET_TD_MAX
|| (qset->pause_after_urb && std->urb != qset->pause_after_urb))
break;
if (std->qtd)
continue; /* already has a qTD */
qtd = std->qtd = &qset->qtd[qset->td_end];
/* Fill in setup bytes for control transfers. */
if (usb_pipecontrol(std->urb->pipe))
memcpy(qtd->setup, std->urb->setup_packet, 8);
status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);
if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
status |= QTD_STS_LAST_PKT;
/*
* For an IN transfer the iAlt field should be set so
* the h/w will automatically advance to the next
* transfer. However, if there are 8 or more TDs
* remaining in this transfer then iAlt cannot be set
* as it could point to somewhere in this transfer.
*/
if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
int ialt;
ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
status |= QTD_STS_IALT(ialt);
} else if (usb_pipein(std->urb->pipe))
qset->pause_after_urb = std->urb;
if (std->num_pointers)
qtd->options = cpu_to_le32(QTD_OPT_IOC);
else
qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
qtd->page_list_ptr = cpu_to_le64(std->dma_addr);
qtd->status = cpu_to_le32(status);
if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
update = WHC_UPDATE_UPDATED;
if (++qset->td_end >= WHCI_QSET_TD_MAX)
qset->td_end = 0;
qset->ntds++;
}
return update;
}
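/*
 * Worked example for the iAlt logic above (illustration only, with
 * WHCI_QSET_TD_MAX == 8): a qTD placed at td_end == 6 for a transfer
 * that still has ntds_remaining == 3 TDs (slots 6, 7 and 0) gets
 * iAlt = (6 + 3) % 8 == 1 -- the slot just past the transfer's last TD,
 * where the hardware should continue after a short read.
 */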
/**
* qset_remove_qtd - remove the first qTD from a qset.
*
* The qTD might be still active (if it's part of a IN URB that
* resulted in a short read) so ensure it's deactivated.
*/
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
qset->qtd[qset->td_start].status = 0;
if (++qset->td_start >= WHCI_QSET_TD_MAX)
qset->td_start = 0;
qset->ntds--;
}
static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
{
struct scatterlist *sg;
void *bounce;
size_t remaining, offset;
bounce = std->bounce_buf;
remaining = std->len;
sg = std->bounce_sg;
offset = std->bounce_offset;
while (remaining) {
size_t len;
len = min(sg->length - offset, remaining);
memcpy(sg_virt(sg) + offset, bounce, len);
bounce += len;
remaining -= len;
offset += len;
if (offset >= sg->length) {
sg = sg_next(sg);
offset = 0;
}
}
}
/**
* qset_free_std - remove an sTD and free it.
* @whc: the WHCI host controller
* @std: the sTD to remove and free.
*/
void qset_free_std(struct whc *whc, struct whc_std *std)
{
list_del(&std->list_node);
if (std->bounce_buf) {
bool is_out = usb_pipeout(std->urb->pipe);
dma_addr_t dma_addr;
if (std->num_pointers)
dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
else
dma_addr = std->dma_addr;
dma_unmap_single(whc->wusbhc.dev, dma_addr,
std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (!is_out)
qset_copy_bounce_to_sg(whc, std);
kfree(std->bounce_buf);
}
if (std->pl_virt) {
if (!dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
std->num_pointers * sizeof(struct whc_page_list_entry),
DMA_TO_DEVICE);
kfree(std->pl_virt);
std->pl_virt = NULL;
}
kfree(std);
}
/**
* qset_remove_qtds - remove an URB's qTDs (and sTDs).
*/
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
struct urb *urb)
{
struct whc_std *std, *t;
list_for_each_entry_safe(std, t, &qset->stds, list_node) {
if (std->urb != urb)
break;
if (std->qtd != NULL)
qset_remove_qtd(whc, qset);
qset_free_std(whc, std);
}
}
/**
* qset_free_stds - free any remaining sTDs for an URB.
*/
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
struct whc_std *std, *t;
list_for_each_entry_safe(std, t, &qset->stds, list_node) {
if (std->urb == urb)
qset_free_std(qset->whc, std);
}
}
static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
dma_addr_t dma_addr = std->dma_addr;
dma_addr_t sp, ep;
size_t pl_len;
int p;
/* Short buffers don't need a page list. */
if (std->len <= WHCI_PAGE_SIZE) {
std->num_pointers = 0;
return 0;
}
sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
ep = dma_addr + std->len;
std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
std->pl_virt = kmalloc(pl_len, mem_flags);
if (std->pl_virt == NULL)
return -ENOMEM;
std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
kfree(std->pl_virt);
return -EFAULT;
}
for (p = 0; p < std->num_pointers; p++) {
std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
}
return 0;
}
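/*
 * A stand-alone check (not part of the driver) of the page-list sizing
 * above: one entry is needed per WHCI page touched, counting from the
 * page-aligned start "sp" to the end address "ep".  For example, a
 * 10000-byte buffer starting 0xf00 bytes into a page spans
 * DIV_ROUND_UP(0xf00 + 10000, 4096) == 4 pages.
 */
#include <assert.h>

#define EX_PAGE_SIZE 4096u

static unsigned int ex_num_pointers(unsigned long long dma_addr, unsigned int len)
{
	unsigned long long sp = dma_addr & ~(unsigned long long)(EX_PAGE_SIZE - 1);
	unsigned long long ep = dma_addr + len;

	return (ep - sp + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE;	/* DIV_ROUND_UP */
}

int main(void)
{
	assert(ex_num_pointers(0x10000f00ULL, 10000) == 4);
	assert(ex_num_pointers(0x10000000ULL, 4096) == 1);	/* exactly one page */
	return 0;
}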
/**
* urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
*/
static void urb_dequeue_work(struct work_struct *work)
{
struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
struct whc_qset *qset = wurb->qset;
struct whc *whc = qset->whc;
unsigned long flags;
if (wurb->is_async)
asl_update(whc, WUSBCMD_ASYNC_UPDATED
| WUSBCMD_ASYNC_SYNCED_DB
| WUSBCMD_ASYNC_QSET_RM);
else
pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
| WUSBCMD_PERIODIC_SYNCED_DB
| WUSBCMD_PERIODIC_QSET_RM);
spin_lock_irqsave(&whc->lock, flags);
qset_remove_urb(whc, qset, wurb->urb, wurb->status);
spin_unlock_irqrestore(&whc->lock, flags);
}
static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
struct urb *urb, gfp_t mem_flags)
{
struct whc_std *std;
std = kzalloc(sizeof(struct whc_std), mem_flags);
if (std == NULL)
return NULL;
std->urb = urb;
std->qtd = NULL;
INIT_LIST_HEAD(&std->list_node);
list_add_tail(&std->list_node, &qset->stds);
return std;
}
static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
gfp_t mem_flags)
{
size_t remaining;
struct scatterlist *sg;
int i;
int ntds = 0;
struct whc_std *std = NULL;
struct whc_page_list_entry *new_pl_virt;
dma_addr_t prev_end = 0;
size_t pl_len;
int p = 0;
remaining = urb->transfer_buffer_length;
for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
dma_addr_t dma_addr;
size_t dma_remaining;
dma_addr_t sp, ep;
int num_pointers;
if (remaining == 0) {
break;
}
dma_addr = sg_dma_address(sg);
dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);
while (dma_remaining) {
size_t dma_len;
/*
* We can use the previous std (if it exists) provided that:
* - the previous one ended on a page boundary.
* - the current one begins on a page boundary.
* - the previous one isn't full.
*
* If a new std is needed but the previous one
* was not a whole number of packets then this
* sg list cannot be mapped onto multiple
* qTDs. Return an error and let the caller
* sort it out.
*/
if (!std
|| (prev_end & (WHCI_PAGE_SIZE-1))
|| (dma_addr & (WHCI_PAGE_SIZE-1))
|| std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
if (std && std->len % qset->max_packet != 0)
return -EINVAL;
std = qset_new_std(whc, qset, urb, mem_flags);
if (std == NULL) {
return -ENOMEM;
}
ntds++;
p = 0;
}
dma_len = dma_remaining;
/*
* If the remainder of this element doesn't
* fit in a single qTD, limit the qTD to a
* whole number of packets. This allows the
* remainder to go into the next qTD.
*/
if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
* qset->max_packet - std->len;
}
std->len += dma_len;
std->ntds_remaining = -1; /* filled in later */
sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
ep = dma_addr + dma_len;
num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
std->num_pointers += num_pointers;
pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
if (new_pl_virt == NULL) {
kfree(std->pl_virt);
std->pl_virt = NULL;
return -ENOMEM;
}
std->pl_virt = new_pl_virt;
for (;p < std->num_pointers; p++) {
std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
}
prev_end = dma_addr = ep;
dma_remaining -= dma_len;
remaining -= dma_len;
}
}
/* Now the number of sTDs is known, go back and fill in
std->ntds_remaining. */
list_for_each_entry(std, &qset->stds, list_node) {
if (std->ntds_remaining == -1) {
pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
pl_len, DMA_TO_DEVICE);
if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
return -EFAULT;
std->ntds_remaining = ntds--;
}
}
return 0;
}
/**
* qset_add_urb_sg_linearize - add an urb with sg list, copying the data
*
* If the URB contains an sg list whose elements cannot be directly
* mapped to qTDs then the data must be transferred via bounce
* buffers.
*/
static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
struct urb *urb, gfp_t mem_flags)
{
bool is_out = usb_pipeout(urb->pipe);
size_t max_std_len;
size_t remaining;
int ntds = 0;
struct whc_std *std = NULL;
void *bounce = NULL;
struct scatterlist *sg;
int i;
/* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
max_std_len = qset->max_burst * qset->max_packet;
remaining = urb->transfer_buffer_length;
for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
size_t len;
size_t sg_remaining;
void *orig;
if (remaining == 0) {
break;
}
sg_remaining = min_t(size_t, remaining, sg->length);
orig = sg_virt(sg);
while (sg_remaining) {
if (!std || std->len == max_std_len) {
std = qset_new_std(whc, qset, urb, mem_flags);
if (std == NULL)
return -ENOMEM;
std->bounce_buf = kmalloc(max_std_len, mem_flags);
if (std->bounce_buf == NULL)
return -ENOMEM;
std->bounce_sg = sg;
std->bounce_offset = orig - sg_virt(sg);
bounce = std->bounce_buf;
ntds++;
}
len = min(sg_remaining, max_std_len - std->len);
if (is_out)
memcpy(bounce, orig, len);
std->len += len;
std->ntds_remaining = -1; /* filled in later */
bounce += len;
orig += len;
sg_remaining -= len;
remaining -= len;
}
}
/*
* For each of the new sTDs, map the bounce buffers, create
* page lists (if necessary), and fill in std->ntds_remaining.
*/
list_for_each_entry(std, &qset->stds, list_node) {
if (std->ntds_remaining != -1)
continue;
std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (dma_mapping_error(&whc->umc->dev, std->dma_addr))
return -EFAULT;
if (qset_fill_page_list(whc, std, mem_flags) < 0)
return -ENOMEM;
std->ntds_remaining = ntds--;
}
return 0;
}
/**
* qset_add_urb - add an urb to the qset's queue.
*
* The URB is chopped into sTDs, one for each qTD that will be required.
* At least one qTD (and sTD) is required even if the transfer has no
* data (e.g., for some control transfers).
*/
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
gfp_t mem_flags)
{
struct whc_urb *wurb;
int remaining = urb->transfer_buffer_length;
u64 transfer_dma = urb->transfer_dma;
int ntds_remaining;
int ret;
wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
if (wurb == NULL)
goto err_no_mem;
urb->hcpriv = wurb;
wurb->qset = qset;
wurb->urb = urb;
INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
if (urb->num_sgs) {
ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
if (ret == -EINVAL) {
qset_free_stds(qset, urb);
ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
}
if (ret < 0)
goto err_no_mem;
return 0;
}
ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
if (ntds_remaining == 0)
ntds_remaining = 1;
while (ntds_remaining) {
struct whc_std *std;
size_t std_len;
std_len = remaining;
if (std_len > QTD_MAX_XFER_SIZE)
std_len = QTD_MAX_XFER_SIZE;
std = qset_new_std(whc, qset, urb, mem_flags);
if (std == NULL)
goto err_no_mem;
std->dma_addr = transfer_dma;
std->len = std_len;
std->ntds_remaining = ntds_remaining;
if (qset_fill_page_list(whc, std, mem_flags) < 0)
goto err_no_mem;
ntds_remaining--;
remaining -= std_len;
transfer_dma += std_len;
}
return 0;
err_no_mem:
qset_free_stds(qset, urb);
return -ENOMEM;
}
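/*
 * A stand-alone check (not part of the driver) of how a non-sg URB is
 * chopped up above: with QTD_MAX_XFER_SIZE == 1048575 the number of
 * sTDs is DIV_ROUND_UP(transfer_buffer_length, QTD_MAX_XFER_SIZE), with
 * a minimum of one even for zero-length transfers.
 */
#include <assert.h>

#define EX_QTD_MAX_XFER_SIZE 1048575u

static unsigned int ex_ntds(unsigned int len)
{
	unsigned int n = (len + EX_QTD_MAX_XFER_SIZE - 1) / EX_QTD_MAX_XFER_SIZE;

	return n ? n : 1;	/* at least one sTD/qTD, e.g. for no-data control transfers */
}

int main(void)
{
	assert(ex_ntds(0) == 1);
	assert(ex_ntds(1048575) == 1);
	assert(ex_ntds(1048576) == 2);		/* one byte over the per-qTD limit */
	assert(ex_ntds(3 * 1048575) == 3);
	return 0;
}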
/**
* qset_remove_urb - remove an URB from the urb queue.
*
* The URB is returned to the USB subsystem.
*/
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
struct urb *urb, int status)
{
struct wusbhc *wusbhc = &whc->wusbhc;
struct whc_urb *wurb = urb->hcpriv;
usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
/* Drop the lock as urb->complete() may enqueue another urb. */
spin_unlock(&whc->lock);
wusbhc_giveback_urb(wusbhc, urb, status);
spin_lock(&whc->lock);
kfree(wurb);
}
/**
* get_urb_status_from_qtd - get the completed urb status from qTD status
* @urb: completed urb
* @status: qTD status
*/
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
if (status & QTD_STS_HALTED) {
if (status & QTD_STS_DBE)
return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
else if (status & QTD_STS_BABBLE)
return -EOVERFLOW;
else if (status & QTD_STS_RCE)
return -ETIME;
return -EPIPE;
}
if (usb_pipein(urb->pipe)
&& (urb->transfer_flags & URB_SHORT_NOT_OK)
&& urb->actual_length < urb->transfer_buffer_length)
return -EREMOTEIO;
return 0;
}
/**
* process_inactive_qtd - process an inactive (but not halted) qTD.
*
* Update the urb with the number of bytes transferred by the qTD. If
* the urb is completely transferred, or (for an IN transfer only) the
* LPF is set, then the transfer is complete and the urb should be
* returned to the system.
*/
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
struct whc_qtd *qtd)
{
struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
struct urb *urb = std->urb;
uint32_t status;
bool complete;
status = le32_to_cpu(qtd->status);
urb->actual_length += std->len - QTD_STS_TO_LEN(status);
if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
complete = true;
else
complete = whc_std_last(std);
qset_remove_qtd(whc, qset);
qset_free_std(whc, std);
/*
* Transfers for this URB are complete? Then return it to the
* USB subsystem.
*/
if (complete) {
qset_remove_qtds(whc, qset, urb);
qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));
/*
* If iAlt isn't valid then the hardware didn't
* advance iCur. Adjust the start and end pointers to
* match iCur.
*/
if (!(status & QTD_STS_IALT_VALID))
qset->td_start = qset->td_end
= QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
qset->pause_after_urb = NULL;
}
}
/**
* process_halted_qtd - process a qset with a halted qtd
*
* Remove all the qTDs for the failed URB and return the failed URB to
* the USB subsystem. Then remove all other qTDs so the qset can be
* removed.
*
* FIXME: this is the point where rate adaptation can be done. If a
* transfer failed because it exceeded the maximum number of retries
* then it could be reactivated with a slower rate without having to
* remove the qset.
*/
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
struct whc_qtd *qtd)
{
struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
struct urb *urb = std->urb;
int urb_status;
urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));
qset_remove_qtds(whc, qset, urb);
qset_remove_urb(whc, qset, urb, urb_status);
list_for_each_entry(std, &qset->stds, list_node) {
if (qset->ntds == 0)
break;
qset_remove_qtd(whc, qset);
std->qtd = NULL;
}
qset->remove = 1;
}
void qset_free(struct whc *whc, struct whc_qset *qset)
{
dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}
/**
* qset_delete - wait for a qset to be unused, then free it.
*/
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
wait_for_completion(&qset->remove_complete);
qset_free(whc, qset);
}

View File

@ -1,202 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) private header.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#ifndef __WHCD_H
#define __WHCD_H
#include <linux/workqueue.h>
#include "../../../uwb/include/whci.h"
#include "../../../uwb/include/umc.h"
#include "whci-hc.h"
/* Generic command timeout. */
#define WHC_GENCMD_TIMEOUT_MS 100
struct whc_dbg;
struct whc {
struct wusbhc wusbhc;
struct umc_dev *umc;
resource_size_t base_phys;
void __iomem *base;
int irq;
u8 n_devices;
u8 n_keys;
u8 n_mmc_ies;
u64 *pz_list;
struct dn_buf_entry *dn_buf;
struct di_buf_entry *di_buf;
dma_addr_t pz_list_dma;
dma_addr_t dn_buf_dma;
dma_addr_t di_buf_dma;
spinlock_t lock;
struct mutex mutex;
void * gen_cmd_buf;
dma_addr_t gen_cmd_buf_dma;
wait_queue_head_t cmd_wq;
struct workqueue_struct *workqueue;
struct work_struct dn_work;
struct dma_pool *qset_pool;
struct list_head async_list;
struct list_head async_removed_list;
wait_queue_head_t async_list_wq;
struct work_struct async_work;
struct list_head periodic_list[5];
struct list_head periodic_removed_list;
wait_queue_head_t periodic_list_wq;
struct work_struct periodic_work;
struct whc_dbg *dbg;
};
#define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc))
/**
* struct whc_std - a software TD.
* @urb: the URB this sTD is for.
* @offset: start of the URB's data for this TD.
* @len: the length of data in the associated TD.
* @ntds_remaining: number of TDs (starting from this one) in this transfer.
*
* @bounce_buf: a bounce buffer if the std was from an urb with a sg
* list that could not be mapped to qTDs directly.
* @bounce_sg: the first scatterlist element bounce_buf is for.
* @bounce_offset: the offset into bounce_sg for the start of bounce_buf.
*
* Queued URBs may require more TDs than are available in a qset so we
* use a list of these "software TDs" (sTDs) to hold per-TD data.
*/
struct whc_std {
struct urb *urb;
size_t len;
int ntds_remaining;
struct whc_qtd *qtd;
struct list_head list_node;
int num_pointers;
dma_addr_t dma_addr;
struct whc_page_list_entry *pl_virt;
void *bounce_buf;
struct scatterlist *bounce_sg;
unsigned bounce_offset;
};
/**
* struct whc_urb - per URB host controller structure.
* @urb: the URB this struct is for.
* @qset: the qset associated to the URB.
* @dequeue_work: the work to remove the URB when dequeued.
* @is_async: true if the URB belongs to the async scheduler.
* @status: the status to be returned when calling wusbhc_giveback_urb.
*/
struct whc_urb {
struct urb *urb;
struct whc_qset *qset;
struct work_struct dequeue_work;
bool is_async;
int status;
};
/**
* whc_std_last - is this sTD the URB's last?
* @std: the sTD to check.
*/
static inline bool whc_std_last(struct whc_std *std)
{
return std->ntds_remaining <= 1;
}
enum whc_update {
WHC_UPDATE_ADDED = 0x01,
WHC_UPDATE_REMOVED = 0x02,
WHC_UPDATE_UPDATED = 0x04,
};
/* init.c */
int whc_init(struct whc *whc);
void whc_clean_up(struct whc *whc);
/* hw.c */
void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val);
int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len);
void whc_hw_error(struct whc *whc, const char *reason);
/* wusb.c */
int whc_wusbhc_start(struct wusbhc *wusbhc);
void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay);
int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
u8 handle, struct wuie_hdr *wuie);
int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle);
int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm);
int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev);
int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots);
int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
const void *ptk, size_t key_size);
int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
const void *gtk, size_t key_size);
int whc_set_cluster_id(struct whc *whc, u8 bcid);
/* int.c */
irqreturn_t whc_int_handler(struct usb_hcd *hcd);
void whc_dn_work(struct work_struct *work);
/* asl.c */
void asl_start(struct whc *whc);
void asl_stop(struct whc *whc);
int asl_init(struct whc *whc);
void asl_clean_up(struct whc *whc);
int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
void asl_qset_delete(struct whc *whc, struct whc_qset *qset);
void scan_async_work(struct work_struct *work);
/* pzl.c */
int pzl_init(struct whc *whc);
void pzl_clean_up(struct whc *whc);
void pzl_start(struct whc *whc);
void pzl_stop(struct whc *whc);
int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
void pzl_qset_delete(struct whc *whc, struct whc_qset *qset);
void scan_periodic_work(struct work_struct *work);
/* qset.c */
struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags);
void qset_free(struct whc *whc, struct whc_qset *qset);
struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags);
void qset_delete(struct whc *whc, struct whc_qset *qset);
void qset_clear(struct whc *whc, struct whc_qset *qset);
void qset_reset(struct whc *whc, struct whc_qset *qset);
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
gfp_t mem_flags);
void qset_free_std(struct whc *whc, struct whc_std *std);
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
struct urb *urb, int status);
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
struct whc_qtd *qtd);
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
struct whc_qtd *qtd);
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset);
void qset_remove_complete(struct whc *whc, struct whc_qset *qset);
void pzl_update(struct whc *whc, uint32_t wusbcmd);
void asl_update(struct whc *whc, uint32_t wusbcmd);
/* debug.c */
void whc_dbg_init(struct whc *whc);
void whc_dbg_clean_up(struct whc *whc);
#endif /* #ifndef __WHCD_H */

View File

@ -1,401 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) data structures.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#ifndef _WHCI_WHCI_HC_H
#define _WHCI_WHCI_HC_H
#include <linux/list.h>
/**
* WHCI_PAGE_SIZE - page size used by WHCI
*
* WHCI assumes that the host system uses pages of 4096 octets.
*/
#define WHCI_PAGE_SIZE 4096
/**
* QTD_MAX_XFER_SIZE - max number of bytes to transfer with a single
* qtd.
*
* This is 2^20 - 1.
*/
#define QTD_MAX_XFER_SIZE 1048575
/**
* struct whc_qtd - Queue Element Transfer Descriptors (qTD)
*
* This describes the data for a bulk, control or interrupt transfer.
*
* [WHCI] section 3.2.4
*/
struct whc_qtd {
__le32 status; /*< remaining transfer len and transfer status */
__le32 options;
__le64 page_list_ptr; /*< physical pointer to data buffer page list*/
__u8 setup[8]; /*< setup data for control transfers */
} __attribute__((packed));
#define QTD_STS_ACTIVE (1 << 31) /* enable execution of transaction */
#define QTD_STS_HALTED (1 << 30) /* transfer halted */
#define QTD_STS_DBE (1 << 29) /* data buffer error */
#define QTD_STS_BABBLE (1 << 28) /* babble detected */
#define QTD_STS_RCE (1 << 27) /* retry count exceeded */
#define QTD_STS_LAST_PKT (1 << 26) /* set Last Packet Flag in WUSB header */
#define QTD_STS_INACTIVE (1 << 25) /* queue set is marked inactive */
#define QTD_STS_IALT_VALID (1 << 23) /* iAlt field is valid */
#define QTD_STS_IALT(i) (QTD_STS_IALT_VALID | ((i) << 20)) /* iAlt field */
#define QTD_STS_LEN(l) ((l) << 0) /* transfer length */
#define QTD_STS_TO_LEN(s) ((s) & 0x000fffff)
#define QTD_OPT_IOC (1 << 1) /* interrupt on complete */
#define QTD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */
/**
* struct whc_itd - Isochronous Queue Element Transfer Descriptors (iTD)
*
* This describes the data and other parameters for an isochronous
* transfer.
*
* [WHCI] section 3.2.5
*/
struct whc_itd {
__le16 presentation_time; /*< presentation time for OUT transfers */
__u8 num_segments; /*< number of data segments in segment list */
__u8 status; /*< command execution status */
__le32 options; /*< misc transfer options */
__le64 page_list_ptr; /*< physical pointer to data buffer page list */
__le64 seg_list_ptr; /*< physical pointer to segment list */
} __attribute__((packed));
#define ITD_STS_ACTIVE (1 << 7) /* enable execution of transaction */
#define ITD_STS_DBE (1 << 5) /* data buffer error */
#define ITD_STS_BABBLE (1 << 4) /* babble detected */
#define ITD_STS_INACTIVE (1 << 1) /* queue set is marked inactive */
#define ITD_OPT_IOC (1 << 1) /* interrupt on complete */
#define ITD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */
/**
* Page list entry.
*
* A TD's page list must contain sufficient page list entries for the
* total data length in the TD.
*
* [WHCI] section 3.2.4.3
*/
struct whc_page_list_entry {
__le64 buf_ptr; /*< physical pointer to buffer */
} __attribute__((packed));
/**
* struct whc_seg_list_entry - Segment list entry.
*
* Describes a portion of the data buffer described in the containing
* qTD's page list.
*
* seg_ptr = qtd->page_list_ptr[qtd->seg_list_ptr[seg].idx].buf_ptr
* + qtd->seg_list_ptr[seg].offset;
*
* Segments can't cross page boundaries.
*
* [WHCI] section 3.2.5.5
*/
struct whc_seg_list_entry {
__le16 len; /*< segment length */
__u8 idx; /*< index into page list */
__u8 status; /*< segment status */
__le16 offset; /*< 12 bit offset into page */
} __attribute__((packed));
/**
* struct whc_qhead - endpoint and status information for a qset.
*
* [WHCI] section 3.2.6
*/
struct whc_qhead {
__le64 link; /*< next qset in list */
__le32 info1;
__le32 info2;
__le32 info3;
__le16 status;
__le16 err_count; /*< transaction error count */
__le32 cur_window;
__le32 scratch[3]; /*< h/w scratch area */
union {
struct whc_qtd qtd;
struct whc_itd itd;
} overlay;
} __attribute__((packed));
#define QH_LINK_PTR_MASK (~0x03Full)
#define QH_LINK_PTR(ptr) ((ptr) & QH_LINK_PTR_MASK)
#define QH_LINK_IQS (1 << 4) /* isochronous queue set */
#define QH_LINK_NTDS(n) (((n) - 1) << 1) /* number of TDs in queue set */
#define QH_LINK_T (1 << 0) /* last queue set in periodic schedule list */
#define QH_INFO1_EP(e) ((e) << 0) /* endpoint number */
#define QH_INFO1_DIR_IN (1 << 4) /* IN transfer */
#define QH_INFO1_DIR_OUT (0 << 4) /* OUT transfer */
#define QH_INFO1_TR_TYPE_CTRL (0x0 << 5) /* control transfer */
#define QH_INFO1_TR_TYPE_ISOC (0x1 << 5) /* isochronous transfer */
#define QH_INFO1_TR_TYPE_BULK (0x2 << 5) /* bulk transfer */
#define QH_INFO1_TR_TYPE_INT (0x3 << 5) /* interrupt */
#define QH_INFO1_TR_TYPE_LP_INT (0x7 << 5) /* low power interrupt */
#define QH_INFO1_DEV_INFO_IDX(i) ((i) << 8) /* index into device info buffer */
#define QH_INFO1_SET_INACTIVE (1 << 15) /* set inactive after transfer */
#define QH_INFO1_MAX_PKT_LEN(l) ((l) << 16) /* maximum packet length */
#define QH_INFO2_BURST(b) ((b) << 0) /* maximum burst length */
#define QH_INFO2_DBP(p) ((p) << 5) /* data burst policy (see [WUSB] table 5-7) */
#define QH_INFO2_MAX_COUNT(c) ((c) << 8) /* max isoc/int pkts per zone */
#define QH_INFO2_RQS (1 << 15) /* reactivate queue set */
#define QH_INFO2_MAX_RETRY(r) ((r) << 16) /* maximum transaction retries */
#define QH_INFO2_MAX_SEQ(s) ((s) << 20) /* maximum sequence number */
#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */
#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */
#define QH_INFO3_TX_RATE(r) ((r) << 24) /* PHY rate (see [ECMA-368] section 10.3.1.1) */
#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
#define QH_STATUS_FLOW_CTRL (1 << 15)
#define QH_STATUS_ICUR(i) ((i) << 5)
#define QH_STATUS_TO_ICUR(s) (((s) >> 5) & 0x7)
#define QH_STATUS_SEQ_MASK 0x1f
/**
* usb_pipe_to_qh_type - USB core pipe type to QH transfer type
*
* Returns the QH type field for a USB core pipe type.
*/
static inline unsigned usb_pipe_to_qh_type(unsigned pipe)
{
static const unsigned type[] = {
[PIPE_ISOCHRONOUS] = QH_INFO1_TR_TYPE_ISOC,
[PIPE_INTERRUPT] = QH_INFO1_TR_TYPE_INT,
[PIPE_CONTROL] = QH_INFO1_TR_TYPE_CTRL,
[PIPE_BULK] = QH_INFO1_TR_TYPE_BULK,
};
return type[usb_pipetype(pipe)];
}
/**
* Maximum number of TDs in a qset.
*/
#define WHCI_QSET_TD_MAX 8
/**
* struct whc_qset - WUSB data transfers to a specific endpoint
* @qh: the QHead of this qset
* @qtd: up to 8 qTDs (for qsets for control, bulk and interrupt
* transfers)
* @itd: up to 8 iTDs (for qsets for isochronous transfers)
* @qset_dma: DMA address for this qset
* @whc: WHCI HC this qset is for
* @ep: endpoint
* @stds: list of sTDs queued to this qset
* @ntds: number of qTDs queued (not necessarily the same as nTDs
* field in the QH)
* @td_start: index of the first qTD in the list
* @td_end: index of next free qTD in the list (provided
* ntds < WHCI_QSET_TD_MAX)
*
* Queue Sets (qsets) are added to the asynchronous schedule list
* (ASL) or the periodic zone list (PZL).
*
* qsets may contain up to 8 TDs (either qTDs or iTDs as appropriate).
* Each TD may refer to at most 1 MiB of data. If a single transfer
* has > 8MiB of data, TDs can be reused as they are completed since
* the TD list is used as a circular buffer. Similarly, several
* (smaller) transfers may be queued in a qset.
*
* WHCI controllers may cache portions of the qsets in the ASL and
* PZL, requiring the WHCD to inform the WHC that the lists have been
* updated (fields changed or qsets inserted or removed). For safe
* insertion and removal of qsets from the lists the schedule must be
* stopped to avoid races in updating the QH link pointers.
*
* Since the HC is free to execute qsets in any order, all transfers
* to an endpoint should use the same qset to ensure transfers are
* executed in the order they're submitted.
*
* [WHCI] section 3.2.3
*/
struct whc_qset {
struct whc_qhead qh;
union {
struct whc_qtd qtd[WHCI_QSET_TD_MAX];
struct whc_itd itd[WHCI_QSET_TD_MAX];
};
/* private data for WHCD */
dma_addr_t qset_dma;
struct whc *whc;
struct usb_host_endpoint *ep;
struct list_head stds;
int ntds;
int td_start;
int td_end;
struct list_head list_node;
unsigned in_sw_list:1;
unsigned in_hw_list:1;
unsigned remove:1;
unsigned reset:1;
struct urb *pause_after_urb;
struct completion remove_complete;
uint16_t max_packet;
uint8_t max_burst;
uint8_t max_seq;
};
static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target)
{
if (target)
*ptr = (*ptr & ~(QH_LINK_PTR_MASK | QH_LINK_T)) | QH_LINK_PTR(target);
else
*ptr = QH_LINK_T;
}
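/*
 * Worked example for the link encoding above (illustration only): qset
 * DMA addresses are 64-byte aligned, so the low bits of a link word are
 * free to carry QH_LINK_NTDS() and the T (terminate) bit.  A qset with
 * 8 TDs at DMA address 0x12340040 would be linked as
 * QH_LINK_PTR(0x12340040) | QH_LINK_NTDS(8) == 0x12340040 | (7 << 1),
 * while an empty PZL slot is just QH_LINK_NTDS(8) | QH_LINK_T (see
 * pzl_init()).
 */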
/**
* struct di_buf_entry - Device Information (DI) buffer entry.
*
* There's one of these per connected device.
*/
struct di_buf_entry {
__le32 availability_info[8]; /*< MAS availability information, one MAS per bit */
__le32 addr_sec_info; /*< addressing and security info */
__le32 reserved[7];
} __attribute__((packed));
#define WHC_DI_SECURE (1 << 31)
#define WHC_DI_DISABLE (1 << 30)
#define WHC_DI_KEY_IDX(k) ((k) << 8)
#define WHC_DI_KEY_IDX_MASK 0x0000ff00
#define WHC_DI_DEV_ADDR(a) ((a) << 0)
#define WHC_DI_DEV_ADDR_MASK 0x000000ff
/**
* struct dn_buf_entry - Device Notification (DN) buffer entry.
*
* [WHCI] section 3.2.8
*/
struct dn_buf_entry {
__u8 msg_size; /*< number of octets of valid DN data */
__u8 reserved1;
__u8 src_addr; /*< source address */
__u8 status; /*< buffer entry status */
__le32 tkid; /*< TKID for source device, valid if secure bit is set */
__u8 dn_data[56]; /*< up to 56 octets of DN data */
} __attribute__((packed));
#define WHC_DN_STATUS_VALID (1 << 7) /* buffer entry is valid */
#define WHC_DN_STATUS_SECURE (1 << 6) /* notification received using secure frame */
#define WHC_N_DN_ENTRIES (4096 / sizeof(struct dn_buf_entry))
/* The Add MMC IE WUSB Generic Command may take up to 256 bytes of
data. [WHCI] section 2.4.7. */
#define WHC_GEN_CMD_DATA_LEN 256
/*
* HC registers.
*
* [WHCI] section 2.4
*/
#define WHCIVERSION 0x00
#define WHCSPARAMS 0x04
# define WHCSPARAMS_TO_N_MMC_IES(p) (((p) >> 16) & 0xff)
# define WHCSPARAMS_TO_N_KEYS(p) (((p) >> 8) & 0xff)
# define WHCSPARAMS_TO_N_DEVICES(p) (((p) >> 0) & 0x7f)
#define WUSBCMD 0x08
# define WUSBCMD_BCID(b) ((b) << 16)
# define WUSBCMD_BCID_MASK (0xff << 16)
# define WUSBCMD_ASYNC_QSET_RM (1 << 12)
# define WUSBCMD_PERIODIC_QSET_RM (1 << 11)
# define WUSBCMD_WUSBSI(s) ((s) << 8)
# define WUSBCMD_WUSBSI_MASK (0x7 << 8)
# define WUSBCMD_ASYNC_SYNCED_DB (1 << 7)
# define WUSBCMD_PERIODIC_SYNCED_DB (1 << 6)
# define WUSBCMD_ASYNC_UPDATED (1 << 5)
# define WUSBCMD_PERIODIC_UPDATED (1 << 4)
# define WUSBCMD_ASYNC_EN (1 << 3)
# define WUSBCMD_PERIODIC_EN (1 << 2)
# define WUSBCMD_WHCRESET (1 << 1)
# define WUSBCMD_RUN (1 << 0)
#define WUSBSTS 0x0c
# define WUSBSTS_ASYNC_SCHED (1 << 15)
# define WUSBSTS_PERIODIC_SCHED (1 << 14)
# define WUSBSTS_DNTS_SCHED (1 << 13)
# define WUSBSTS_HCHALTED (1 << 12)
# define WUSBSTS_GEN_CMD_DONE (1 << 9)
# define WUSBSTS_CHAN_TIME_ROLLOVER (1 << 8)
# define WUSBSTS_DNTS_OVERFLOW (1 << 7)
# define WUSBSTS_BPST_ADJUSTMENT_CHANGED (1 << 6)
# define WUSBSTS_HOST_ERR (1 << 5)
# define WUSBSTS_ASYNC_SCHED_SYNCED (1 << 4)
# define WUSBSTS_PERIODIC_SCHED_SYNCED (1 << 3)
# define WUSBSTS_DNTS_INT (1 << 2)
# define WUSBSTS_ERR_INT (1 << 1)
# define WUSBSTS_INT (1 << 0)
# define WUSBSTS_INT_MASK 0x3ff
#define WUSBINTR 0x10
# define WUSBINTR_GEN_CMD_DONE (1 << 9)
# define WUSBINTR_CHAN_TIME_ROLLOVER (1 << 8)
# define WUSBINTR_DNTS_OVERFLOW (1 << 7)
# define WUSBINTR_BPST_ADJUSTMENT_CHANGED (1 << 6)
# define WUSBINTR_HOST_ERR (1 << 5)
# define WUSBINTR_ASYNC_SCHED_SYNCED (1 << 4)
# define WUSBINTR_PERIODIC_SCHED_SYNCED (1 << 3)
# define WUSBINTR_DNTS_INT (1 << 2)
# define WUSBINTR_ERR_INT (1 << 1)
# define WUSBINTR_INT (1 << 0)
# define WUSBINTR_ALL 0x3ff
#define WUSBGENCMDSTS 0x14
# define WUSBGENCMDSTS_ACTIVE (1 << 31)
# define WUSBGENCMDSTS_ERROR (1 << 24)
# define WUSBGENCMDSTS_IOC (1 << 23)
# define WUSBGENCMDSTS_MMCIE_ADD 0x01
# define WUSBGENCMDSTS_MMCIE_RM 0x02
# define WUSBGENCMDSTS_SET_MAS 0x03
# define WUSBGENCMDSTS_CHAN_STOP 0x04
# define WUSBGENCMDSTS_RWP_EN 0x05
#define WUSBGENCMDPARAMS 0x18
#define WUSBGENADDR 0x20
#define WUSBASYNCLISTADDR 0x28
#define WUSBDNTSBUFADDR 0x30
#define WUSBDEVICEINFOADDR 0x38
#define WUSBSETSECKEYCMD 0x40
# define WUSBSETSECKEYCMD_SET (1 << 31)
# define WUSBSETSECKEYCMD_ERASE (1 << 30)
# define WUSBSETSECKEYCMD_GTK (1 << 8)
# define WUSBSETSECKEYCMD_IDX(i) ((i) << 0)
#define WUSBTKID 0x44
#define WUSBSECKEY 0x48
#define WUSBPERIODICLISTBASE 0x58
#define WUSBMASINDEX 0x60
#define WUSBDNTSCTRL 0x64
# define WUSBDNTSCTRL_ACTIVE (1 << 31)
# define WUSBDNTSCTRL_INTERVAL(i) ((i) << 8)
# define WUSBDNTSCTRL_SLOTS(s) ((s) << 0)
#define WUSBTIME 0x68
# define WUSBTIME_CHANNEL_TIME_MASK 0x00ffffff
#define WUSBBPST 0x6c
#define WUSBDIBUPDATED 0x70
#endif /* #ifndef _WHCI_WHCI_HC_H */

View File

@ -1,210 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) WUSB operations.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include "../../../uwb/include/umc.h"
#include "../../wusbhc.h"
#include "whcd.h"
static int whc_update_di(struct whc *whc, int idx)
{
int offset = idx / 32;
u32 bit = 1 << (idx % 32);
le_writel(bit, whc->base + WUSBDIBUPDATED + offset);
return whci_wait_for(&whc->umc->dev,
whc->base + WUSBDIBUPDATED + offset, bit, 0,
100, "DI update");
}
/*
* WHCI starts MMCs based on there being a valid GTK, so these functions
* need only start/stop the asynchronous and periodic schedules and send
* a channel stop command.
*/
int whc_wusbhc_start(struct wusbhc *wusbhc)
{
struct whc *whc = wusbhc_to_whc(wusbhc);
asl_start(whc);
pzl_start(whc);
return 0;
}
void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay)
{
struct whc *whc = wusbhc_to_whc(wusbhc);
u32 stop_time, now_time;
int ret;
pzl_stop(whc);
asl_stop(whc);
now_time = le_readl(whc->base + WUSBTIME) & WUSBTIME_CHANNEL_TIME_MASK;
stop_time = (now_time + ((delay * 8) << 7)) & 0x00ffffff;
ret = whc_do_gencmd(whc, WUSBGENCMDSTS_CHAN_STOP, stop_time, NULL, 0);
if (ret == 0)
msleep(delay);
}
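/*
 * Worked example for the stop_time arithmetic above (illustration
 * only): (delay * 8) << 7 is delay * 1024, so a delay in milliseconds
 * becomes roughly that many channel-time ticks (assuming ~1024 ticks
 * per millisecond; the tick rate is not stated in this file), and the
 * sum wraps modulo 2^24 to stay within WUSBTIME_CHANNEL_TIME_MASK.
 * E.g. now_time == 0x00ffff00 and delay == 5 gives
 * stop_time == (0x00ffff00 + 5120) & 0x00ffffff == 0x00000300.
 */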
int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
u8 handle, struct wuie_hdr *wuie)
{
struct whc *whc = wusbhc_to_whc(wusbhc);
u32 params;
params = (interval << 24)
| (repeat_cnt << 16)
| (wuie->bLength << 8)
| handle;
return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_ADD, params, wuie, wuie->bLength);
}
int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
{
struct whc *whc = wusbhc_to_whc(wusbhc);
u32 params;
params = handle;
return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_RM, params, NULL, 0);
}
int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm)
{
struct whc *whc = wusbhc_to_whc(wusbhc);
if (stream_index >= 0)
whc_write_wusbcmd(whc, WUSBCMD_WUSBSI_MASK, WUSBCMD_WUSBSI(stream_index));
return whc_do_gencmd(whc, WUSBGENCMDSTS_SET_MAS, 0, (void *)mas_bm, sizeof(*mas_bm));
}
int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
{
struct whc *whc = wusbhc_to_whc(wusbhc);
int idx = wusb_dev->port_idx;
struct di_buf_entry *di = &whc->di_buf[idx];
int ret;
mutex_lock(&whc->mutex);
uwb_mas_bm_copy_le(di->availability_info, &wusb_dev->availability);
di->addr_sec_info &= ~(WHC_DI_DISABLE | WHC_DI_DEV_ADDR_MASK);
di->addr_sec_info |= WHC_DI_DEV_ADDR(wusb_dev->addr);
ret = whc_update_di(whc, idx);
mutex_unlock(&whc->mutex);
return ret;
}
/*
* Set the number of Device Notification Time Slots (DNTS) and enable
* device notifications.
*/
int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
{
struct whc *whc = wusbhc_to_whc(wusbhc);
u32 dntsctrl;
dntsctrl = WUSBDNTSCTRL_ACTIVE
| WUSBDNTSCTRL_INTERVAL(interval)
| WUSBDNTSCTRL_SLOTS(slots);
le_writel(dntsctrl, whc->base + WUSBDNTSCTRL);
return 0;
}
static int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid,
const void *key, size_t key_size, bool is_gtk)
{
uint32_t setkeycmd;
uint32_t seckey[4];
int i;
int ret;
memcpy(seckey, key, key_size);
setkeycmd = WUSBSETSECKEYCMD_SET | WUSBSETSECKEYCMD_IDX(key_index);
if (is_gtk)
setkeycmd |= WUSBSETSECKEYCMD_GTK;
le_writel(tkid, whc->base + WUSBTKID);
for (i = 0; i < 4; i++)
le_writel(seckey[i], whc->base + WUSBSECKEY + 4*i);
le_writel(setkeycmd, whc->base + WUSBSETSECKEYCMD);
ret = whci_wait_for(&whc->umc->dev, whc->base + WUSBSETSECKEYCMD,
WUSBSETSECKEYCMD_SET, 0, 100, "set key");
return ret;
}
/**
* whc_set_ptk - set the PTK to use for a device.
*
* The index into the key table for this PTK is the same as the
* device's port index.
*/
int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
const void *ptk, size_t key_size)
{
struct whc *whc = wusbhc_to_whc(wusbhc);
struct di_buf_entry *di = &whc->di_buf[port_idx];
int ret;
mutex_lock(&whc->mutex);
if (ptk) {
ret = whc_set_key(whc, port_idx, tkid, ptk, key_size, false);
if (ret)
goto out;
di->addr_sec_info &= ~WHC_DI_KEY_IDX_MASK;
di->addr_sec_info |= WHC_DI_SECURE | WHC_DI_KEY_IDX(port_idx);
} else
di->addr_sec_info &= ~WHC_DI_SECURE;
ret = whc_update_di(whc, port_idx);
out:
mutex_unlock(&whc->mutex);
return ret;
}
/**
* whc_set_gtk - set the GTK for subsequent broadcast packets
*
* The GTK is stored in the last entry in the key table (the previous
* N_DEVICES entries are for the per-device PTKs).
*/
int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
const void *gtk, size_t key_size)
{
struct whc *whc = wusbhc_to_whc(wusbhc);
int ret;
mutex_lock(&whc->mutex);
ret = whc_set_key(whc, whc->n_devices, tkid, gtk, key_size, true);
mutex_unlock(&whc->mutex);
return ret;
}
int whc_set_cluster_id(struct whc *whc, u8 bcid)
{
whc_write_wusbcmd(whc, WUSBCMD_BCID_MASK, WUSBCMD_BCID(bcid));
return 0;
}

View File

@ -1,151 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB - Cable Based Association
*
* Copyright (C) 2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
*/
#ifndef __LINUX_USB_ASSOCIATION_H
#define __LINUX_USB_ASSOCIATION_H
/*
* Association attributes
*
* Association Models Supplement to WUSB 1.0 T[3-1]
*
* Each field in the structures has its ID, its length and then the
* value. This is the actual definition of the field's ID and its
* length.
*/
struct wusb_am_attr {
__u8 id;
__u8 len;
};
/* Different fields defined by the spec */
#define WUSB_AR_AssociationTypeId { .id = cpu_to_le16(0x0000), .len = cpu_to_le16(2) }
#define WUSB_AR_AssociationSubTypeId { .id = cpu_to_le16(0x0001), .len = cpu_to_le16(2) }
#define WUSB_AR_Length { .id = cpu_to_le16(0x0002), .len = cpu_to_le16(4) }
#define WUSB_AR_AssociationStatus { .id = cpu_to_le16(0x0004), .len = cpu_to_le16(4) }
#define WUSB_AR_LangID { .id = cpu_to_le16(0x0008), .len = cpu_to_le16(2) }
#define WUSB_AR_DeviceFriendlyName { .id = cpu_to_le16(0x000b), .len = cpu_to_le16(64) } /* max */
#define WUSB_AR_HostFriendlyName { .id = cpu_to_le16(0x000c), .len = cpu_to_le16(64) } /* max */
#define WUSB_AR_CHID { .id = cpu_to_le16(0x1000), .len = cpu_to_le16(16) }
#define WUSB_AR_CDID { .id = cpu_to_le16(0x1001), .len = cpu_to_le16(16) }
#define WUSB_AR_ConnectionContext { .id = cpu_to_le16(0x1002), .len = cpu_to_le16(48) }
#define WUSB_AR_BandGroups { .id = cpu_to_le16(0x1004), .len = cpu_to_le16(2) }
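/*
 * Illustration only (not part of the original header): on the wire each
 * attribute is a little-endian 16-bit type ID, a 16-bit length of the
 * value, then the value itself (see struct wusb_cbaf_attr_hdr below).
 * A LangID attribute carrying US English (0x0409) would therefore be
 * the six bytes 08 00 02 00 09 04.
 */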
/* CBAF Control Requests (AMS1.0[T4-1]) */
enum {
CBAF_REQ_GET_ASSOCIATION_INFORMATION = 0x01,
CBAF_REQ_GET_ASSOCIATION_REQUEST,
CBAF_REQ_SET_ASSOCIATION_RESPONSE
};
/*
* CBAF USB-interface definitions
*
* No altsettings, one optional interrupt endpoint.
*/
enum {
CBAF_IFACECLASS = 0xef,
CBAF_IFACESUBCLASS = 0x03,
CBAF_IFACEPROTOCOL = 0x01,
};
/* Association Information (AMS1.0[T4-3]) */
struct wusb_cbaf_assoc_info {
__le16 Length;
__u8 NumAssociationRequests;
__le16 Flags;
__u8 AssociationRequestsArray[];
} __attribute__((packed));
/* Association Request (AMS1.0[T4-4]) */
struct wusb_cbaf_assoc_request {
__u8 AssociationDataIndex;
__u8 Reserved;
__le16 AssociationTypeId;
__le16 AssociationSubTypeId;
__le32 AssociationTypeInfoSize;
} __attribute__((packed));
enum {
AR_TYPE_WUSB = 0x0001,
AR_TYPE_WUSB_RETRIEVE_HOST_INFO = 0x0000,
AR_TYPE_WUSB_ASSOCIATE = 0x0001,
};
/* Association Attribute header (AMS1.0[3.8]) */
struct wusb_cbaf_attr_hdr {
__le16 id;
__le16 len;
} __attribute__((packed));
/* Host Info (AMS1.0[T4-7]) (yeah, more headers and fields...) */
struct wusb_cbaf_host_info {
struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
__le16 AssociationTypeId;
struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
__le16 AssociationSubTypeId;
struct wusb_cbaf_attr_hdr CHID_hdr;
struct wusb_ckhdid CHID;
struct wusb_cbaf_attr_hdr LangID_hdr;
__le16 LangID;
struct wusb_cbaf_attr_hdr HostFriendlyName_hdr;
__u8 HostFriendlyName[];
} __attribute__((packed));
/* Device Info (AMS1.0[T4-8])
*
* I still don't get this tag'n'header stuff for each goddamn
* field...
*/
struct wusb_cbaf_device_info {
struct wusb_cbaf_attr_hdr Length_hdr;
__le32 Length;
struct wusb_cbaf_attr_hdr CDID_hdr;
struct wusb_ckhdid CDID;
struct wusb_cbaf_attr_hdr BandGroups_hdr;
__le16 BandGroups;
struct wusb_cbaf_attr_hdr LangID_hdr;
__le16 LangID;
struct wusb_cbaf_attr_hdr DeviceFriendlyName_hdr;
__u8 DeviceFriendlyName[];
} __attribute__((packed));
/* Connection Context; CC_DATA - Success case (AMS1.0[T4-9]) */
struct wusb_cbaf_cc_data {
struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
__le16 AssociationTypeId;
struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
__le16 AssociationSubTypeId;
struct wusb_cbaf_attr_hdr Length_hdr;
__le32 Length;
struct wusb_cbaf_attr_hdr ConnectionContext_hdr;
struct wusb_ckhdid CHID;
struct wusb_ckhdid CDID;
struct wusb_ckhdid CK;
struct wusb_cbaf_attr_hdr BandGroups_hdr;
__le16 BandGroups;
} __attribute__((packed));
/* CC_DATA - Failure case (AMS1.0[T4-10]) */
struct wusb_cbaf_cc_data_fail {
struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
__le16 AssociationTypeId;
struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
__le16 AssociationSubTypeId;
struct wusb_cbaf_attr_hdr Length_hdr;
__le16 Length;
struct wusb_cbaf_attr_hdr AssociationStatus_hdr;
__u32 AssociationStatus;
} __attribute__((packed));
#endif /* __LINUX_USB_ASSOCIATION_H */

View File

@ -1,304 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Wire Adapter constants and structures.
*
* Copyright (C) 2005-2006 Intel Corporation.
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* FIXME: docs
* FIXME: organize properly, group logically
*
* All the event structures are defined in uwb/spec.h, as they are
* common to the WHCI and WUSB radio control interfaces.
*
* References:
* [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8
*/
#ifndef __LINUX_USB_WUSB_WA_H
#define __LINUX_USB_WUSB_WA_H
/**
* Radio Command Request for the Radio Control Interface
*
* Radio Control Interface command and event codes are the same as
* WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_*
*/
enum {
WA_EXEC_RC_CMD = 40, /* Radio Control command Request */
};
/* Wireless Adapter Requests ([WUSB] table 8-51) */
enum {
WUSB_REQ_ADD_MMC_IE = 20,
WUSB_REQ_REMOVE_MMC_IE = 21,
WUSB_REQ_SET_NUM_DNTS = 22,
WUSB_REQ_SET_CLUSTER_ID = 23,
WUSB_REQ_SET_DEV_INFO = 24,
WUSB_REQ_GET_TIME = 25,
WUSB_REQ_SET_STREAM_IDX = 26,
WUSB_REQ_SET_WUSB_MAS = 27,
WUSB_REQ_CHAN_STOP = 28,
};
/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */
enum {
WUSB_TIME_ADJ = 0,
WUSB_TIME_BPST = 1,
WUSB_TIME_WUSB = 2,
};
enum {
WA_ENABLE = 0x01,
WA_RESET = 0x02,
RPIPE_PAUSE = 0x1,
RPIPE_STALL = 0x2,
};
/* Responses from Get Status request ([WUSB] section 8.3.1.6) */
enum {
WA_STATUS_ENABLED = 0x01,
WA_STATUS_RESETTING = 0x02
};
enum rpipe_crs {
RPIPE_CRS_CTL = 0x01,
RPIPE_CRS_ISO = 0x02,
RPIPE_CRS_BULK = 0x04,
RPIPE_CRS_INTR = 0x08
};
/**
* RPipe descriptor ([WUSB] section 8.5.2.11)
*
* FIXME: explain rpipes
*/
struct usb_rpipe_descriptor {
u8 bLength;
u8 bDescriptorType;
__le16 wRPipeIndex;
__le16 wRequests;
__le16 wBlocks; /* rw if 0 */
__le16 wMaxPacketSize; /* rw */
union {
u8 dwa_bHSHubAddress; /* rw: DWA. */
u8 hwa_bMaxBurst; /* rw: HWA. */
};
union {
u8 dwa_bHSHubPort; /* rw: DWA. */
u8 hwa_bDeviceInfoIndex; /* rw: HWA. */
};
u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */
union {
u8 dwa_bDeviceAddress; /* rw: DWA Target device address. */
u8 hwa_reserved; /* rw: HWA. */
};
u8 bEndpointAddress; /* rw: Target EP address */
u8 bDataSequence; /* ro: Current Data sequence */
__le32 dwCurrentWindow; /* ro */
u8 bMaxDataSequence; /* ro?: max supported seq */
u8 bInterval; /* rw: */
u8 bOverTheAirInterval; /* rw: */
u8 bmAttribute; /* ro? */
u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */
u8 bmRetryOptions; /* rw? */
__le16 wNumTransactionErrors; /* rw */
} __attribute__ ((packed));
/**
* Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4)
*
* These are the notifications coming on the notification endpoint of
* an HWA and a DWA.
*/
enum wa_notif_type {
DWA_NOTIF_RWAKE = 0x91,
DWA_NOTIF_PORTSTATUS = 0x92,
WA_NOTIF_TRANSFER = 0x93,
HWA_NOTIF_BPST_ADJ = 0x94,
HWA_NOTIF_DN = 0x95,
};
/**
* Wire Adapter notification header
*
* Notifications coming from a wire adapter use a common header
* defined in [WUSB] sections 8.4.5 & 8.5.4.
*/
struct wa_notif_hdr {
u8 bLength;
u8 bNotifyType; /* enum wa_notif_type */
} __packed;
/**
* HWA DN Received notification ([WUSB] section 8.5.4.2)
*
* The DNData is specified in WUSB1.0[7.6]. For each device
* notification we receive, we just need to dispatch it.
*
* @dndata: this is really an array of notifications, but all start
* with the same header.
*/
struct hwa_notif_dn {
struct wa_notif_hdr hdr;
u8 bSourceDeviceAddr; /* from errata 2005/07 */
u8 bmAttributes;
struct wusb_dn_hdr dndata[];
} __packed;
/* [WUSB] section 8.3.3 */
enum wa_xfer_type {
WA_XFER_TYPE_CTL = 0x80,
WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */
WA_XFER_TYPE_ISO = 0x82,
WA_XFER_RESULT = 0x83,
WA_XFER_ABORT = 0x84,
WA_XFER_ISO_PACKET_INFO = 0xA0,
WA_XFER_ISO_PACKET_STATUS = 0xA1,
};
/* [WUSB] section 8.3.3 */
struct wa_xfer_hdr {
u8 bLength; /* 0x18 */
u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */
__le16 wRPipe; /* RPipe index */
__le32 dwTransferID; /* Host-assigned ID */
__le32 dwTransferLength; /* Length of data to xfer */
u8 bTransferSegment;
} __packed;
struct wa_xfer_ctl {
struct wa_xfer_hdr hdr;
u8 bmAttribute;
__le16 wReserved;
struct usb_ctrlrequest baSetupData;
} __packed;
struct wa_xfer_bi {
struct wa_xfer_hdr hdr;
u8 bReserved;
__le16 wReserved;
} __packed;
/* [WUSB] section 8.5.5 */
struct wa_xfer_hwaiso {
struct wa_xfer_hdr hdr;
u8 bReserved;
__le16 wPresentationTime;
__le32 dwNumOfPackets;
} __packed;
struct wa_xfer_packet_info_hwaiso {
__le16 wLength;
u8 bPacketType;
u8 bReserved;
__le16 PacketLength[0];
} __packed;
struct wa_xfer_packet_status_len_hwaiso {
__le16 PacketLength;
__le16 PacketStatus;
} __packed;
struct wa_xfer_packet_status_hwaiso {
__le16 wLength;
u8 bPacketType;
u8 bReserved;
struct wa_xfer_packet_status_len_hwaiso PacketStatus[0];
} __packed;
/* [WUSB] section 8.3.3.5 */
struct wa_xfer_abort {
u8 bLength;
u8 bRequestType;
__le16 wRPipe; /* RPipe index */
__le32 dwTransferID; /* Host-assigned ID */
} __packed;
/**
* WA Transfer Complete notification ([WUSB] section 8.3.3.3)
*
*/
struct wa_notif_xfer {
struct wa_notif_hdr hdr;
u8 bEndpoint;
u8 Reserved;
} __packed;
/** Transfer result basic codes [WUSB] table 8-15 */
enum {
WA_XFER_STATUS_SUCCESS,
WA_XFER_STATUS_HALTED,
WA_XFER_STATUS_DATA_BUFFER_ERROR,
WA_XFER_STATUS_BABBLE,
WA_XFER_RESERVED,
WA_XFER_STATUS_NOT_FOUND,
WA_XFER_STATUS_INSUFFICIENT_RESOURCE,
WA_XFER_STATUS_TRANSACTION_ERROR,
WA_XFER_STATUS_ABORTED,
WA_XFER_STATUS_RPIPE_NOT_READY,
WA_XFER_INVALID_FORMAT,
WA_XFER_UNEXPECTED_SEGMENT_NUMBER,
WA_XFER_STATUS_RPIPE_TYPE_MISMATCH,
};
/** [WUSB] section 8.3.3.4 */
struct wa_xfer_result {
struct wa_notif_hdr hdr;
__le32 dwTransferID;
__le32 dwTransferLength;
u8 bTransferSegment;
u8 bTransferStatus;
__le32 dwNumOfPackets;
} __packed;
/**
* Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7).
*
* NOTE: u16 fields are read Little Endian from the hardware.
*
* @bNumPorts is the original max number of devices that the host can
* connect; we might chop this so the stack can handle
* it. In case you need to access it, use wusbhc->ports_max
* if it is a Wireless USB WA.
*/
struct usb_wa_descriptor {
u8 bLength;
u8 bDescriptorType;
__le16 bcdWAVersion;
u8 bNumPorts; /* don't use!! */
u8 bmAttributes; /* Reserved == 0 */
__le16 wNumRPipes;
__le16 wRPipeMaxBlock;
u8 bRPipeBlockSize;
u8 bPwrOn2PwrGood;
u8 bNumMMCIEs;
u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */
} __packed;
/**
* HWA Device Information Buffer (WUSB1.0[T8.54])
*/
struct hwa_dev_info {
u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */
u8 bDeviceAddress;
__le16 wPHYRates;
u8 bmDeviceAttribute;
} __packed;
#endif /* #ifndef __LINUX_USB_WUSB_WA_H */

View File

@ -1,362 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Standard Definitions
* Event Size Tables
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* FIXME: docs
* FIXME: organize properly, group logically
*
* All the event structures are defined in uwb/spec.h, as they are
* common to the WHCI and WUSB radio control interfaces.
*/
#ifndef __WUSB_H__
#define __WUSB_H__
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/usb/ch9.h>
#include <linux/param.h>
#include "../../uwb/include/spec.h"
/**
* WUSB Information Element header
*
* I don't know why they decided to make it different from the MBOA MAC
* IE Header; beats me.
*/
struct wuie_hdr {
u8 bLength;
u8 bIEIdentifier;
} __attribute__((packed));
enum {
WUIE_ID_WCTA = 0x80,
WUIE_ID_CONNECTACK,
WUIE_ID_HOST_INFO,
WUIE_ID_CHANGE_ANNOUNCE,
WUIE_ID_DEVICE_DISCONNECT,
WUIE_ID_HOST_DISCONNECT,
WUIE_ID_KEEP_ALIVE = 0x89,
WUIE_ID_ISOCH_DISCARD,
WUIE_ID_RESET_DEVICE,
};
/**
* Maximum number of array elements in a WUSB IE.
*
* WUSB1.0[7.5, before table 7-38] says that WUSB IEs that
* are "arrays" have to be limited to 4 elements. So we define it
* like that to ease up and submit only the needed size.
*/
#define WUIE_ELT_MAX 4
/**
* Wrapper for the data that defines a CHID, a CDID or a CK
*
* WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of
* data. In order to avoid confusion and enforce types, we wrap it.
*
* Make it packed, as we use it in some hw definitions.
*/
struct wusb_ckhdid {
u8 data[16];
} __attribute__((packed));
static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
/**
* WUSB IE: Host Information (WUSB1.0[7.5.2])
*
* Used to provide information about the host to the Wireless USB
* devices in range (CHID can be used as an ASCII string).
*/
struct wuie_host_info {
struct wuie_hdr hdr;
__le16 attributes;
struct wusb_ckhdid CHID;
} __attribute__((packed));
/**
* WUSB IE: Connect Ack (WUSB1.0[7.5.1])
*
* Used to acknowledge device connect requests. See note for
* WUIE_ELT_MAX.
*/
struct wuie_connect_ack {
struct wuie_hdr hdr;
struct {
struct wusb_ckhdid CDID;
u8 bDeviceAddress; /* 0 means unused */
u8 bReserved;
} blk[WUIE_ELT_MAX];
} __attribute__((packed));
/**
* WUSB IE Host Information Element, Connect Availability
*
* WUSB1.0[7.5.2], bmAttributes description
*/
enum {
WUIE_HI_CAP_RECONNECT = 0,
WUIE_HI_CAP_LIMITED,
WUIE_HI_CAP_RESERVED,
WUIE_HI_CAP_ALL,
};
/**
* WUSB IE: Channel Stop (WUSB1.0[7.5.8])
*
* Tells devices the host is going to stop sending MMCs and will disappear.
*/
struct wuie_channel_stop {
struct wuie_hdr hdr;
u8 attributes;
u8 timestamp[3];
} __attribute__((packed));
/**
* WUSB IE: Keepalive (WUSB1.0[7.5.9])
*
* Ask device(s) to send keepalives.
*/
struct wuie_keep_alive {
struct wuie_hdr hdr;
u8 bDeviceAddress[WUIE_ELT_MAX];
} __attribute__((packed));
/**
* WUSB IE: Reset device (WUSB1.0[7.5.11])
*
* Tell device to reset; in all truth, we can fit 4 CDIDs, but we only
* use it for one at a time...
*
* In any case, this request is a wee bit silly: why don't they target
* by address??
*/
struct wuie_reset {
struct wuie_hdr hdr;
struct wusb_ckhdid CDID;
} __attribute__((packed));
/**
* WUSB IE: Disconnect device (WUSB1.0[7.5.11])
*
* Tell device to disconnect; we can fit 4 addresses, but we only use
* it for one at a time...
*/
struct wuie_disconnect {
struct wuie_hdr hdr;
u8 bDeviceAddress;
u8 padding;
} __attribute__((packed));
/**
* WUSB IE: Host disconnect ([WUSB] section 7.5.5)
*
* Tells all connected devices to disconnect.
*/
struct wuie_host_disconnect {
struct wuie_hdr hdr;
} __attribute__((packed));
/**
* WUSB Device Notification header (WUSB1.0[7.6])
*/
struct wusb_dn_hdr {
u8 bType;
u8 notifdata[];
} __attribute__((packed));
/** Device Notification codes (WUSB1.0[Table 7-54]) */
enum WUSB_DN {
WUSB_DN_CONNECT = 0x01,
WUSB_DN_DISCONNECT = 0x02,
WUSB_DN_EPRDY = 0x03,
WUSB_DN_MASAVAILCHANGED = 0x04,
WUSB_DN_RWAKE = 0x05,
WUSB_DN_SLEEP = 0x06,
WUSB_DN_ALIVE = 0x07,
};
/** WUSB Device Notification Connect */
struct wusb_dn_connect {
struct wusb_dn_hdr hdr;
__le16 attributes;
struct wusb_ckhdid CDID;
} __attribute__((packed));
static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn)
{
return le16_to_cpu(dn->attributes) & 0xff;
}
static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn)
{
return (le16_to_cpu(dn->attributes) >> 8) & 0x1;
}
static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn)
{
return (le16_to_cpu(dn->attributes) >> 9) & 0x03;
}
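/*
 * For illustration only: given the accessors above, the Connect DN
 * 'attributes' field packs the previous device address in bits 0-7,
 * the New Connection flag in bit 8 and the Beacon Behavior in bits
 * 9-10.  An attributes value of 0x0180, for example, decodes as
 * prev_dev_addr 0x80, new_connection 1, beacon_behavior 0.
 */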
/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */
struct wusb_dn_alive {
struct wusb_dn_hdr hdr;
} __attribute__((packed));
/** Device is disconnecting (WUSB1.0[7.6.2]) */
struct wusb_dn_disconnect {
struct wusb_dn_hdr hdr;
} __attribute__((packed));
/* General constants */
enum {
WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */
};
/*
* WUSB Crypto stuff (WUSB1.0[6])
*/
extern const char *wusb_et_name(u8);
/**
* WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for
* the host or the device.
*/
static inline u8 wusb_key_index(int index, int type, int originator)
{
return (originator << 6) | (type << 4) | index;
}
#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */
#define WUSB_KEY_INDEX_TYPE_ASSOC 1
#define WUSB_KEY_INDEX_TYPE_GTK 2
#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0
#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1
/* bits 0-3 used for the key index. */
#define WUSB_KEY_INDEX_MAX 15
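/*
 * Worked example (values for illustration only): a host-originated
 * GTK at key index 0 gives wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
 * WUSB_KEY_INDEX_ORIGINATOR_HOST) == (0 << 6) | (2 << 4) | 0 == 0x20.
 */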
/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
struct aes_ccm_nonce {
u8 sfn[6]; /* Little Endian */
u8 tkid[3]; /* LE */
struct uwb_dev_addr dest_addr;
struct uwb_dev_addr src_addr;
} __attribute__((packed));
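/*
 * Note: assuming the usual 2-byte struct uwb_dev_addr, this packs to
 * the 13-byte CCM nonce (6 + 3 + 2 + 2) that WUSB1.0[6.4.1] calls for.
 */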
/* A CCM operation label, defined on WUSB1.0[6.5.x] */
struct aes_ccm_label {
u8 data[14];
} __attribute__((packed));
/*
* Input to the key derivation sequence defined in
* WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the
* PRF function.
*/
struct wusb_keydvt_in {
u8 hnonce[16];
u8 dnonce[16];
} __attribute__((packed));
/*
* Output from the key derivation sequence defined in
* WUSB1.0[6.5.1].
*/
struct wusb_keydvt_out {
u8 kck[16];
u8 ptk[16];
} __attribute__((packed));
/* Pseudo Random Function WUSB1.0[6.5] */
extern int wusb_crypto_init(void);
extern void wusb_crypto_exit(void);
extern ssize_t wusb_prf(void *out, size_t out_size,
const u8 key[16], const struct aes_ccm_nonce *_n,
const struct aes_ccm_label *a,
const void *b, size_t blen, size_t len);
static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16],
const struct aes_ccm_nonce *n,
const struct aes_ccm_label *a,
const void *b, size_t blen)
{
return wusb_prf(out, out_size, key, n, a, b, blen, 64);
}
static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16],
const struct aes_ccm_nonce *n,
const struct aes_ccm_label *a,
const void *b, size_t blen)
{
return wusb_prf(out, out_size, key, n, a, b, blen, 128);
}
static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16],
const struct aes_ccm_nonce *n,
const struct aes_ccm_label *a,
const void *b, size_t blen)
{
return wusb_prf(out, out_size, key, n, a, b, blen, 256);
}
/* Key derivation WUSB1.0[6.5.1] */
static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out,
const u8 key[16],
const struct aes_ccm_nonce *n,
const struct wusb_keydvt_in *keydvt_in)
{
const struct aes_ccm_label a = { .data = "Pair-wise keys" };
return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a,
keydvt_in, sizeof(*keydvt_in));
}
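/*
 * The 32 bytes of PRF-256 output fill @keydvt_out in declaration
 * order: the 16-byte KCK first, then the 16-byte PTK (see struct
 * wusb_keydvt_out above).
 */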
/*
* Out-of-band MIC Generation WUSB1.0[6.5.2]
*
* Compute the MIC over @key, @n and @hs and place it in @mic_out.
*
* @mic_out: Where to place the 8 byte MIC tag
* @key: KCK from the derivation process
* @n: CCM nonce, n->sfn == 0, TKID as established in the
* process.
* @hs: Handshake struct for phase 2 of the 4-way.
* hs->bStatus and hs->bReserved are zero.
* hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2])
* hs->dest_addr is the device's USB address padded with 0
* hs->src_addr is the host's UWB device address
* hs->mic is ignored (as we compute that value).
*/
static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16],
const struct aes_ccm_nonce *n,
const struct usb_handshake *hs)
{
const struct aes_ccm_label a = { .data = "out-of-bandMIC" };
return wusb_prf_64(mic_out, 8, key, n, &a,
hs, sizeof(*hs) - sizeof(hs->MIC));
}
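/*
 * In other words, the 8-byte MIC is the first 64 bits of PRF output
 * computed over the handshake struct with its trailing MIC field
 * excluded (sizeof(*hs) - sizeof(hs->MIC) bytes).
 */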
#endif /* #ifndef __WUSB_H__ */

View File

@ -1,303 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
* MMC (Microscheduled Management Command) handling
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* WUIEs and MMC IEs...well, they are almost the same in the end. MMC
* IEs are Wireless USB IEs that go into the MMC period...[what is
* that? look in Design-overview.txt].
*
*
* This is a simple subsystem to keep track of which IEs are being
* sent by the host in the MMC period.
*
* For each WUIE we ask to send, we keep it in an array, so we can
* request its removal later, or replace the content. They are tracked
* by pointer, so be sure to use the same pointer if you want to
* remove it or update the contents.
*
* FIXME:
* - add timers that autoremove intervalled IEs?
*/
#include <linux/slab.h>
#include <linux/export.h>
#include "include/wusb.h"
#include "wusbhc.h"
/* Initialize the MMCIEs handling mechanism */
int wusbhc_mmcie_create(struct wusbhc *wusbhc)
{
u8 mmcies = wusbhc->mmcies_max;
wusbhc->mmcie = kcalloc(mmcies, sizeof(wusbhc->mmcie[0]), GFP_KERNEL);
if (wusbhc->mmcie == NULL)
return -ENOMEM;
mutex_init(&wusbhc->mmcie_mutex);
return 0;
}
/* Release resources used by the MMCIEs handling mechanism */
void wusbhc_mmcie_destroy(struct wusbhc *wusbhc)
{
kfree(wusbhc->mmcie);
}
/*
* Add or replace an MMC Wireless USB IE.
*
* @interval: See WUSB1.0[8.5.3.1]
* @repeat_cnt: See WUSB1.0[8.5.3.1]
* @handle: See WUSB1.0[8.5.3.1]
* @wuie: Pointer to the header of the WUSB IE data to add.
* MUST BE allocated in a kmalloc buffer (no stack or
* vmalloc).
* THE CALLER ALWAYS OWNS THE POINTER (we don't free it
* on remove, we just forget about it).
* @returns: 0 if ok, < 0 errno code on error.
*
* Goes over the *whole* @wusbhc->mmcie array looking for (a) the
* first free spot and (b) if @wuie is already in the array (aka:
* transmitted in the MMCs) the spot where it is.
*
* If present, we "overwrite it" (update).
*
*
* NOTE: Need special ordering rules -- see below WUSB1.0 Table 7-38.
* The host uses the handle as the 'sort' index. We
* allocate the last one always for the WUIE_ID_HOST_INFO, and
* the rest, first come, first served, in inverse order.
*
* Host software must make sure that it adds the other IEs in
* the right order... the host hardware is responsible for
* placing the WCTA IEs in the right place with the other IEs
* set by host software.
*
* NOTE: we can access wusbhc->wa_descr without locking because it is
* read only.
*/
int wusbhc_mmcie_set(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
struct wuie_hdr *wuie)
{
int result = -ENOBUFS;
unsigned handle, itr;
/* Search a handle, taking into account the ordering */
mutex_lock(&wusbhc->mmcie_mutex);
switch (wuie->bIEIdentifier) {
case WUIE_ID_HOST_INFO:
/* Always last */
handle = wusbhc->mmcies_max - 1;
break;
case WUIE_ID_ISOCH_DISCARD:
dev_err(wusbhc->dev, "Special ordering case for WUIE ID 0x%x "
"unimplemented\n", wuie->bIEIdentifier);
result = -ENOSYS;
goto error_unlock;
default:
/* search for it or find the last empty slot */
handle = ~0;
for (itr = 0; itr < wusbhc->mmcies_max - 1; itr++) {
if (wusbhc->mmcie[itr] == wuie) {
handle = itr;
break;
}
if (wusbhc->mmcie[itr] == NULL)
handle = itr;
}
if (handle == ~0)
goto error_unlock;
}
result = (wusbhc->mmcie_add)(wusbhc, interval, repeat_cnt, handle,
wuie);
if (result >= 0)
wusbhc->mmcie[handle] = wuie;
error_unlock:
mutex_unlock(&wusbhc->mmcie_mutex);
return result;
}
EXPORT_SYMBOL_GPL(wusbhc_mmcie_set);
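/*
 * Example of the resulting handle allocation (illustrative values):
 * with mmcies_max == 8, WUIE_ID_HOST_INFO always lands in handle 7;
 * any other IE reuses the slot it already occupies or else takes the
 * highest-numbered free slot in 0..6, matching the inverse-order note
 * above.
 */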
/*
* Remove an MMC IE previously added with wusbhc_mmcie_set()
*
* @wuie Pointer used to add the WUIE
*/
void wusbhc_mmcie_rm(struct wusbhc *wusbhc, struct wuie_hdr *wuie)
{
int result;
unsigned handle, itr;
mutex_lock(&wusbhc->mmcie_mutex);
for (itr = 0; itr < wusbhc->mmcies_max; itr++) {
if (wusbhc->mmcie[itr] == wuie) {
handle = itr;
goto found;
}
}
mutex_unlock(&wusbhc->mmcie_mutex);
return;
found:
result = (wusbhc->mmcie_rm)(wusbhc, handle);
if (result == 0)
wusbhc->mmcie[itr] = NULL;
mutex_unlock(&wusbhc->mmcie_mutex);
}
EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm);
static int wusbhc_mmc_start(struct wusbhc *wusbhc)
{
int ret;
mutex_lock(&wusbhc->mutex);
ret = wusbhc->start(wusbhc);
if (ret >= 0)
wusbhc->active = 1;
mutex_unlock(&wusbhc->mutex);
return ret;
}
static void wusbhc_mmc_stop(struct wusbhc *wusbhc)
{
mutex_lock(&wusbhc->mutex);
wusbhc->active = 0;
wusbhc->stop(wusbhc, WUSB_CHANNEL_STOP_DELAY_MS);
mutex_unlock(&wusbhc->mutex);
}
/*
* wusbhc_start - start transmitting MMCs and accepting connections
* @wusbhc: the HC to start
*
* Establishes a cluster reservation, enables device connections, and
* starts MMCs with appropriate DNTS parameters.
*/
int wusbhc_start(struct wusbhc *wusbhc)
{
int result;
struct device *dev = wusbhc->dev;
WARN_ON(wusbhc->wuie_host_info != NULL);
BUG_ON(wusbhc->uwb_rc == NULL);
result = wusbhc_rsv_establish(wusbhc);
if (result < 0) {
dev_err(dev, "cannot establish cluster reservation: %d\n",
result);
goto error_rsv_establish;
}
result = wusbhc_devconnect_start(wusbhc);
if (result < 0) {
dev_err(dev, "error enabling device connections: %d\n",
result);
goto error_devconnect_start;
}
result = wusbhc_sec_start(wusbhc);
if (result < 0) {
dev_err(dev, "error starting security in the HC: %d\n",
result);
goto error_sec_start;
}
result = wusbhc->set_num_dnts(wusbhc, wusbhc->dnts_interval,
wusbhc->dnts_num_slots);
if (result < 0) {
dev_err(dev, "Cannot set DNTS parameters: %d\n", result);
goto error_set_num_dnts;
}
result = wusbhc_mmc_start(wusbhc);
if (result < 0) {
dev_err(dev, "error starting wusbch: %d\n", result);
goto error_wusbhc_start;
}
return 0;
error_wusbhc_start:
wusbhc_sec_stop(wusbhc);
error_set_num_dnts:
error_sec_start:
wusbhc_devconnect_stop(wusbhc);
error_devconnect_start:
wusbhc_rsv_terminate(wusbhc);
error_rsv_establish:
return result;
}
/*
* wusbhc_stop - stop transmitting MMCs
* @wusbhc: the HC to stop
*
* Stops the WUSB channel and removes the cluster reservation.
*/
void wusbhc_stop(struct wusbhc *wusbhc)
{
wusbhc_mmc_stop(wusbhc);
wusbhc_sec_stop(wusbhc);
wusbhc_devconnect_stop(wusbhc);
wusbhc_rsv_terminate(wusbhc);
}
/*
* Set/reset/update a new CHID
*
* Depending on the previous state of the MMCs, start, stop or change
* the sent MMC. This effectively switches the host controller on and
* off (radio wise).
*/
int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
{
int result = 0;
if (memcmp(chid, &wusb_ckhdid_zero, sizeof(*chid)) == 0)
chid = NULL;
mutex_lock(&wusbhc->mutex);
if (chid) {
if (wusbhc->active) {
mutex_unlock(&wusbhc->mutex);
return -EBUSY;
}
wusbhc->chid = *chid;
}
/* register with UWB if we haven't already since we are about to start
the radio. */
if ((chid) && (wusbhc->uwb_rc == NULL)) {
wusbhc->uwb_rc = uwb_rc_get_by_grandpa(wusbhc->dev->parent);
if (wusbhc->uwb_rc == NULL) {
result = -ENODEV;
dev_err(wusbhc->dev,
"Cannot get associated UWB Host Controller\n");
goto error_rc_get;
}
result = wusbhc_pal_register(wusbhc);
if (result < 0) {
dev_err(wusbhc->dev, "Cannot register as a UWB PAL\n");
goto error_pal_register;
}
}
mutex_unlock(&wusbhc->mutex);
if (chid)
result = uwb_radio_start(&wusbhc->pal);
else if (wusbhc->uwb_rc)
uwb_radio_stop(&wusbhc->pal);
return result;
error_pal_register:
uwb_rc_put(wusbhc->uwb_rc);
wusbhc->uwb_rc = NULL;
error_rc_get:
mutex_unlock(&wusbhc->mutex);
return result;
}
EXPORT_SYMBOL_GPL(wusbhc_chid_set);

View File

@ -1,45 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* UWB Protocol Adaptation Layer (PAL) glue.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*/
#include "wusbhc.h"
static void wusbhc_channel_changed(struct uwb_pal *pal, int channel)
{
struct wusbhc *wusbhc = container_of(pal, struct wusbhc, pal);
dev_dbg(wusbhc->dev, "%s: channel = %d\n", __func__, channel);
if (channel < 0)
wusbhc_stop(wusbhc);
else
wusbhc_start(wusbhc);
}
/**
* wusbhc_pal_register - register the WUSB HC as a UWB PAL
* @wusbhc: the WUSB HC
*/
int wusbhc_pal_register(struct wusbhc *wusbhc)
{
uwb_pal_init(&wusbhc->pal);
wusbhc->pal.name = "wusbhc";
wusbhc->pal.device = wusbhc->usb_hcd.self.controller;
wusbhc->pal.rc = wusbhc->uwb_rc;
wusbhc->pal.channel_changed = wusbhc_channel_changed;
return uwb_pal_register(&wusbhc->pal);
}
/**
* wusbhc_pal_unregister - unregister the WUSB HC as a UWB PAL
* @wusbhc: the WUSB HC
*/
void wusbhc_pal_unregister(struct wusbhc *wusbhc)
{
if (wusbhc->uwb_rc)
uwb_pal_unregister(&wusbhc->pal);
}

View File

@ -1,110 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* WUSB cluster reservation management
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
#include "../uwb/uwb.h"
#include "wusbhc.h"
/*
* WUSB cluster reservations are multicast reservations with the
* broadcast cluster ID (BCID) as the target DevAddr.
*
* FIXME: consider adjusting the reservation depending on what devices
* are attached.
*/
static int wusbhc_bwa_set(struct wusbhc *wusbhc, u8 stream,
const struct uwb_mas_bm *mas)
{
if (mas == NULL)
mas = &uwb_mas_bm_zero;
return wusbhc->bwa_set(wusbhc, stream, mas);
}
/**
* wusbhc_rsv_complete_cb - WUSB HC reservation complete callback
* @rsv: the reservation
*
* Either set or clear the HC's view of the reservation.
*
* FIXME: when a reservation is denied the HC should be stopped.
*/
static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)
{
struct wusbhc *wusbhc = rsv->pal_priv;
struct device *dev = wusbhc->dev;
struct uwb_mas_bm mas;
dev_dbg(dev, "%s: state = %d\n", __func__, rsv->state);
switch (rsv->state) {
case UWB_RSV_STATE_O_ESTABLISHED:
uwb_rsv_get_usable_mas(rsv, &mas);
dev_dbg(dev, "established reservation: %*pb\n",
UWB_NUM_MAS, mas.bm);
wusbhc_bwa_set(wusbhc, rsv->stream, &mas);
break;
case UWB_RSV_STATE_NONE:
dev_dbg(dev, "removed reservation\n");
wusbhc_bwa_set(wusbhc, 0, NULL);
break;
default:
dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state);
break;
}
}
/**
* wusbhc_rsv_establish - establish a reservation for the cluster
* @wusbhc: the WUSB HC requesting a bandwidth reservation
*/
int wusbhc_rsv_establish(struct wusbhc *wusbhc)
{
struct uwb_rc *rc = wusbhc->uwb_rc;
struct uwb_rsv *rsv;
struct uwb_dev_addr bcid;
int ret;
if (rc == NULL)
return -ENODEV;
rsv = uwb_rsv_create(rc, wusbhc_rsv_complete_cb, wusbhc);
if (rsv == NULL)
return -ENOMEM;
bcid.data[0] = wusbhc->cluster_id;
bcid.data[1] = 0;
rsv->target.type = UWB_RSV_TARGET_DEVADDR;
rsv->target.devaddr = bcid;
rsv->type = UWB_DRP_TYPE_PRIVATE;
rsv->max_mas = 256; /* try to get as much as possible */
rsv->min_mas = 15; /* one MAS per zone */
rsv->max_interval = 1; /* max latency is one zone */
rsv->is_multicast = true;
ret = uwb_rsv_establish(rsv);
if (ret == 0)
wusbhc->rsv = rsv;
else
uwb_rsv_destroy(rsv);
return ret;
}
/**
* wusbhc_rsv_terminate - terminate the cluster reservation
* @wusbhc: the WUSB host whose reservation is to be terminated
*/
void wusbhc_rsv_terminate(struct wusbhc *wusbhc)
{
if (wusbhc->rsv) {
uwb_rsv_terminate(wusbhc->rsv);
uwb_rsv_destroy(wusbhc->rsv);
wusbhc->rsv = NULL;
}
}

View File

@ -1,426 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* Root Hub operations
*
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* We fake a root hub that has fake ports (as many as simultaneous
* devices the Wireless USB Host Controller can deal with). For each
* port we keep a state in @wusbhc->port[index] identical to the one
* specified in the USB2.0[ch11] spec and some extra device
* information that complements the one in 'struct usb_device' (as
* this lacks a hcpriv pointer).
*
* Note this is common to WHCI and HWA host controllers.
*
* Through here we enable most of the state changes that the USB stack
* will use to connect or disconnect devices. We need to do some
* forced adaptation of Wireless USB device states vs. wired:
*
* USB: WUSB:
*
* Port Powered-off port slot n/a
* Powered-on port slot available
* Disconnected port slot available
* Connected port slot assigned device
* device sent DN_Connect
* device was authenticated
* Enabled device is authenticated, transitioned
* from unauth -> auth -> default address
* -> enabled
* Reset disconnect
* Disable disconnect
*
* This maps the standard USB port states with the WUSB device states
* so we can fake ports without having to modify the USB stack.
*
* FIXME: this process will change in the future
*
*
* ENTRY POINTS
*
* Our entry points into here are, as in hcd.c, the USB stack root hub
* ops defined in the usb_hcd struct:
*
* wusbhc_rh_status_data() Provide hub and port status data bitmap
*
* wusbhc_rh_control() Execution of all the major requests
* you can do to a hub (Set|Clear
* features, get descriptors, status, etc).
*
* wusbhc_rh_[suspend|resume]() That
*
* wusbhc_rh_start_port_reset() ??? unimplemented
*/
#include <linux/slab.h>
#include <linux/export.h>
#include "wusbhc.h"
/*
* Reset a fake port
*
* Using a Reset Device IE is too heavyweight as it causes the device
* to enter the UnConnected state and leave the cluster; this can mean
* that when the device reconnects it is connected to a different fake
* port.
*
* Instead, reset authenticated devices with a SetAddress(0), followed
* by a SetAddress(AuthAddr).
*
* For unauthenticated devices just pretend to reset but do nothing.
* If the device initialization continues to fail it will eventually
* time out after TrustTimeout and enter the UnConnected state.
*
* @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
*
* Supposedly we are the only thread accessing @wusbhc->port; in any
* case, maybe we should move the mutex locking from
* wusbhc_devconnect_auth() to here.
*
* @port_idx refers to the wusbhc's port index, not the USB port number
*/
static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx)
{
int result = 0;
struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);
struct wusb_dev *wusb_dev = port->wusb_dev;
if (wusb_dev == NULL)
return -ENOTCONN;
port->status |= USB_PORT_STAT_RESET;
port->change |= USB_PORT_STAT_C_RESET;
if (wusb_dev->addr & WUSB_DEV_ADDR_UNAUTH)
result = 0;
else
result = wusb_dev_update_address(wusbhc, wusb_dev);
port->status &= ~USB_PORT_STAT_RESET;
port->status |= USB_PORT_STAT_ENABLE;
port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE;
return result;
}
/*
* Return the hub change status bitmap
*
* The bits in the change status bitmap are cleared when a
* ClearPortFeature request is issued (USB2.0[11.12.3, 11.12.4]).
*
* @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
*
* WARNING!! This gets called from atomic context; we cannot get the
* mutex--the only race condition we can find is some bit
* changing just after we copy it, which shouldn't be too
* big of a problem [and we can't make it a spinlock
* because other parts need to take it and sleep].
*
* @usb_hcd is refcounted, so it won't disappear under us
* and before killing a host, the polling of the root hub
* would be stopped anyway.
*/
int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
size_t cnt, size, bits_set = 0;
/* WE DON'T LOCK, see comment */
/* round up to bytes. Hub bit is bit 0 so add 1. */
size = DIV_ROUND_UP(wusbhc->ports_max + 1, 8);
/* clear the output buffer. */
memset(_buf, 0, size);
/* set the bit for each changed port. */
for (cnt = 0; cnt < wusbhc->ports_max; cnt++) {
if (wusb_port_by_idx(wusbhc, cnt)->change) {
const int bitpos = cnt+1;
_buf[bitpos/8] |= (1 << (bitpos % 8));
bits_set++;
}
}
return bits_set ? size : 0;
}
EXPORT_SYMBOL_GPL(wusbhc_rh_status_data);
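/*
 * Worked example (illustrative values): with ports_max == 3 the buffer
 * is DIV_ROUND_UP(3 + 1, 8) == 1 byte; a pending change on port index
 * 0 sets bit 1 (bit 0 is the hub itself), so the byte reads 0x02 and
 * the function returns 1.
 */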
/*
* Return the hub's descriptor
*
* NOTE: almost cut and paste from ehci-hub.c
*
* @wusbhc is assumed referenced and @wusbhc->mutex unlocked
*/
static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue,
u16 wIndex,
struct usb_hub_descriptor *descr,
u16 wLength)
{
u16 temp = 1 + (wusbhc->ports_max / 8);
u8 length = 7 + 2 * temp;
if (wLength < length)
return -ENOSPC;
descr->bDescLength = 7 + 2 * temp;
descr->bDescriptorType = USB_DT_HUB; /* HUB type */
descr->bNbrPorts = wusbhc->ports_max;
descr->wHubCharacteristics = cpu_to_le16(
HUB_CHAR_COMMON_LPSM /* All ports power at once */
| 0x00 /* not part of compound device */
| HUB_CHAR_NO_OCPM /* No overcurrent protection */
| 0x00 /* 8 FS think time FIXME ?? */
| 0x00); /* No port indicators */
descr->bPwrOn2PwrGood = 0;
descr->bHubContrCurrent = 0;
/* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
memset(&descr->u.hs.DeviceRemovable[0], 0, temp);
memset(&descr->u.hs.DeviceRemovable[temp], 0xff, temp);
return 0;
}
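/*
 * For example (illustrative values), with ports_max == 4: temp == 1,
 * bDescLength == 9, and each of the two trailing port bitmaps takes
 * one byte.
 */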
/*
* Clear a hub feature
*
* @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
*
* Nothing to do, so no locking needed ;)
*/
static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature)
{
int result;
switch (feature) {
case C_HUB_LOCAL_POWER:
/* FIXME: maybe plug bit 0 to the power input status,
* if any?
* see wusbhc_rh_get_hub_status() */
case C_HUB_OVER_CURRENT:
result = 0;
break;
default:
result = -EPIPE;
}
return result;
}
/*
* Return hub status (it is always zero...)
*
* @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
*
* Nothing to do, so no locking needed ;)
*/
static int wusbhc_rh_get_hub_status(struct wusbhc *wusbhc, u32 *buf,
u16 wLength)
{
/* FIXME: maybe plug bit 0 to the power input status (if any)? */
*buf = 0;
return 0;
}
/*
* Set a port feature
*
* @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
*/
static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature,
u8 selector, u8 port_idx)
{
struct device *dev = wusbhc->dev;
if (port_idx > wusbhc->ports_max)
return -EINVAL;
switch (feature) {
/* According to USB2.0[11.24.2.13]p2, these features
* are not required to be implemented. */
case USB_PORT_FEAT_C_OVER_CURRENT:
case USB_PORT_FEAT_C_ENABLE:
case USB_PORT_FEAT_C_SUSPEND:
case USB_PORT_FEAT_C_CONNECTION:
case USB_PORT_FEAT_C_RESET:
return 0;
case USB_PORT_FEAT_POWER:
/* No such thing, but we fake it works */
mutex_lock(&wusbhc->mutex);
wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER;
mutex_unlock(&wusbhc->mutex);
return 0;
case USB_PORT_FEAT_RESET:
return wusbhc_rh_port_reset(wusbhc, port_idx);
case USB_PORT_FEAT_ENABLE:
case USB_PORT_FEAT_SUSPEND:
dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n",
port_idx, feature, selector);
return -ENOSYS;
default:
dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n",
port_idx, feature, selector);
return -EPIPE;
}
return 0;
}
/*
* Clear a port feature...
*
* @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
*/
static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature,
u8 selector, u8 port_idx)
{
int result = 0;
struct device *dev = wusbhc->dev;
if (port_idx > wusbhc->ports_max)
return -EINVAL;
mutex_lock(&wusbhc->mutex);
switch (feature) {
case USB_PORT_FEAT_POWER: /* fake port always on */
/* According to USB2.0[11.24.2.7.1.4], no need to implement? */
case USB_PORT_FEAT_C_OVER_CURRENT:
break;
case USB_PORT_FEAT_C_RESET:
wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_RESET;
break;
case USB_PORT_FEAT_C_CONNECTION:
wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_CONNECTION;
break;
case USB_PORT_FEAT_ENABLE:
__wusbhc_dev_disable(wusbhc, port_idx);
break;
case USB_PORT_FEAT_C_ENABLE:
wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_ENABLE;
break;
case USB_PORT_FEAT_SUSPEND:
case USB_PORT_FEAT_C_SUSPEND:
dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n",
port_idx, feature, selector);
result = -ENOSYS;
break;
default:
dev_err(dev, "(port_idx %d) Clear feat %d/%d UNKNOWN\n",
port_idx, feature, selector);
result = -EPIPE;
break;
}
mutex_unlock(&wusbhc->mutex);
return result;
}
/*
* Return the port's status
*
* @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
*/
static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx,
u32 *_buf, u16 wLength)
{
__le16 *buf = (__le16 *)_buf;
if (port_idx > wusbhc->ports_max)
return -EINVAL;
mutex_lock(&wusbhc->mutex);
buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status);
buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change);
mutex_unlock(&wusbhc->mutex);
return 0;
}
/*
* Entry point for Root Hub operations
*
* @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
*/
int wusbhc_rh_control(struct usb_hcd *usb_hcd, u16 reqntype, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
int result = -ENOSYS;
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
switch (reqntype) {
case GetHubDescriptor:
result = wusbhc_rh_get_hub_descr(
wusbhc, wValue, wIndex,
(struct usb_hub_descriptor *) buf, wLength);
break;
case ClearHubFeature:
result = wusbhc_rh_clear_hub_feat(wusbhc, wValue);
break;
case GetHubStatus:
result = wusbhc_rh_get_hub_status(wusbhc, (u32 *)buf, wLength);
break;
case SetPortFeature:
result = wusbhc_rh_set_port_feat(wusbhc, wValue, wIndex >> 8,
(wIndex & 0xff) - 1);
break;
case ClearPortFeature:
result = wusbhc_rh_clear_port_feat(wusbhc, wValue, wIndex >> 8,
(wIndex & 0xff) - 1);
break;
case GetPortStatus:
result = wusbhc_rh_get_port_status(wusbhc, wIndex - 1,
(u32 *)buf, wLength);
break;
case SetHubFeature:
default:
dev_err(wusbhc->dev, "%s (%p [%p], %x, %x, %x, %p, %x) "
"UNIMPLEMENTED\n", __func__, usb_hcd, wusbhc, reqntype,
wValue, wIndex, buf, wLength);
/* dump_stack(); */
result = -ENOSYS;
}
return result;
}
EXPORT_SYMBOL_GPL(wusbhc_rh_control);
int wusbhc_rh_start_port_reset(struct usb_hcd *usb_hcd, unsigned port_idx)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
dev_err(wusbhc->dev, "%s (%p [%p], port_idx %u) UNIMPLEMENTED\n",
__func__, usb_hcd, wusbhc, port_idx);
WARN_ON(1);
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(wusbhc_rh_start_port_reset);
static void wusb_port_init(struct wusb_port *port)
{
port->status |= USB_PORT_STAT_HIGH_SPEED;
}
/*
* Alloc fake port specific fields and status.
*/
int wusbhc_rh_create(struct wusbhc *wusbhc)
{
int result = -ENOMEM;
size_t port_size, itr;
port_size = wusbhc->ports_max * sizeof(wusbhc->port[0]);
wusbhc->port = kzalloc(port_size, GFP_KERNEL);
if (wusbhc->port == NULL)
goto error_port_alloc;
for (itr = 0; itr < wusbhc->ports_max; itr++)
wusb_port_init(&wusbhc->port[itr]);
result = 0;
error_port_alloc:
return result;
}
void wusbhc_rh_destroy(struct wusbhc *wusbhc)
{
kfree(wusbhc->port);
}

View File

@ -1,599 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* Security support: encryption enablement, etc
*
* Copyright (C) 2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* FIXME: docs
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/usb/ch9.h>
#include <linux/random.h>
#include <linux/export.h>
#include "wusbhc.h"
#include <asm/unaligned.h>
static void wusbhc_gtk_rekey_work(struct work_struct *work);
int wusbhc_sec_create(struct wusbhc *wusbhc)
{
/*
* WQ is singlethread because we need to serialize rekey operations.
* Use a separate workqueue for security operations instead of the
* wusbd workqueue because security operations may need to communicate
* directly with downstream wireless devices using synchronous URBs.
* If a device is not responding, this could block other host
* controller operations.
*/
wusbhc->wq_security = create_singlethread_workqueue("wusbd_security");
if (wusbhc->wq_security == NULL) {
pr_err("WUSB-core: Cannot create wusbd_security workqueue\n");
return -ENOMEM;
}
wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) +
sizeof(wusbhc->gtk.data);
wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;
wusbhc->gtk.descr.bReserved = 0;
wusbhc->gtk_index = 0;
INIT_WORK(&wusbhc->gtk_rekey_work, wusbhc_gtk_rekey_work);
return 0;
}
/* Called when the HC is destroyed */
void wusbhc_sec_destroy(struct wusbhc *wusbhc)
{
destroy_workqueue(wusbhc->wq_security);
}
/**
* wusbhc_next_tkid - generate a new, currently unused, TKID
* @wusbhc: the WUSB host controller
* @wusb_dev: the device whose PTK the TKID is for
* (or NULL for a TKID for a GTK)
*
* The generated TKID consists of two parts: the device's authenticated
* address (or 0 for a GTK); and an incrementing number. This ensures
* that TKIDs cannot be shared between devices and by the time the
* incrementing number wraps around the older TKIDs will no longer be
* in use (a maximum of two keys may be active at any one time).
*/
static u32 wusbhc_next_tkid(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
{
u32 *tkid;
u32 addr;
if (wusb_dev == NULL) {
tkid = &wusbhc->gtk_tkid;
addr = 0;
} else {
tkid = &wusb_port_by_idx(wusbhc, wusb_dev->port_idx)->ptk_tkid;
addr = wusb_dev->addr & 0x7f;
}
*tkid = (addr << 8) | ((*tkid + 1) & 0xff);
return *tkid;
}
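/*
 * Example (illustrative values): a device authenticated at address
 * 0x23 whose previous TKID counter byte was 0x07 gets TKID
 * (0x23 << 8) | 0x08 == 0x2308; TKIDs for the GTK always carry 0 in
 * the address part.
 */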
static void wusbhc_generate_gtk(struct wusbhc *wusbhc)
{
const size_t key_size = sizeof(wusbhc->gtk.data);
u32 tkid;
tkid = wusbhc_next_tkid(wusbhc, NULL);
wusbhc->gtk.descr.tTKID[0] = (tkid >> 0) & 0xff;
wusbhc->gtk.descr.tTKID[1] = (tkid >> 8) & 0xff;
wusbhc->gtk.descr.tTKID[2] = (tkid >> 16) & 0xff;
get_random_bytes(wusbhc->gtk.descr.bKeyData, key_size);
}
/**
* wusbhc_sec_start - start the security management process
* @wusbhc: the WUSB host controller
*
* Generate and set an initial GTK on the host controller.
*
* Called when the HC is started.
*/
int wusbhc_sec_start(struct wusbhc *wusbhc)
{
const size_t key_size = sizeof(wusbhc->gtk.data);
int result;
wusbhc_generate_gtk(wusbhc);
result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
&wusbhc->gtk.descr.bKeyData, key_size);
if (result < 0)
dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n",
result);
return result;
}
/**
* wusbhc_sec_stop - stop the security management process
* @wusbhc: the WUSB host controller
*
* Wait for any pending GTK rekeys to stop.
*/
void wusbhc_sec_stop(struct wusbhc *wusbhc)
{
cancel_work_sync(&wusbhc->gtk_rekey_work);
}
/** @returns encryption type name */
const char *wusb_et_name(u8 x)
{
switch (x) {
case USB_ENC_TYPE_UNSECURE: return "unsecure";
case USB_ENC_TYPE_WIRED: return "wired";
case USB_ENC_TYPE_CCM_1: return "CCM-1";
case USB_ENC_TYPE_RSA_1: return "RSA-1";
default: return "unknown";
}
}
EXPORT_SYMBOL_GPL(wusb_et_name);
/*
* Set the device encryption method
*
* We tell the device which encryption method to use; we do this when
* setting up the device's security.
*/
static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
{
int result;
struct device *dev = &usb_dev->dev;
struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
if (value) {
value = wusb_dev->ccm1_etd.bEncryptionValue;
} else {
/* FIXME: should be wusb_dev->etd[UNSECURE].bEncryptionValue */
value = 0;
}
/* Set device's */
result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_ENCRYPTION,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
value, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "Can't set device's WUSB encryption to "
"%s (value %d): %d\n",
wusb_et_name(wusb_dev->ccm1_etd.bEncryptionType),
wusb_dev->ccm1_etd.bEncryptionValue, result);
return result;
}
/*
* Set the GTK to be used by a device.
*
* The device must be authenticated.
*/
static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
{
struct usb_device *usb_dev = wusb_dev->usb_dev;
u8 key_index = wusb_key_index(wusbhc->gtk_index,
WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST);
return usb_control_msg(
usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_DESCRIPTOR,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
USB_DT_KEY << 8 | key_index, 0,
&wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
USB_CTRL_SET_TIMEOUT);
}
/* FIXME: prototype for adding security */
int wusb_dev_sec_add(struct wusbhc *wusbhc,
struct usb_device *usb_dev, struct wusb_dev *wusb_dev)
{
int result, bytes, secd_size;
struct device *dev = &usb_dev->dev;
struct usb_security_descriptor *secd, *new_secd;
const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL;
const void *itr, *top;
char buf[64];
secd = kmalloc(sizeof(*secd), GFP_KERNEL);
if (secd == NULL) {
result = -ENOMEM;
goto out;
}
result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
0, secd, sizeof(*secd));
if (result < (int)sizeof(*secd)) {
dev_err(dev, "Can't read security descriptor or "
"not enough data: %d\n", result);
goto out;
}
secd_size = le16_to_cpu(secd->wTotalLength);
new_secd = krealloc(secd, secd_size, GFP_KERNEL);
if (new_secd == NULL) {
dev_err(dev,
"Can't allocate space for security descriptors\n");
result = -ENOMEM;
goto out;
}
secd = new_secd;
result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
0, secd, secd_size);
if (result < secd_size) {
dev_err(dev, "Can't read security descriptor or "
"not enough data: %d\n", result);
goto out;
}
bytes = 0;
itr = &secd[1];
top = (void *)secd + result;
while (itr < top) {
etd = itr;
if (top - itr < sizeof(*etd)) {
dev_err(dev, "BUG: bad device security descriptor; "
"not enough data (%zu vs %zu bytes left)\n",
top - itr, sizeof(*etd));
break;
}
if (etd->bLength < sizeof(*etd)) {
dev_err(dev, "BUG: bad device encryption descriptor; "
"descriptor is too short "
"(%u vs %zu needed)\n",
etd->bLength, sizeof(*etd));
break;
}
itr += etd->bLength;
bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
"%s (0x%02x/%02x) ",
wusb_et_name(etd->bEncryptionType),
etd->bEncryptionValue, etd->bAuthKeyIndex);
if (etd->bEncryptionType == USB_ENC_TYPE_CCM_1)
ccm1_etd = etd;
}
/* This code only supports CCM1 as of now. */
/* FIXME: user has to choose which sec mode to use?
* In theory we want CCM */
if (ccm1_etd == NULL) {
dev_err(dev, "WUSB device doesn't support CCM1 encryption, "
"can't use!\n");
result = -EINVAL;
goto out;
}
wusb_dev->ccm1_etd = *ccm1_etd;
dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n",
buf, wusb_et_name(ccm1_etd->bEncryptionType),
ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex);
result = 0;
out:
kfree(secd);
return result;
}
void wusb_dev_sec_rm(struct wusb_dev *wusb_dev)
{
/* Nothing so far */
}
/**
* Update the address of an unauthenticated WUSB device
*
* Once we have successfully authenticated, we take it to addr0 state
* and then to a normal address.
*
* Before this, the device's address (as known by it) was usb_dev->devnum |
* 0x80 (unauthenticated address). With this we update it to usb_dev->devnum.
*/
int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
{
int result = -ENOMEM;
struct usb_device *usb_dev = wusb_dev->usb_dev;
struct device *dev = &usb_dev->dev;
u8 new_address = wusb_dev->addr & 0x7F;
/* Set address 0 */
result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_ADDRESS,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "auth failed: can't set address 0: %d\n",
result);
goto error_addr0;
}
result = wusb_set_dev_addr(wusbhc, wusb_dev, 0);
if (result < 0)
goto error_addr0;
usb_set_device_state(usb_dev, USB_STATE_DEFAULT);
usb_ep0_reinit(usb_dev);
/* Set new (authenticated) address. */
result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_ADDRESS,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
new_address, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "auth failed: can't set address %u: %d\n",
new_address, result);
goto error_addr;
}
result = wusb_set_dev_addr(wusbhc, wusb_dev, new_address);
if (result < 0)
goto error_addr;
usb_set_device_state(usb_dev, USB_STATE_ADDRESS);
usb_ep0_reinit(usb_dev);
usb_dev->authenticated = 1;
error_addr:
error_addr0:
return result;
}
/*
*
*
*/
/* FIXME: split and cleanup */
int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
struct wusb_ckhdid *ck)
{
int result = -ENOMEM;
struct usb_device *usb_dev = wusb_dev->usb_dev;
struct device *dev = &usb_dev->dev;
u32 tkid;
struct usb_handshake *hs;
struct aes_ccm_nonce ccm_n;
u8 mic[8];
struct wusb_keydvt_in keydvt_in;
struct wusb_keydvt_out keydvt_out;
hs = kcalloc(3, sizeof(hs[0]), GFP_KERNEL);
if (!hs)
goto error_kzalloc;
/* We need to turn on encryption before beginning the 4-way
* handshake (WUSB1.0[7.3.2.2]) */
result = wusb_dev_set_encryption(usb_dev, 1);
if (result < 0)
goto error_dev_set_encryption;
tkid = wusbhc_next_tkid(wusbhc, wusb_dev);
hs[0].bMessageNumber = 1;
hs[0].bStatus = 0;
put_unaligned_le32(tkid, hs[0].tTKID);
hs[0].bReserved = 0;
memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */
result = usb_control_msg(
usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_HANDSHAKE,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
1, 0, &hs[0], sizeof(hs[0]), USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Handshake1: request failed: %d\n", result);
goto error_hs1;
}
/* Handshake 2, from the device -- need to verify fields */
result = usb_control_msg(
usb_dev, usb_rcvctrlpipe(usb_dev, 0),
USB_REQ_GET_HANDSHAKE,
USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
2, 0, &hs[1], sizeof(hs[1]), USB_CTRL_GET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Handshake2: request failed: %d\n", result);
goto error_hs2;
}
result = -EINVAL;
if (hs[1].bMessageNumber != 2) {
dev_err(dev, "Handshake2 failed: bad message number %u\n",
hs[1].bMessageNumber);
goto error_hs2;
}
if (hs[1].bStatus != 0) {
dev_err(dev, "Handshake2 failed: bad status %u\n",
hs[1].bStatus);
goto error_hs2;
}
if (memcmp(hs[0].tTKID, hs[1].tTKID, sizeof(hs[0].tTKID))) {
dev_err(dev, "Handshake2 failed: TKID mismatch "
"(#1 0x%02x%02x%02x vs #2 0x%02x%02x%02x)\n",
hs[0].tTKID[0], hs[0].tTKID[1], hs[0].tTKID[2],
hs[1].tTKID[0], hs[1].tTKID[1], hs[1].tTKID[2]);
goto error_hs2;
}
if (memcmp(hs[0].CDID, hs[1].CDID, sizeof(hs[0].CDID))) {
dev_err(dev, "Handshake2 failed: CDID mismatch\n");
goto error_hs2;
}
/* Setup the CCM nonce */
memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
put_unaligned_le32(tkid, ccm_n.tkid);
ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
ccm_n.dest_addr.data[0] = wusb_dev->addr;
ccm_n.dest_addr.data[1] = 0;
/* Derive the KCK and PTK from CK, the CCM, H and D nonces */
memcpy(keydvt_in.hnonce, hs[0].nonce, sizeof(keydvt_in.hnonce));
memcpy(keydvt_in.dnonce, hs[1].nonce, sizeof(keydvt_in.dnonce));
result = wusb_key_derive(&keydvt_out, ck->data, &ccm_n, &keydvt_in);
if (result < 0) {
dev_err(dev, "Handshake2 failed: cannot derive keys: %d\n",
result);
goto error_hs2;
}
/* Compute MIC and verify it */
result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]);
if (result < 0) {
dev_err(dev, "Handshake2 failed: cannot compute MIC: %d\n",
result);
goto error_hs2;
}
if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) {
dev_err(dev, "Handshake2 failed: MIC mismatch\n");
goto error_hs2;
}
/* Send Handshake3 */
hs[2].bMessageNumber = 3;
hs[2].bStatus = 0;
put_unaligned_le32(tkid, hs[2].tTKID);
hs[2].bReserved = 0;
memcpy(hs[2].CDID, &wusb_dev->cdid, sizeof(hs[2].CDID));
memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce));
result = wusb_oob_mic(hs[2].MIC, keydvt_out.kck, &ccm_n, &hs[2]);
if (result < 0) {
dev_err(dev, "Handshake3 failed: cannot compute MIC: %d\n",
result);
goto error_hs2;
}
result = usb_control_msg(
usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_HANDSHAKE,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
3, 0, &hs[2], sizeof(hs[2]), USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Handshake3: request failed: %d\n", result);
goto error_hs3;
}
result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid,
keydvt_out.ptk, sizeof(keydvt_out.ptk));
if (result < 0)
goto error_wusbhc_set_ptk;
result = wusb_dev_set_gtk(wusbhc, wusb_dev);
if (result < 0) {
dev_err(dev, "Set GTK for device: request failed: %d\n",
result);
goto error_wusbhc_set_gtk;
}
/* Update the device's address from unauth to auth */
if (usb_dev->authenticated == 0) {
result = wusb_dev_update_address(wusbhc, wusb_dev);
if (result < 0)
goto error_dev_update_address;
}
result = 0;
dev_info(dev, "device authenticated\n");
error_dev_update_address:
error_wusbhc_set_gtk:
error_wusbhc_set_ptk:
error_hs3:
error_hs2:
error_hs1:
memset(hs, 0, 3*sizeof(hs[0]));
memzero_explicit(&keydvt_out, sizeof(keydvt_out));
memzero_explicit(&keydvt_in, sizeof(keydvt_in));
memzero_explicit(&ccm_n, sizeof(ccm_n));
memzero_explicit(mic, sizeof(mic));
if (result < 0)
wusb_dev_set_encryption(usb_dev, 0);
error_dev_set_encryption:
kfree(hs);
error_kzalloc:
return result;
}
/*
* Once all connected and authenticated devices have received the new
* GTK, switch the host to using it.
*/
static void wusbhc_gtk_rekey_work(struct work_struct *work)
{
struct wusbhc *wusbhc = container_of(work,
struct wusbhc, gtk_rekey_work);
size_t key_size = sizeof(wusbhc->gtk.data);
int port_idx;
struct wusb_dev *wusb_dev, *wusb_dev_next;
LIST_HEAD(rekey_list);
mutex_lock(&wusbhc->mutex);
/* generate the new key */
wusbhc_generate_gtk(wusbhc);
/* roll the gtk index. */
wusbhc->gtk_index = (wusbhc->gtk_index + 1) % (WUSB_KEY_INDEX_MAX + 1);
/*
* Save all connected devices on a list while holding wusbhc->mutex and
* take a reference to each one. Then submit the set key request to
* them after releasing the lock in order to avoid a deadlock.
*/
for (port_idx = 0; port_idx < wusbhc->ports_max; port_idx++) {
wusb_dev = wusbhc->port[port_idx].wusb_dev;
if (!wusb_dev || !wusb_dev->usb_dev
|| !wusb_dev->usb_dev->authenticated)
continue;
wusb_dev_get(wusb_dev);
list_add_tail(&wusb_dev->rekey_node, &rekey_list);
}
mutex_unlock(&wusbhc->mutex);
/* Submit the rekey requests without holding wusbhc->mutex. */
list_for_each_entry_safe(wusb_dev, wusb_dev_next, &rekey_list,
rekey_node) {
list_del_init(&wusb_dev->rekey_node);
dev_dbg(&wusb_dev->usb_dev->dev,
"%s: rekey device at port %d\n",
__func__, wusb_dev->port_idx);
if (wusb_dev_set_gtk(wusbhc, wusb_dev) < 0) {
dev_err(&wusb_dev->usb_dev->dev,
"%s: rekey device at port %d failed\n",
__func__, wusb_dev->port_idx);
}
wusb_dev_put(wusb_dev);
}
/* Switch the host controller to use the new GTK. */
mutex_lock(&wusbhc->mutex);
wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
&wusbhc->gtk.descr.bKeyData, key_size);
mutex_unlock(&wusbhc->mutex);
}
/**
* wusbhc_gtk_rekey - generate and distribute a new GTK
* @wusbhc: the WUSB host controller
*
* Generate a new GTK and distribute it to all connected and
* authenticated devices. When all devices have the new GTK, the host
* starts using it.
*
* This must be called after every device disconnect (see [WUSB]
* section 6.2.11.2).
*/
void wusbhc_gtk_rekey(struct wusbhc *wusbhc)
{
/*
* We need to submit a URB to the downstream WUSB devices in order to
* change the group key. This can't be done while holding the
* wusbhc->mutex since that is also taken in the urb_enqueue routine
* and will cause a deadlock. Instead, queue a work item to do
* it when the lock is not held
*/
queue_work(wusbhc->wq_security, &wusbhc->gtk_rekey_work);
}

View File

@ -1,88 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wire Adapter Host Controller Driver
* Common items to HWA and DWA based HCDs
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* FIXME: docs
*/
#include <linux/slab.h>
#include <linux/module.h>
#include "wusbhc.h"
#include "wa-hc.h"
/**
* Assumes
*
* wa->usb_dev and wa->usb_iface initialized and refcounted,
* wa->wa_descr initialized.
*/
int wa_create(struct wahc *wa, struct usb_interface *iface,
kernel_ulong_t quirks)
{
int result;
struct device *dev = &iface->dev;
if (iface->cur_altsetting->desc.bNumEndpoints < 3)
return -ENODEV;
result = wa_rpipes_create(wa);
if (result < 0)
goto error_rpipes_create;
wa->quirks = quirks;
/* Fill up Data Transfer EP pointers */
wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc;
wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc;
wa->dti_buf_size = usb_endpoint_maxp(wa->dti_epd);
wa->dti_buf = kmalloc(wa->dti_buf_size, GFP_KERNEL);
if (wa->dti_buf == NULL) {
result = -ENOMEM;
goto error_dti_buf_alloc;
}
result = wa_nep_create(wa, iface);
if (result < 0) {
dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n",
result);
goto error_nep_create;
}
return 0;
error_nep_create:
kfree(wa->dti_buf);
error_dti_buf_alloc:
wa_rpipes_destroy(wa);
error_rpipes_create:
return result;
}
EXPORT_SYMBOL_GPL(wa_create);
void __wa_destroy(struct wahc *wa)
{
if (wa->dti_urb) {
usb_kill_urb(wa->dti_urb);
usb_put_urb(wa->dti_urb);
}
kfree(wa->dti_buf);
wa_nep_destroy(wa);
wa_rpipes_destroy(wa);
}
EXPORT_SYMBOL_GPL(__wa_destroy);
/**
* wa_reset_all - reset the WA device
* @wa: the WA to be reset
*
* For HWAs the radio controller and all other PALs are also reset.
*/
void wa_reset_all(struct wahc *wa)
{
/* FIXME: assuming HWA. */
wusbhc_reset_all(wa->wusb);
}
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Wireless USB Wire Adapter core");
MODULE_LICENSE("GPL");

View File

@ -1,467 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* HWA Host Controller Driver
* Wire Adapter Control/Data Streaming Iface (WUSB1.0[8])
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This driver implements a USB Host Controller (struct usb_hcd) for a
* Wireless USB Host Controller based on the Wireless USB 1.0
* Host-Wire-Adapter specification (in layman terms, a USB-dongle that
* implements a Wireless USB host).
*
* Check out the Design-overview.txt file in the source documentation
* for other details on the implementation.
*
* Main blocks:
*
* driver glue with the driver API, workqueue daemon
*
* lc RC instance life cycle management (create, destroy...)
*
* hcd glue with the USB API Host Controller Interface API.
*
* nep Notification EndPoint management: collect notifications
* and queue them with the workqueue daemon.
*
* Handle notifications as coming from the NEP. Sends them
* off to their respective modules (eg: connect,
* disconnect and reset go to devconnect).
*
* rpipe Remote Pipe management; rpipe is what we use to write
* to an endpoint on a WUSB device that is connected to a
* HWA RC.
*
* xfer Transfer management -- this is all the code that gets a
* buffer and pushes it to a device (or vice versa).
*
* Some day a lot of this code will be shared between this driver and
* the drivers for DWA (xfer, rpipe).
*
* Everything starts at driver.c:hwahc_probe(), when one of these guys is
* connected. hwahc_disconnect() stops it.
*
* During operation, the main driver of activity is devices connecting or
* disconnecting. They cause the HWA RC to send notifications into
* nep.c:hwahc_nep_cb() that will dispatch them to
* notif.c:wa_notif_dispatch(). From there they will fan to cause
* device connects, disconnects, etc.
*
* Note much of the activity is difficult to follow. For example a
* device connect goes to devconnect, which will cause the "fake" root
* hub port to show a connect and stop there. Then hub_wq will notice
* and call into the rh.c:hwahc_rc_port_reset() code to authenticate
* the device (and this might require user intervention) and enable
* the port.
*
* We also have a timer workqueue going from devconnect.c that
* schedules in hwahc_devconnect_create().
*
* The rest of the traffic is in the usual entry points of a USB HCD,
* which are hooked up in driver.c:hwahc_rc_driver, and defined in
* hcd.c.
*/
#ifndef __HWAHC_INTERNAL_H__
#define __HWAHC_INTERNAL_H__
#include <linux/completion.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include "../uwb/uwb.h"
#include "include/wusb.h"
#include "include/wusb-wa.h"
struct wusbhc;
struct wahc;
extern void wa_urb_enqueue_run(struct work_struct *ws);
extern void wa_process_errored_transfers_run(struct work_struct *ws);
/**
* RPipe instance
*
* @descr's fields are kept in LE, as we need to send it back and
* forth.
*
* @wa is referenced when set
*
* @segs_available is the number of request segments that still can
* be submitted to the controller without overloading
* it. It is initialized to descr->wRequests when
* aiming.
*
* A rpipe supports a max of descr->wRequests at the same time; before
* submitting seg_lock has to be taken. If segs_avail > 0, then we can
* submit; if not, we have to queue them.
*/
struct wa_rpipe {
struct kref refcnt;
struct usb_rpipe_descriptor descr;
struct usb_host_endpoint *ep;
struct wahc *wa;
spinlock_t seg_lock;
struct list_head seg_list;
struct list_head list_node;
atomic_t segs_available;
u8 buffer[1]; /* For reads/writes on USB */
};
enum wa_dti_state {
WA_DTI_TRANSFER_RESULT_PENDING,
WA_DTI_ISOC_PACKET_STATUS_PENDING,
WA_DTI_BUF_IN_DATA_PENDING
};
enum wa_quirks {
/*
* The Alereon HWA expects the data frames in isochronous transfer
* requests to be concatenated and not sent as separate packets.
*/
WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC = 0x01,
/*
* The Alereon HWA can be instructed to not send transfer notifications
* as an optimization.
*/
WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS = 0x02,
};
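/*
 * Editor's illustration (assumption, not in the original header): the
 * quirks value is a plain bitmask carried in wahc->quirks, so call
 * sites gate device-specific behaviour with simple bit tests, e.g.
 *
 *	if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
 *		... concatenate the isoc data frames ...
 */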
enum wa_vendor_specific_requests {
WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS = 0x4C,
WA_REQ_ALEREON_FEATURE_SET = 0x01,
WA_REQ_ALEREON_FEATURE_CLEAR = 0x00,
};
#define WA_MAX_BUF_IN_URBS 4
/**
* Instance of a HWA Host Controller
*
* Except where a more specific lock/mutex applies or atomic, all
* fields protected by @mutex.
*
* @wa_descr Can be accessed without locking because it is in
* the same area where the device descriptors were
* read, so it is guaranteed to exist unmodified while
* the device exists.
*
* Endianness has been converted to the CPU's.
*
* @nep_* can be accessed without locking as its processing is
* serialized; we submit a NEP URB and it comes to
* hwahc_nep_cb(), which won't issue another URB until it is
* done processing it.
*
* @xfer_list:
*
* List of active transfers to verify existence from a xfer id
* gotten from the xfer result message. Can't use urb->list because
* it goes by endpoint, and we don't know the endpoint at the time
* when we get the xfer result message. We can't really rely on the
* pointer (will have to change for 64 bits) as the xfer id is 32 bits.
*
* @xfer_delayed_list: List of transfers that need to be started
* (with a workqueue, because they were
* submitted from an atomic context).
*
* FIXME: this needs to be layered up: a wusbhc layer (for sharing
* commonalities with WHCI), a wa layer (for sharing
* commonalities with DWA-RC).
*/
struct wahc {
struct usb_device *usb_dev;
struct usb_interface *usb_iface;
/* HC to deliver notifications */
union {
struct wusbhc *wusb;
struct dwahc *dwa;
};
const struct usb_endpoint_descriptor *dto_epd, *dti_epd;
const struct usb_wa_descriptor *wa_descr;
struct urb *nep_urb; /* Notification EndPoint [lockless] */
struct edc nep_edc;
void *nep_buffer;
size_t nep_buffer_size;
atomic_t notifs_queued;
u16 rpipes;
unsigned long *rpipe_bm; /* rpipe usage bitmap */
struct list_head rpipe_delayed_list; /* delayed RPIPES. */
spinlock_t rpipe_lock; /* protect rpipe_bm and delayed list */
struct mutex rpipe_mutex; /* assigning resources to endpoints */
/*
* dti_state is used to track the state of the dti_urb. When dti_state
* is WA_DTI_ISOC_PACKET_STATUS_PENDING, dti_isoc_xfer_in_progress and
* dti_isoc_xfer_seg identify which xfer the incoming isoc packet
* status refers to.
*/
enum wa_dti_state dti_state;
u32 dti_isoc_xfer_in_progress;
u8 dti_isoc_xfer_seg;
struct urb *dti_urb; /* URB for reading xfer results */
/* URBs for reading data in */
struct urb buf_in_urbs[WA_MAX_BUF_IN_URBS];
int active_buf_in_urbs; /* number of buf_in_urbs active. */
struct edc dti_edc; /* DTI error density counter */
void *dti_buf;
size_t dti_buf_size;
unsigned long dto_in_use; /* protect dto endpoint serialization */
s32 status; /* For reading status */
struct list_head xfer_list;
struct list_head xfer_delayed_list;
struct list_head xfer_errored_list;
/*
* lock for the above xfer lists. Can be taken while a xfer->lock is
* held but not in the reverse order.
*/
spinlock_t xfer_list_lock;
struct work_struct xfer_enqueue_work;
struct work_struct xfer_error_work;
atomic_t xfer_id_count;
kernel_ulong_t quirks;
};
extern int wa_create(struct wahc *wa, struct usb_interface *iface,
kernel_ulong_t);
extern void __wa_destroy(struct wahc *wa);
extern int wa_dti_start(struct wahc *wa);
void wa_reset_all(struct wahc *wa);
/* Miscellaneous constants */
enum {
/** Max number of EPROTO errors we tolerate on the NEP in a
* period of time */
HWAHC_EPROTO_MAX = 16,
/** Period of time for EPROTO errors (in jiffies) */
HWAHC_EPROTO_PERIOD = 4 * HZ,
};
/* Notification endpoint handling */
extern int wa_nep_create(struct wahc *, struct usb_interface *);
extern void wa_nep_destroy(struct wahc *);
static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask)
{
struct urb *urb = wa->nep_urb;
urb->transfer_buffer = wa->nep_buffer;
urb->transfer_buffer_length = wa->nep_buffer_size;
return usb_submit_urb(urb, gfp_mask);
}
static inline void wa_nep_disarm(struct wahc *wa)
{
usb_kill_urb(wa->nep_urb);
}
/* RPipes */
static inline void wa_rpipe_init(struct wahc *wa)
{
INIT_LIST_HEAD(&wa->rpipe_delayed_list);
spin_lock_init(&wa->rpipe_lock);
mutex_init(&wa->rpipe_mutex);
}
static inline void wa_init(struct wahc *wa)
{
int index;
edc_init(&wa->nep_edc);
atomic_set(&wa->notifs_queued, 0);
wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
wa_rpipe_init(wa);
edc_init(&wa->dti_edc);
INIT_LIST_HEAD(&wa->xfer_list);
INIT_LIST_HEAD(&wa->xfer_delayed_list);
INIT_LIST_HEAD(&wa->xfer_errored_list);
spin_lock_init(&wa->xfer_list_lock);
INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
wa->dto_in_use = 0;
atomic_set(&wa->xfer_id_count, 1);
/* init the buf in URBs */
for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
usb_init_urb(&(wa->buf_in_urbs[index]));
wa->active_buf_in_urbs = 0;
}
/**
* Destroy a pipe (when refcount drops to zero)
*
* Assumes it has been moved to the "QUIESCING" state.
*/
struct wa_xfer;
extern void rpipe_destroy(struct kref *_rpipe);
static inline
void __rpipe_get(struct wa_rpipe *rpipe)
{
kref_get(&rpipe->refcnt);
}
extern int rpipe_get_by_ep(struct wahc *, struct usb_host_endpoint *,
struct urb *, gfp_t);
static inline void rpipe_put(struct wa_rpipe *rpipe)
{
kref_put(&rpipe->refcnt, rpipe_destroy);
}
extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
extern void rpipe_clear_feature_stalled(struct wahc *,
struct usb_host_endpoint *);
extern int wa_rpipes_create(struct wahc *);
extern void wa_rpipes_destroy(struct wahc *);
static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
{
atomic_dec(&rpipe->segs_available);
}
/**
* Returns true if the rpipe is ready to submit more segments.
*/
static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)
{
return atomic_inc_return(&rpipe->segs_available) > 0
&& !list_empty(&rpipe->seg_list);
}
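/*
 * Editor's addition (illustrative helper, not from the original source):
 * the two helpers above implement a "reserve a slot or defer" scheme; a
 * segment may only be handed to the HWA while segs_available is
 * positive, otherwise it is parked on rpipe->seg_list.
 */
static inline bool rpipe_slot_free(struct wa_rpipe *rpipe)
{
	return atomic_read(&rpipe->segs_available) > 0;
}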
/* Transferring data */
extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *,
struct urb *, gfp_t);
extern int wa_urb_dequeue(struct wahc *, struct urb *, int);
extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
/* Misc
*
* FIXME: Refcounting for the actual @hwahc object is not correct; I
* mean, this should be refcounting on the HCD underneath, but
* it is not. In any case, the semantics for HCD refcounting
* are *weird*...on refcount reaching zero it just frees
* it...no RC specific function is called...unless I miss
* something.
*
* FIXME: has to go away in favour of a 'struct' hcd based solution
*/
static inline struct wahc *wa_get(struct wahc *wa)
{
usb_get_intf(wa->usb_iface);
return wa;
}
static inline void wa_put(struct wahc *wa)
{
usb_put_intf(wa->usb_iface);
}
static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)
{
return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
op ? USB_REQ_SET_FEATURE : USB_REQ_CLEAR_FEATURE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
feature,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
NULL, 0, USB_CTRL_SET_TIMEOUT);
}
static inline int __wa_set_feature(struct wahc *wa, u16 feature)
{
return __wa_feature(wa, 1, feature);
}
static inline int __wa_clear_feature(struct wahc *wa, u16 feature)
{
return __wa_feature(wa, 0, feature);
}
/**
* Return the status of a Wire Adapter
*
* @wa: Wire Adapter instance
* @returns < 0 errno code on error, or status bitmap as described
* in WUSB1.0[8.3.1.6].
*
* NOTE: need malloc, some arches don't take USB from the stack
*/
static inline
s32 __wa_get_status(struct wahc *wa)
{
s32 result;
result = usb_control_msg(
wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
USB_REQ_GET_STATUS,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
&wa->status, sizeof(wa->status), USB_CTRL_GET_TIMEOUT);
if (result >= 0)
result = wa->status;
return result;
}
/**
* Waits until the Wire Adapter's status matches @mask/@value
*
* @wa: Wire Adapter instance.
* @returns < 0 errno code on error, otherwise status.
*
* Loop until the WA's status matches the mask and value (status & mask
* == value). Timeout if it doesn't happen.
*
* FIXME: is there an official specification on how long status
* changes can take?
*/
static inline s32 __wa_wait_status(struct wahc *wa, u32 mask, u32 value)
{
s32 result;
unsigned loops = 10;
do {
msleep(50);
result = __wa_get_status(wa);
if ((result & mask) == value)
break;
if (loops-- == 0) {
result = -ETIMEDOUT;
break;
}
} while (result >= 0);
return result;
}
/** Command @hwahc to stop, @returns 0 if ok, < 0 errno code on error */
static inline int __wa_stop(struct wahc *wa)
{
int result;
struct device *dev = &wa->usb_iface->dev;
result = __wa_clear_feature(wa, WA_ENABLE);
if (result < 0 && result != -ENODEV) {
dev_err(dev, "error commanding HC to stop: %d\n", result);
goto out;
}
result = __wa_wait_status(wa, WA_ENABLE, 0);
if (result < 0 && result != -ENODEV)
dev_err(dev, "error waiting for HC to stop: %d\n", result);
out:
return 0;
}
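/*
 * Editor's sketch (hypothetical helper, assumed from the stop path above;
 * not part of the original header): starting the Wire Adapter is the
 * mirror image -- set WA_ENABLE and poll until the status reflects it.
 */
static inline int __wa_start_sketch(struct wahc *wa)
{
	s32 result;

	result = __wa_set_feature(wa, WA_ENABLE);
	if (result < 0)
		return result;
	result = __wa_wait_status(wa, WA_ENABLE, WA_ENABLE);
	return result < 0 ? result : 0;
}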
#endif /* #ifndef __HWAHC_INTERNAL_H__ */

View File

@ -1,289 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
* Notification EndPoint support
*
* Copyright (C) 2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This part takes care of getting the notification from the hw
* only and dispatching through wusbwad into
* wa_notif_dispatch. Handling is done there.
*
* WA notifications are limited in size; most of them are three or
* four bytes long, and the longest is the HWA Device Notification,
* which would not exceed 38 bytes (DNs are limited in payload to 32
* bytes plus 3 bytes header (WUSB1.0[7.6p2]), plus 3 bytes HWA
* header (WUSB1.0[8.5.4.2])).
*
* It is not clear if more than one Device Notification can be packed
* in a HWA Notification, I assume no because of the wording in
* WUSB1.0[8.5.4.2]. In any case, the bigger any notification could
* get is 256 bytes (as the bLength field is a byte).
*
* So what we do is we have this buffer and read into it; when a
* notification arrives we schedule work to a specific, single thread
* workqueue (so notifications are serialized) and copy the
* notification data. After scheduling the work, we rearm the read from
* the notification endpoint.
*
* Entry points here are:
*
* wa_nep_[create|destroy]() To initialize/release this subsystem
*
* wa_nep_cb() Callback for the notification
* endpoint; when data is ready, this
* does the dispatching.
*/
#include <linux/workqueue.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include "wa-hc.h"
#include "wusbhc.h"
/* Structure for queueing notifications to the workqueue */
struct wa_notif_work {
struct work_struct work;
struct wahc *wa;
size_t size;
u8 data[];
};
/*
* Process incoming notifications from the WA's Notification EndPoint
* [the wuswad daemon, basically]
*
* @_nw: Pointer to a descriptor which has the pointer to the
* @wa, the size of the buffer and the work queue
* structure (so we can free all when done).
* @returns 0 if ok, < 0 errno code on error.
*
* All notifications follow the same format; they need to start with a
* 'struct wa_notif_hdr' header, so it is easy to parse through
* them. We just break the buffer in individual notifications (the
* standard doesn't say if it can be done or is forbidden, so we are
* cautious) and dispatch each.
*
* So the handling layers are:
*
* WA specific notification (from NEP)
* Device Notification Received -> wa_handle_notif_dn()
* WUSB Device notification generic handling
* BPST Adjustment -> wa_handle_notif_bpst_adj()
* ... -> ...
*
* @wa has to be referenced
*/
static void wa_notif_dispatch(struct work_struct *ws)
{
void *itr;
u8 missing = 0;
struct wa_notif_work *nw = container_of(ws, struct wa_notif_work,
work);
struct wahc *wa = nw->wa;
struct wa_notif_hdr *notif_hdr;
size_t size;
struct device *dev = &wa->usb_iface->dev;
#if 0
/* FIXME: need to check for this??? */
if (usb_hcd->state == HC_STATE_QUIESCING) /* Going down? */
goto out; /* screw it */
#endif
atomic_dec(&wa->notifs_queued); /* Throttling ctl */
size = nw->size;
itr = nw->data;
while (size) {
if (size < sizeof(*notif_hdr)) {
missing = sizeof(*notif_hdr) - size;
goto exhausted_buffer;
}
notif_hdr = itr;
if (size < notif_hdr->bLength)
goto exhausted_buffer;
itr += notif_hdr->bLength;
size -= notif_hdr->bLength;
/* Dispatch the notification [don't use itr or size!] */
switch (notif_hdr->bNotifyType) {
case HWA_NOTIF_DN: {
struct hwa_notif_dn *hwa_dn;
hwa_dn = container_of(notif_hdr, struct hwa_notif_dn,
hdr);
wusbhc_handle_dn(wa->wusb, hwa_dn->bSourceDeviceAddr,
hwa_dn->dndata,
notif_hdr->bLength - sizeof(*hwa_dn));
break;
}
case WA_NOTIF_TRANSFER:
wa_handle_notif_xfer(wa, notif_hdr);
break;
case HWA_NOTIF_BPST_ADJ:
break; /* no action needed for BPST ADJ. */
case DWA_NOTIF_RWAKE:
case DWA_NOTIF_PORTSTATUS:
/* FIXME: unimplemented WA NOTIFs */
/* fallthru */
default:
dev_err(dev, "HWA: unknown notification 0x%x, "
"%zu bytes; discarding\n",
notif_hdr->bNotifyType,
(size_t)notif_hdr->bLength);
break;
}
}
out:
wa_put(wa);
kfree(nw);
return;
/* THIS SHOULD NOT HAPPEN
*
* Buffer exhausted with partial data remaining; just warn and
* discard the data, as this should not happen.
*/
exhausted_buffer:
dev_warn(dev, "HWA: device sent short notification, "
"%d bytes missing; discarding %d bytes.\n",
missing, (int)size);
goto out;
}
/*
* Deliver incoming WA notifications to the wusbd workqueue
*
* @wa: Pointer the Wire Adapter Controller Data Streaming
* instance (part of an 'struct usb_hcd').
* @size: Size of the received buffer
* @returns 0 if ok, < 0 errno code on error.
*
* The input buffer is @wa->nep_buffer, with @size bytes
* (guaranteed to fit in the allocated space,
* @wa->nep_buffer_size).
*/
static int wa_nep_queue(struct wahc *wa, size_t size)
{
int result = 0;
struct device *dev = &wa->usb_iface->dev;
struct wa_notif_work *nw;
/* dev_fnstart(dev, "(wa %p, size %zu)\n", wa, size); */
BUG_ON(size > wa->nep_buffer_size);
if (size == 0)
goto out;
if (atomic_read(&wa->notifs_queued) > 200) {
if (printk_ratelimit())
dev_err(dev, "Too many notifications queued, "
"throttling back\n");
goto out;
}
nw = kzalloc(sizeof(*nw) + size, GFP_ATOMIC);
if (nw == NULL) {
if (printk_ratelimit())
dev_err(dev, "No memory to queue notification\n");
result = -ENOMEM;
goto out;
}
INIT_WORK(&nw->work, wa_notif_dispatch);
nw->wa = wa_get(wa);
nw->size = size;
memcpy(nw->data, wa->nep_buffer, size);
atomic_inc(&wa->notifs_queued); /* Throttling ctl */
queue_work(wusbd, &nw->work);
out:
/* dev_fnend(dev, "(wa %p, size %zu) = result\n", wa, size, result); */
return result;
}
/*
* Callback for the notification event endpoint
*
* Checks that everything is fine and then passes the data to be
* queued to the workqueue.
*/
static void wa_nep_cb(struct urb *urb)
{
int result;
struct wahc *wa = urb->context;
struct device *dev = &wa->usb_iface->dev;
switch (result = urb->status) {
case 0:
result = wa_nep_queue(wa, urb->actual_length);
if (result < 0)
dev_err(dev, "NEP: unable to process notification(s): "
"%d\n", result);
break;
case -ECONNRESET: /* Not an error, but a controlled situation; */
case -ENOENT: /* (we killed the URB)...so, no broadcast */
case -ESHUTDOWN:
dev_dbg(dev, "NEP: going down %d\n", urb->status);
goto out;
default: /* On general errors, we retry unless it gets ugly */
if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
EDC_ERROR_TIMEFRAME)) {
dev_err(dev, "NEP: URB max acceptable errors "
"exceeded, resetting device\n");
wa_reset_all(wa);
goto out;
}
dev_err(dev, "NEP: URB error %d\n", urb->status);
}
result = wa_nep_arm(wa, GFP_ATOMIC);
if (result < 0) {
dev_err(dev, "NEP: cannot submit URB: %d\n", result);
wa_reset_all(wa);
}
out:
return;
}
/*
* Initialize @wa's notification and event's endpoint stuff
*
* This includes the allocating the read buffer, the context ID
* allocation bitmap, the URB and submitting the URB.
*/
int wa_nep_create(struct wahc *wa, struct usb_interface *iface)
{
int result;
struct usb_endpoint_descriptor *epd;
struct usb_device *usb_dev = interface_to_usbdev(iface);
struct device *dev = &iface->dev;
edc_init(&wa->nep_edc);
epd = &iface->cur_altsetting->endpoint[0].desc;
wa->nep_buffer_size = 1024;
wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL);
if (!wa->nep_buffer)
goto error_nep_buffer;
wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL);
if (wa->nep_urb == NULL)
goto error_urb_alloc;
usb_fill_int_urb(wa->nep_urb, usb_dev,
usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
wa->nep_buffer, wa->nep_buffer_size,
wa_nep_cb, wa, epd->bInterval);
result = wa_nep_arm(wa, GFP_KERNEL);
if (result < 0) {
dev_err(dev, "Cannot submit notification URB: %d\n", result);
goto error_nep_arm;
}
return 0;
error_nep_arm:
usb_free_urb(wa->nep_urb);
error_urb_alloc:
kfree(wa->nep_buffer);
error_nep_buffer:
return -ENOMEM;
}
void wa_nep_destroy(struct wahc *wa)
{
wa_nep_disarm(wa);
usb_free_urb(wa->nep_urb);
kfree(wa->nep_buffer);
}

View File

@ -1,539 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* WUSB Wire Adapter
* rpipe management
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* FIXME: docs
*
* RPIPE
*
* Targeted at different downstream endpoints
*
* Descriptor: use to config the remote pipe.
*
* The number of blocks could be dynamic (wBlocks in descriptor is
* 0)--need to schedule them then.
*
* Each bit in wa->rpipe_bm represents if an rpipe is being used or
* not. Rpipes are represented with a 'struct wa_rpipe' that is
* attached to the hcpriv member of a 'struct usb_host_endpoint'.
*
* When you need to xfer data to an endpoint, you get an rpipe for it
* with rpipe_get_by_ep(), which gives you a reference to the rpipe
* and keeps a single one (the first one) with the endpoint. When you
* are done transferring, you drop that reference. At the end the
* rpipe is always allocated and bound to the endpoint. There it might
* be recycled when not used.
*
* Addresses:
*
* We use a 1:1 mapping mechanism between port address (0 based
* index, actually) and the address. The USB stack knows about this.
*
* USB Stack port number 4 (1 based)
* WUSB code port index 3 (0 based)
* USB Address 5 (2 based -- 0 is for default, 1 for root hub)
*
* Now, because we don't use the concept of a default address exactly
* like the (wired) USB code does, we need to kind of skip it. So we
* never take addresses from the urb->pipe, but from the
* urb->dev->devnum, to make sure that we always have the right
* destination address.
*/
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "wusbhc.h"
#include "wa-hc.h"
static int __rpipe_get_descr(struct wahc *wa,
struct usb_rpipe_descriptor *descr, u16 index)
{
ssize_t result;
struct device *dev = &wa->usb_iface->dev;
/* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor()
* function because the arguments are different.
*/
result = usb_control_msg(
wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
USB_REQ_GET_DESCRIPTOR,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,
USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
USB_CTRL_GET_TIMEOUT);
if (result < 0) {
dev_err(dev, "rpipe %u: get descriptor failed: %d\n",
index, (int)result);
goto error;
}
if (result < sizeof(*descr)) {
dev_err(dev, "rpipe %u: got short descriptor "
"(%zd vs %zd bytes needed)\n",
index, result, sizeof(*descr));
result = -EINVAL;
goto error;
}
result = 0;
error:
return result;
}
/*
*
* The descriptor is assumed to be properly initialized (ie: you got
* it through __rpipe_get_descr()).
*/
static int __rpipe_set_descr(struct wahc *wa,
struct usb_rpipe_descriptor *descr, u16 index)
{
ssize_t result;
struct device *dev = &wa->usb_iface->dev;
/* we cannot use the usb_get_descriptor() function because the
* arguments are different.
*/
result = usb_control_msg(
wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_SET_DESCRIPTOR,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "rpipe %u: set descriptor failed: %d\n",
index, (int)result);
goto error;
}
if (result < sizeof(*descr)) {
dev_err(dev, "rpipe %u: sent short descriptor "
"(%zd vs %zd bytes required)\n",
index, result, sizeof(*descr));
result = -EINVAL;
goto error;
}
result = 0;
error:
return result;
}
static void rpipe_init(struct wa_rpipe *rpipe)
{
kref_init(&rpipe->refcnt);
spin_lock_init(&rpipe->seg_lock);
INIT_LIST_HEAD(&rpipe->seg_list);
INIT_LIST_HEAD(&rpipe->list_node);
}
static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)
{
unsigned long flags;
spin_lock_irqsave(&wa->rpipe_lock, flags);
rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);
if (rpipe_idx < wa->rpipes)
set_bit(rpipe_idx, wa->rpipe_bm);
spin_unlock_irqrestore(&wa->rpipe_lock, flags);
return rpipe_idx;
}
static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)
{
unsigned long flags;
spin_lock_irqsave(&wa->rpipe_lock, flags);
clear_bit(rpipe_idx, wa->rpipe_bm);
spin_unlock_irqrestore(&wa->rpipe_lock, flags);
}
void rpipe_destroy(struct kref *_rpipe)
{
struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt);
u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
if (rpipe->ep)
rpipe->ep->hcpriv = NULL;
rpipe_put_idx(rpipe->wa, index);
wa_put(rpipe->wa);
kfree(rpipe);
}
EXPORT_SYMBOL_GPL(rpipe_destroy);
/*
* Locate an idle rpipe, create a structure for it and return it
*
* @wa is referenced and unlocked
* @crs enum rpipe_attr, required endpoint characteristics
*
* The rpipe can be used only sequentially (not in parallel).
*
* The rpipe is moved into the "ready" state.
*/
static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs,
gfp_t gfp)
{
int result;
unsigned rpipe_idx;
struct wa_rpipe *rpipe;
struct device *dev = &wa->usb_iface->dev;
rpipe = kzalloc(sizeof(*rpipe), gfp);
if (rpipe == NULL)
return -ENOMEM;
rpipe_init(rpipe);
/* Look for an idle pipe */
for (rpipe_idx = 0; rpipe_idx < wa->rpipes; rpipe_idx++) {
rpipe_idx = rpipe_get_idx(wa, rpipe_idx);
if (rpipe_idx >= wa->rpipes) /* no more pipes :( */
break;
result = __rpipe_get_descr(wa, &rpipe->descr, rpipe_idx);
if (result < 0)
dev_err(dev, "Can't get descriptor for rpipe %u: %d\n",
rpipe_idx, result);
else if ((rpipe->descr.bmCharacteristics & crs) != 0)
goto found;
rpipe_put_idx(wa, rpipe_idx);
}
*prpipe = NULL;
kfree(rpipe);
return -ENXIO;
found:
set_bit(rpipe_idx, wa->rpipe_bm);
rpipe->wa = wa_get(wa);
*prpipe = rpipe;
return 0;
}
static int __rpipe_reset(struct wahc *wa, unsigned index)
{
int result;
struct device *dev = &wa->usb_iface->dev;
result = usb_control_msg(
wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_RPIPE_RESET,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "rpipe %u: reset failed: %d\n",
index, result);
return result;
}
/*
* Fake companion descriptor for ep0
*
* See WUSB1.0[7.4.4], most of this is zero for bulk/int/ctl
*/
static struct usb_wireless_ep_comp_descriptor epc0 = {
.bLength = sizeof(epc0),
.bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP,
.bMaxBurst = 1,
.bMaxSequence = 2,
};
/*
* Look for EP companion descriptor
*
* Get there, look for Inara in the endpoint's extra descriptors
*/
static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(
struct device *dev, struct usb_host_endpoint *ep)
{
void *itr;
size_t itr_size;
struct usb_descriptor_header *hdr;
struct usb_wireless_ep_comp_descriptor *epcd;
if (ep->desc.bEndpointAddress == 0) {
epcd = &epc0;
goto out;
}
itr = ep->extra;
itr_size = ep->extralen;
epcd = NULL;
while (itr_size > 0) {
if (itr_size < sizeof(*hdr)) {
dev_err(dev, "HW Bug? ep 0x%02x: extra descriptors "
"at offset %zu: only %zu bytes left\n",
ep->desc.bEndpointAddress,
itr - (void *) ep->extra, itr_size);
break;
}
hdr = itr;
if (hdr->bDescriptorType == USB_DT_WIRELESS_ENDPOINT_COMP) {
epcd = itr;
break;
}
if (hdr->bLength > itr_size) {
dev_err(dev, "HW Bug? ep 0x%02x: extra descriptor "
"at offset %zu (type 0x%02x) "
"length %d but only %zu bytes left\n",
ep->desc.bEndpointAddress,
itr - (void *) ep->extra, hdr->bDescriptorType,
hdr->bLength, itr_size);
break;
}
itr += hdr->bLength;
itr_size -= hdr->bLength;
}
out:
return epcd;
}
/*
* Aim an rpipe to its device & endpoint destination
*
* Make sure we change the address to unauthenticated if the device
* is WUSB and it is not authenticated.
*/
static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp)
{
int result = -ENOMSG; /* better code for lack of companion? */
struct device *dev = &wa->usb_iface->dev;
struct usb_device *usb_dev = urb->dev;
struct usb_wireless_ep_comp_descriptor *epcd;
u32 ack_window, epcd_max_sequence;
u8 unauth;
epcd = rpipe_epc_find(dev, ep);
if (epcd == NULL) {
dev_err(dev, "ep 0x%02x: can't find companion descriptor\n",
ep->desc.bEndpointAddress);
goto error;
}
unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0;
__rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex));
atomic_set(&rpipe->segs_available,
le16_to_cpu(rpipe->descr.wRequests));
/* FIXME: block allocation system; request with queuing and timeout */
/* FIXME: compute so seg_size > ep->maxpktsize */
rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
/* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
if (usb_endpoint_xfer_isoc(&ep->desc))
rpipe->descr.wMaxPacketSize = epcd->wOverTheAirPacketSize;
else
rpipe->descr.wMaxPacketSize = ep->desc.wMaxPacketSize;
rpipe->descr.hwa_bMaxBurst = max(min_t(unsigned int,
epcd->bMaxBurst, 16U), 1U);
rpipe->descr.hwa_bDeviceInfoIndex =
wusb_port_no_to_idx(urb->dev->portnum);
/* FIXME: use maximum speed as supported or recommended by device */
rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ?
UWB_PHY_RATE_53 : UWB_PHY_RATE_200;
dev_dbg(dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n",
urb->dev->devnum, urb->dev->devnum | unauth,
le16_to_cpu(rpipe->descr.wRPipeIndex),
usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed);
rpipe->descr.hwa_reserved = 0;
rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress;
/* FIXME: bDataSequence */
rpipe->descr.bDataSequence = 0;
/* start with base window of hwa_bMaxBurst bits starting at 0. */
ack_window = 0xFFFFFFFF >> (32 - rpipe->descr.hwa_bMaxBurst);
rpipe->descr.dwCurrentWindow = cpu_to_le32(ack_window);
epcd_max_sequence = max(min_t(unsigned int,
epcd->bMaxSequence, 32U), 2U);
rpipe->descr.bMaxDataSequence = epcd_max_sequence - 1;
rpipe->descr.bInterval = ep->desc.bInterval;
if (usb_endpoint_xfer_isoc(&ep->desc))
rpipe->descr.bOverTheAirInterval = epcd->bOverTheAirInterval;
else
rpipe->descr.bOverTheAirInterval = 0; /* 0 if not isoc */
/* FIXME: xmit power & preamble blah blah */
rpipe->descr.bmAttribute = (ep->desc.bmAttributes &
USB_ENDPOINT_XFERTYPE_MASK);
/* rpipe->descr.bmCharacteristics RO */
rpipe->descr.bmRetryOptions = (wa->wusb->retry_count & 0xF);
/* FIXME: use for assessing link quality? */
rpipe->descr.wNumTransactionErrors = 0;
result = __rpipe_set_descr(wa, &rpipe->descr,
le16_to_cpu(rpipe->descr.wRPipeIndex));
if (result < 0) {
dev_err(dev, "Cannot aim rpipe: %d\n", result);
goto error;
}
result = 0;
error:
return result;
}
/*
* Check an aimed rpipe to make sure it points to where we want
*
* We use bit 19 of the Linux USB pipe bitmap for unauth vs auth
* space; when that bit is set, we OR in 0x80 to form the unauth address.
*/
static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
const struct usb_host_endpoint *ep,
const struct urb *urb, gfp_t gfp)
{
int result = 0;
struct device *dev = &wa->usb_iface->dev;
u8 portnum = wusb_port_no_to_idx(urb->dev->portnum);
#define AIM_CHECK(rdf, val, text) \
do { \
if (rpipe->descr.rdf != (val)) { \
dev_err(dev, \
"rpipe aim discrepancy: " #rdf " " text "\n", \
rpipe->descr.rdf, (val)); \
result = -EINVAL; \
WARN_ON(1); \
} \
} while (0)
AIM_CHECK(hwa_bDeviceInfoIndex, portnum, "(%u vs %u)");
AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ?
UWB_PHY_RATE_53 : UWB_PHY_RATE_200,
"(%u vs %u)");
AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)");
AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)");
AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)");
#undef AIM_CHECK
return result;
}
#ifndef CONFIG_BUG
#define CONFIG_BUG 0
#endif
/*
* Make sure there is an rpipe allocated for an endpoint
*
* If already allocated, we just refcount it; if not, we get an
* idle one, aim it to the right location and take it.
*
* Attaches to ep->hcpriv and rpipe->ep to ep.
*/
int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep,
struct urb *urb, gfp_t gfp)
{
int result = 0;
struct device *dev = &wa->usb_iface->dev;
struct wa_rpipe *rpipe;
u8 eptype;
mutex_lock(&wa->rpipe_mutex);
rpipe = ep->hcpriv;
if (rpipe != NULL) {
if (CONFIG_BUG == 1) {
result = rpipe_check_aim(rpipe, wa, ep, urb, gfp);
if (result < 0)
goto error;
}
__rpipe_get(rpipe);
dev_dbg(dev, "ep 0x%02x: reusing rpipe %u\n",
ep->desc.bEndpointAddress,
le16_to_cpu(rpipe->descr.wRPipeIndex));
} else {
/* hmm, assign idle rpipe, aim it */
result = -ENOBUFS;
eptype = ep->desc.bmAttributes & 0x03;
result = rpipe_get_idle(&rpipe, wa, 1 << eptype, gfp);
if (result < 0)
goto error;
result = rpipe_aim(rpipe, wa, ep, urb, gfp);
if (result < 0) {
rpipe_put(rpipe);
goto error;
}
ep->hcpriv = rpipe;
rpipe->ep = ep;
__rpipe_get(rpipe); /* for caching into ep->hcpriv */
dev_dbg(dev, "ep 0x%02x: using rpipe %u\n",
ep->desc.bEndpointAddress,
le16_to_cpu(rpipe->descr.wRPipeIndex));
}
error:
mutex_unlock(&wa->rpipe_mutex);
return result;
}
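/*
 * Editor's note (hypothetical call sequence, not part of this file): the
 * transfer path is expected to look roughly like
 *
 *	result = rpipe_get_by_ep(wa, ep, urb, gfp);
 *	if (result < 0)
 *		return result;
 *	rpipe = ep->hcpriv;
 *	... queue segments, calling rpipe_avail_dec() per submission ...
 *	rpipe_put(rpipe);
 *
 * so every successful get is balanced by a put once the URB is done.
 */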
/*
* Allocate the bitmap for each rpipe.
*/
int wa_rpipes_create(struct wahc *wa)
{
wa->rpipes = le16_to_cpu(wa->wa_descr->wNumRPipes);
wa->rpipe_bm = bitmap_zalloc(wa->rpipes, GFP_KERNEL);
if (wa->rpipe_bm == NULL)
return -ENOMEM;
return 0;
}
void wa_rpipes_destroy(struct wahc *wa)
{
struct device *dev = &wa->usb_iface->dev;
if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) {
WARN_ON(1);
dev_err(dev, "BUG: pipes not released on exit: %*pb\n",
wa->rpipes, wa->rpipe_bm);
}
bitmap_free(wa->rpipe_bm);
}
/*
* Release resources allocated for an endpoint
*
* If there is an rpipe associated with this endpoint, abort any pending
* transfers and put it. If the rpipe ends up being destroyed,
* rpipe_destroy() will clean up ep->hcpriv.
*
* This is called before calling hcd->stop(), so you don't need to do
* anything else in there.
*/
void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
{
struct wa_rpipe *rpipe;
mutex_lock(&wa->rpipe_mutex);
rpipe = ep->hcpriv;
if (rpipe != NULL) {
u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
usb_control_msg(
wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_RPIPE_ABORT,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
rpipe_put(rpipe);
}
mutex_unlock(&wa->rpipe_mutex);
}
EXPORT_SYMBOL_GPL(rpipe_ep_disable);
/* Clear the stalled status of an RPIPE. */
void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep)
{
struct wa_rpipe *rpipe;
mutex_lock(&wa->rpipe_mutex);
rpipe = ep->hcpriv;
if (rpipe != NULL) {
u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
usb_control_msg(
wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_CLEAR_FEATURE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
RPIPE_STALL, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
}
mutex_unlock(&wa->rpipe_mutex);
}
EXPORT_SYMBOL_GPL(rpipe_clear_feature_stalled);

File diff suppressed because it is too large

View File

@ -1,490 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* sysfs glue, wusbcore module support and life cycle management
*
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* Creation/destruction of wusbhc is split in two parts: the part that
* doesn't require the HCD to be added (wusbhc_{create,destroy}) and
* the part that does (phase B, wusbhc_b_{create,destroy}).
*
* This is so because usb_add_hcd() will start the HC, and thus, all
* the HC specific stuff has to be already initialized (like sysfs
* thingies).
*/
#include <linux/device.h>
#include <linux/module.h>
#include "wusbhc.h"
/**
* Extract the wusbhc that corresponds to a USB Host Controller class device
*
* WARNING! Apply only if @dev is that of a
* wusbhc.usb_hcd.self->class_dev; otherwise, you lose.
*/
static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev)
{
struct usb_bus *usb_bus = dev_get_drvdata(dev);
struct usb_hcd *usb_hcd = bus_to_hcd(usb_bus);
return usb_hcd_to_wusbhc(usb_hcd);
}
/*
* Show & store the current WUSB trust timeout
*
* We don't do locking--it is an 'atomic' value.
*
* The units that we store/show are always MILLISECONDS. However, the
* value of trust_timeout is jiffies.
*/
static ssize_t wusb_trust_timeout_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
return scnprintf(buf, PAGE_SIZE, "%u\n", wusbhc->trust_timeout);
}
static ssize_t wusb_trust_timeout_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
ssize_t result = -ENOSYS;
unsigned trust_timeout;
result = sscanf(buf, "%u", &trust_timeout);
if (result != 1) {
result = -EINVAL;
goto out;
}
wusbhc->trust_timeout = min_t(unsigned, trust_timeout, 500);
cancel_delayed_work(&wusbhc->keep_alive_timer);
flush_workqueue(wusbd);
queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
msecs_to_jiffies(wusbhc->trust_timeout / 2));
out:
return result < 0 ? result : size;
}
static DEVICE_ATTR_RW(wusb_trust_timeout);
/*
* Show the current WUSB CHID.
*/
static ssize_t wusb_chid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
const struct wusb_ckhdid *chid;
if (wusbhc->wuie_host_info != NULL)
chid = &wusbhc->wuie_host_info->CHID;
else
chid = &wusb_ckhdid_zero;
return sprintf(buf, "%16ph\n", chid->data);
}
/*
* Store a new CHID.
*
* - Write an all zeros CHID and it will stop the controller
* - Write a non-zero CHID and it will start it.
*
* See wusbhc_chid_set() for more info.
*/
static ssize_t wusb_chid_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
struct wusb_ckhdid chid;
ssize_t result;
result = sscanf(buf,
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx "
"%02hhx %02hhx %02hhx %02hhx\n",
&chid.data[0] , &chid.data[1] ,
&chid.data[2] , &chid.data[3] ,
&chid.data[4] , &chid.data[5] ,
&chid.data[6] , &chid.data[7] ,
&chid.data[8] , &chid.data[9] ,
&chid.data[10], &chid.data[11],
&chid.data[12], &chid.data[13],
&chid.data[14], &chid.data[15]);
if (result != 16) {
dev_err(dev, "Unrecognized CHID (need 16 8-bit hex digits): "
"%d\n", (int)result);
return -EINVAL;
}
result = wusbhc_chid_set(wusbhc, &chid);
return result < 0 ? result : size;
}
static DEVICE_ATTR_RW(wusb_chid);
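/*
 * Editor's usage note (illustrative only; the exact sysfs location is
 * assumed from the wusbhc_b_create() comment below): user space starts
 * the WUSB channel by writing sixteen hex bytes, e.g.
 *
 *	echo "00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff" > wusb_chid
 *
 * in the host controller's sysfs directory, and stops it again by
 * writing sixteen zero bytes (see wusbhc_chid_set()).
 */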
static ssize_t wusb_phy_rate_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
return sprintf(buf, "%d\n", wusbhc->phy_rate);
}
static ssize_t wusb_phy_rate_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
uint8_t phy_rate;
ssize_t result;
result = sscanf(buf, "%hhu", &phy_rate);
if (result != 1)
return -EINVAL;
if (phy_rate >= UWB_PHY_RATE_INVALID)
return -EINVAL;
wusbhc->phy_rate = phy_rate;
return size;
}
static DEVICE_ATTR_RW(wusb_phy_rate);
static ssize_t wusb_dnts_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
return sprintf(buf, "num slots: %d\ninterval: %dms\n",
wusbhc->dnts_num_slots, wusbhc->dnts_interval);
}
static ssize_t wusb_dnts_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
uint8_t num_slots, interval;
ssize_t result;
result = sscanf(buf, "%hhu %hhu", &num_slots, &interval);
if (result != 2)
return -EINVAL;
wusbhc->dnts_num_slots = num_slots;
wusbhc->dnts_interval = interval;
return size;
}
static DEVICE_ATTR_RW(wusb_dnts);
static ssize_t wusb_retry_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
return sprintf(buf, "%d\n", wusbhc->retry_count);
}
static ssize_t wusb_retry_count_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
uint8_t retry_count;
ssize_t result;
result = sscanf(buf, "%hhu", &retry_count);
if (result != 1)
return -EINVAL;
wusbhc->retry_count = max_t(uint8_t, retry_count,
WUSB_RETRY_COUNT_MAX);
return size;
}
static DEVICE_ATTR_RW(wusb_retry_count);
/* Group all the WUSBHC attributes */
static struct attribute *wusbhc_attrs[] = {
&dev_attr_wusb_trust_timeout.attr,
&dev_attr_wusb_chid.attr,
&dev_attr_wusb_phy_rate.attr,
&dev_attr_wusb_dnts.attr,
&dev_attr_wusb_retry_count.attr,
NULL,
};
static const struct attribute_group wusbhc_attr_group = {
.name = NULL, /* we want them in the same directory */
.attrs = wusbhc_attrs,
};
/*
* Create a wusbhc instance
*
* NOTEs:
*
* - assumes *wusbhc has been zeroed and wusbhc->usb_hcd has been
* initialized but not added.
*
* - fill out ports_max, mmcies_max and mmcie_{add,rm} before calling.
*
* - fill out wusbhc->uwb_rc and refcount it before calling
* - fill out the wusbhc->sec_modes array
*/
int wusbhc_create(struct wusbhc *wusbhc)
{
int result = 0;
/* set defaults. These can be overwritten using sysfs attributes. */
wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS;
wusbhc->phy_rate = UWB_PHY_RATE_INVALID - 1;
wusbhc->dnts_num_slots = 4;
wusbhc->dnts_interval = 2;
wusbhc->retry_count = WUSB_RETRY_COUNT_INFINITE;
mutex_init(&wusbhc->mutex);
result = wusbhc_mmcie_create(wusbhc);
if (result < 0)
goto error_mmcie_create;
result = wusbhc_devconnect_create(wusbhc);
if (result < 0)
goto error_devconnect_create;
result = wusbhc_rh_create(wusbhc);
if (result < 0)
goto error_rh_create;
result = wusbhc_sec_create(wusbhc);
if (result < 0)
goto error_sec_create;
return 0;
error_sec_create:
wusbhc_rh_destroy(wusbhc);
error_rh_create:
wusbhc_devconnect_destroy(wusbhc);
error_devconnect_create:
wusbhc_mmcie_destroy(wusbhc);
error_mmcie_create:
return result;
}
EXPORT_SYMBOL_GPL(wusbhc_create);
static inline struct kobject *wusbhc_kobj(struct wusbhc *wusbhc)
{
return &wusbhc->usb_hcd.self.controller->kobj;
}
/*
* Phase B of a wusbhc instance creation
*
* Creates fields that depend on wusbhc->usb_hcd having been
* added. This is where we create the sysfs files in
* /sys/class/usb_host/usb_hostX/.
*
* NOTE: Assumes wusbhc->usb_hcd has been already added by the upper
* layer (hwahc or whci)
*/
int wusbhc_b_create(struct wusbhc *wusbhc)
{
int result = 0;
struct device *dev = wusbhc->usb_hcd.self.controller;
result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
if (result < 0) {
dev_err(dev, "Cannot register WUSBHC attributes: %d\n",
result);
goto error_create_attr_group;
}
return 0;
error_create_attr_group:
return result;
}
EXPORT_SYMBOL_GPL(wusbhc_b_create);
void wusbhc_b_destroy(struct wusbhc *wusbhc)
{
wusbhc_pal_unregister(wusbhc);
sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
}
EXPORT_SYMBOL_GPL(wusbhc_b_destroy);
void wusbhc_destroy(struct wusbhc *wusbhc)
{
wusbhc_sec_destroy(wusbhc);
wusbhc_rh_destroy(wusbhc);
wusbhc_devconnect_destroy(wusbhc);
wusbhc_mmcie_destroy(wusbhc);
}
EXPORT_SYMBOL_GPL(wusbhc_destroy);
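/*
 * Editor's sketch (hypothetical upper-layer driver, not in this file):
 * putting the two creation phases together as described above, an HC
 * driver is expected to do roughly
 *
 *	result = wusbhc_create(wusbhc);
 *	...
 *	result = usb_add_hcd(&wusbhc->usb_hcd, irq, irqflags);
 *	...
 *	result = wusbhc_b_create(wusbhc);	// sysfs bits need the hcd added
 *
 * and tear down in the reverse order: wusbhc_b_destroy(), usb_remove_hcd(),
 * wusbhc_destroy().
 */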
struct workqueue_struct *wusbd;
EXPORT_SYMBOL_GPL(wusbd);
/*
* WUSB Cluster ID allocation map
*
* Each WUSB bus in a channel is identified with a Cluster Id in the
* unauth address pace (WUSB1.0[4.3]). We take the range 0xe0 to 0xff
* (that's space for 31 WUSB controllers, as 0xff can't be taken). We
* start taking from 0xff, 0xfe, 0xfd... (hence the += or -= 0xff).
*
* For each one we take, we pin it in the bitmap.
*/
#define CLUSTER_IDS 32
static DECLARE_BITMAP(wusb_cluster_id_table, CLUSTER_IDS);
static DEFINE_SPINLOCK(wusb_cluster_ids_lock);
/*
* Get a WUSB Cluster ID
*
* Need to release with wusb_cluster_id_put() when done w/ it.
*/
/* FIXME: coordinate with the choose_addres() from the USB stack */
/* we want to leave the top of the 128 range for cluster addresses and
* the bottom for device addresses (as we map them one on one with
* ports). */
u8 wusb_cluster_id_get(void)
{
u8 id;
spin_lock(&wusb_cluster_ids_lock);
id = find_first_zero_bit(wusb_cluster_id_table, CLUSTER_IDS);
if (id >= CLUSTER_IDS) {
id = 0;
goto out;
}
set_bit(id, wusb_cluster_id_table);
id = (u8) 0xff - id;
out:
spin_unlock(&wusb_cluster_ids_lock);
return id;
}
EXPORT_SYMBOL_GPL(wusb_cluster_id_get);
/*
* Release a WUSB Cluster ID
*
* Obtained it with wusb_cluster_id_get()
*/
void wusb_cluster_id_put(u8 id)
{
id = 0xff - id;
BUG_ON(id >= CLUSTER_IDS);
spin_lock(&wusb_cluster_ids_lock);
WARN_ON(!test_bit(id, wusb_cluster_id_table));
clear_bit(id, wusb_cluster_id_table);
spin_unlock(&wusb_cluster_ids_lock);
}
EXPORT_SYMBOL_GPL(wusb_cluster_id_put);
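/*
 * Editor's worked example (illustration only): module init pins bit 0,
 * i.e. cluster ID 0xff, so with a fresh table two consecutive calls to
 * wusb_cluster_id_get() return 0xfe and then 0xfd; a later
 * wusb_cluster_id_put(0xfe) clears bit 1 and makes 0xfe available again.
 */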
/**
* wusbhc_giveback_urb - return an URB to the USB core
* @wusbhc: the host controller the URB is from.
* @urb: the URB.
* @status: the URB's status.
*
* Return an URB to the USB core doing some additional WUSB specific
* processing.
*
* - After a successful transfer, update the trust timeout timestamp
* for the WUSB device.
*
* - [WUSB] sections 4.13 and 7.5.1 specify the stop retransmission
* condition for the WCONNECTACK_IE is that the host has observed
* the associated device responding to a control transfer.
*/
void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status)
{
struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc,
urb->dev);
if (status == 0 && wusb_dev) {
wusb_dev->entry_ts = jiffies;
/* wusbhc_devconnect_acked() can't be called from
atomic context so defer it to a work queue. */
if (!list_empty(&wusb_dev->cack_node))
queue_work(wusbd, &wusb_dev->devconnect_acked_work);
else
wusb_dev_put(wusb_dev);
}
usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status);
}
EXPORT_SYMBOL_GPL(wusbhc_giveback_urb);
/**
* wusbhc_reset_all - reset the HC hardware
* @wusbhc: the host controller to reset.
*
* Request a full hardware reset of the chip. This will also reset
* the radio controller and any other PALs.
*/
void wusbhc_reset_all(struct wusbhc *wusbhc)
{
if (wusbhc->uwb_rc)
uwb_rc_reset_all(wusbhc->uwb_rc);
}
EXPORT_SYMBOL_GPL(wusbhc_reset_all);
static struct notifier_block wusb_usb_notifier = {
.notifier_call = wusb_usb_ncb,
.priority = INT_MAX /* Need to be called first of all */
};
static int __init wusbcore_init(void)
{
int result;
result = wusb_crypto_init();
if (result < 0)
goto error_crypto_init;
/* WQ is singlethread because we need to serialize notifications */
wusbd = create_singlethread_workqueue("wusbd");
if (wusbd == NULL) {
result = -ENOMEM;
printk(KERN_ERR "WUSB-core: Cannot create wusbd workqueue\n");
goto error_wusbd_create;
}
usb_register_notify(&wusb_usb_notifier);
bitmap_zero(wusb_cluster_id_table, CLUSTER_IDS);
set_bit(0, wusb_cluster_id_table); /* reserve Cluster ID 0xff */
return 0;
error_wusbd_create:
wusb_crypto_exit();
error_crypto_init:
return result;
}
module_init(wusbcore_init);
static void __exit wusbcore_exit(void)
{
clear_bit(0, wusb_cluster_id_table);
if (!bitmap_empty(wusb_cluster_id_table, CLUSTER_IDS)) {
printk(KERN_ERR "BUG: WUSB Cluster IDs not released on exit: %*pb\n",
CLUSTER_IDS, wusb_cluster_id_table);
WARN_ON(1);
}
usb_unregister_notify(&wusb_usb_notifier);
destroy_workqueue(wusbd);
wusb_crypto_exit();
}
module_exit(wusbcore_exit);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Wireless USB core");
MODULE_LICENSE("GPL");

View File

@ -1,487 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* Common infrastructure for WHCI and HWA WUSB-HC drivers
*
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This driver implements parts common to all Wireless USB Host
* Controllers (struct wusbhc, embedding a struct usb_hcd) and is used
* by:
*
* - hwahc: HWA, USB-dongle that implements a Wireless USB host
* controller, (Wireless USB 1.0 Host-Wire-Adapter specification).
*
* - whci: WHCI, a PCI card with a wireless host controller
* (Wireless Host Controller Interface 1.0 specification).
*
* Check out the Design-overview.txt file in the source documentation
* for other details on the implementation.
*
* Main blocks:
*
* rh Root Hub emulation (part of the HCD glue)
*
* devconnect Handle all the issues related to device connection,
* authentication, disconnection, timeout, resetting,
* keepalives, etc.
*
* mmc MMC IE broadcasting handling
*
* A host controller driver just initializes its stuff and as part of
* that, creates a 'struct wusbhc' instance that handles all the
* common WUSB mechanisms. Links in the function ops that are specific
* to it and then registers the host controller. Ready to run.
*/
#ifndef __WUSBHC_H__
#define __WUSBHC_H__
#include <linux/usb.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/usb/hcd.h>
#include "../uwb/uwb.h"
#include "include/wusb.h"
/*
* Time from a WUSB channel stop request to the last transmitted MMC.
*
* This needs to be > 4.096 ms in case no MMCs can be transmitted in
* zone 0.
*/
#define WUSB_CHANNEL_STOP_DELAY_MS 8
#define WUSB_RETRY_COUNT_MAX 15
#define WUSB_RETRY_COUNT_INFINITE 0
/**
* Wireless USB device
*
* Describe a WUSB device connected to the cluster. This struct
* belongs to the 'struct wusb_port' it is attached to and it is
* responsible for putting and clearing the pointer to it.
*
* Note this "complements" the 'struct usb_device' that the usb_hcd
* keeps for each connected USB device. However, it extends some
* information that is not available (there is no hcpriv ptr in it!)
* *and* most importantly, its life cycle is different. It is created
* as soon as we get a DN_Connect (connect request notification) from
* the device through the WUSB host controller; the USB stack doesn't
* create the device until we authenticate it. FIXME: this will
* change.
*
* @bos: This is allocated when the BOS descriptors are read from
* the device and freed upon the wusb_dev struct dying.
* @wusb_cap_descr: points into @bos, and has been verified to be size
* safe.
*/
struct wusb_dev {
struct kref refcnt;
struct wusbhc *wusbhc;
struct list_head cack_node; /* Connect-Ack list */
struct list_head rekey_node; /* GTK rekey list */
u8 port_idx;
u8 addr;
u8 beacon_type:4;
struct usb_encryption_descriptor ccm1_etd;
struct wusb_ckhdid cdid;
unsigned long entry_ts;
struct usb_bos_descriptor *bos;
struct usb_wireless_cap_descriptor *wusb_cap_descr;
struct uwb_mas_bm availability;
struct work_struct devconnect_acked_work;
struct usb_device *usb_dev;
};
#define WUSB_DEV_ADDR_UNAUTH 0x80
static inline void wusb_dev_init(struct wusb_dev *wusb_dev)
{
kref_init(&wusb_dev->refcnt);
/* no need to init the cack_node */
}
extern void wusb_dev_destroy(struct kref *_wusb_dev);
static inline struct wusb_dev *wusb_dev_get(struct wusb_dev *wusb_dev)
{
kref_get(&wusb_dev->refcnt);
return wusb_dev;
}
static inline void wusb_dev_put(struct wusb_dev *wusb_dev)
{
kref_put(&wusb_dev->refcnt, wusb_dev_destroy);
}
/**
* Wireless USB Host Controller root hub "fake" ports
* (state and device information)
*
* Wireless USB is wireless, so there are no ports; but we
* fake'em. Each RC can connect a maximum number of devices at the same time
* (given in the Wireless Adapter descriptor, bNumPorts or WHCI's
* caps), referred to in wusbhc->ports_max.
*
* See rh.c for more information.
*
* The @status and @change use the same bits as in USB2.0[11.24.2.7],
* so we don't have to do much when getting the port's status.
*
* WUSB1.0[7.1], USB2.0[11.24.2.7.1,fig 11-10],
* include/linux/usb_ch9.h (#define USB_PORT_STAT_*)
*/
struct wusb_port {
u16 status;
u16 change;
struct wusb_dev *wusb_dev; /* connected device's info */
u32 ptk_tkid;
};
/**
* WUSB Host Controller specifics
*
* All fields that are common to all Wireless USB controller types
* (HWA and WHCI) are grouped here. Host Controller
* functions/operations that only deal with general Wireless USB HC
* issues use this data type to refer to the host.
*
* @usb_hcd Instantiation of a USB host controller
* (initialized by upper layer [HWA=HC or WHCI].
*
* @dev Device that implements this; initialized by the
* upper layer (HWA-HC, WHCI...); this device should
* have a refcount.
*
* @trust_timeout After this time without hearing for device
* activity, we consider the device gone and we have to
* re-authenticate.
*
* Can be accessed w/o locking--however, read to a
* local variable then use.
*
* @chid WUSB Cluster Host ID: this is supposed to be a
* unique value that doesn't change across reboots (so
* that your devices do not require re-association).
*
* Read/Write protected by @mutex
*
* @dev_info This array has ports_max elements. It is used to
* give the HC information about the WUSB devices (see
* 'struct wusb_dev_info').
*
* For HWA we need to allocate it in heap; for WHCI it
* needs to be permanently mapped, so we keep it for
* both and make it easy. Call wusbhc->dev_info_set()
* to update an entry.
*
* @ports_max Number of simultaneous device connections (fake
* ports) this HC will take. Read-only.
*
* @port Array of port status for each fake root port. Guaranteed to
* always be the same length during device existence
* [this allows for some unlocked but referenced reading].
*
* @mmcies_max Max number of Information Elements this HC can send
* in its MMC. Read-only.
*
* @start Start the WUSB channel.
*
* @stop Stop the WUSB channel after the specified number of
* milliseconds. Channel Stop IEs should be transmitted
* as required by [WUSB] 4.16.2.1.
*
* @mmcie_add HC specific operation (WHCI or HWA) for adding an
* MMCIE.
*
* @mmcie_rm HC specific operation (WHCI or HWA) for removing an
* MMCIE.
*
* @set_ptk: Set the PTK and enable encryption for a device. Or, if
* the supplied key is NULL, disable encryption for that
* device.
*
* @set_gtk: Set the GTK to be used for all future broadcast packets
* (i.e., MMCs). With some hardware, setting the GTK may start
* MMC transmission.
*
* NOTE:
*
* - If wusb_dev->usb_dev is not NULL, then usb_dev is valid
* (wusb_dev has a refcount on it). Likewise, if usb_dev->wusb_dev
* is not NULL, usb_dev->wusb_dev is valid (usb_dev keeps a
* refcount on it).
*
* Most of the times when you need to use it, it will be non-NULL,
* so there is no real need to check for it (wusb_dev will
* disappear before usb_dev).
*
* - The following fields need to be filled out before calling
* wusbhc_create(): ports_max, mmcies_max, mmcie_{add,rm}.
*
* - there is no wusbhc_init() method, we do everything in
* wusbhc_create().
*
* - Creation is done in two phases, wusbhc_create() and
* wusbhc_b_create(); phase B covers the parts that need to be called
* after calling usb_add_hcd(&wusbhc->usb_hcd).
*/
struct wusbhc {
struct usb_hcd usb_hcd; /* HAS TO BE 1st */
struct device *dev;
struct uwb_rc *uwb_rc;
struct uwb_pal pal;
unsigned trust_timeout; /* in jiffies */
struct wusb_ckhdid chid;
uint8_t phy_rate;
uint8_t dnts_num_slots;
uint8_t dnts_interval;
uint8_t retry_count;
struct wuie_host_info *wuie_host_info;
struct mutex mutex; /* locks everything else */
u16 cluster_id; /* Wireless USB Cluster ID */
struct wusb_port *port; /* Fake port status handling */
struct wusb_dev_info *dev_info; /* for Set Device Info mgmt */
u8 ports_max;
unsigned active:1; /* currently xmit'ing MMCs */
struct wuie_keep_alive keep_alive_ie; /* protected by mutex */
struct delayed_work keep_alive_timer;
struct list_head cack_list; /* Connect acknowledging */
size_t cack_count; /* protected by 'mutex' */
struct wuie_connect_ack cack_ie;
struct uwb_rsv *rsv; /* cluster bandwidth reservation */
struct mutex mmcie_mutex; /* MMC WUIE handling */
struct wuie_hdr **mmcie; /* WUIE array */
u8 mmcies_max;
/* FIXME: make wusbhc_ops? */
int (*start)(struct wusbhc *wusbhc);
void (*stop)(struct wusbhc *wusbhc, int delay);
int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
u8 handle, struct wuie_hdr *wuie);
int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle);
int (*dev_info_set)(struct wusbhc *, struct wusb_dev *wusb_dev);
int (*bwa_set)(struct wusbhc *wusbhc, s8 stream_index,
const struct uwb_mas_bm *);
int (*set_ptk)(struct wusbhc *wusbhc, u8 port_idx,
u32 tkid, const void *key, size_t key_size);
int (*set_gtk)(struct wusbhc *wusbhc,
u32 tkid, const void *key, size_t key_size);
int (*set_num_dnts)(struct wusbhc *wusbhc, u8 interval, u8 slots);
struct {
struct usb_key_descriptor descr;
u8 data[16]; /* GTK key data */
} __attribute__((packed)) gtk;
u8 gtk_index;
u32 gtk_tkid;
/* workqueue for WUSB security related tasks. */
struct workqueue_struct *wq_security;
struct work_struct gtk_rekey_work;
struct usb_encryption_descriptor *ccm1_etd;
};
#define usb_hcd_to_wusbhc(u) container_of((u), struct wusbhc, usb_hcd)
extern int wusbhc_create(struct wusbhc *);
extern int wusbhc_b_create(struct wusbhc *);
extern void wusbhc_b_destroy(struct wusbhc *);
extern void wusbhc_destroy(struct wusbhc *);
extern int wusb_dev_sysfs_add(struct wusbhc *, struct usb_device *,
struct wusb_dev *);
extern void wusb_dev_sysfs_rm(struct wusb_dev *);
extern int wusbhc_sec_create(struct wusbhc *);
extern int wusbhc_sec_start(struct wusbhc *);
extern void wusbhc_sec_stop(struct wusbhc *);
extern void wusbhc_sec_destroy(struct wusbhc *);
extern void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb,
int status);
void wusbhc_reset_all(struct wusbhc *wusbhc);
int wusbhc_pal_register(struct wusbhc *wusbhc);
void wusbhc_pal_unregister(struct wusbhc *wusbhc);
/*
* Return @usb_dev's @usb_hcd (properly referenced) or NULL if gone
*
* @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
*
* This is a safe assumption as @usb_dev->bus is referenced all the
* time during the @usb_dev life cycle.
*/
static inline
struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
{
struct usb_hcd *usb_hcd;
usb_hcd = bus_to_hcd(usb_dev->bus);
return usb_get_hcd(usb_hcd);
}
/*
* Increment the reference count on a wusbhc.
*
* @wusbhc's life cycle is identical to that of the underlying usb_hcd.
*/
static inline struct wusbhc *wusbhc_get(struct wusbhc *wusbhc)
{
return usb_get_hcd(&wusbhc->usb_hcd) ? wusbhc : NULL;
}
/*
* Return the wusbhc associated to a @usb_dev
*
* @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
*
* @returns: wusbhc for @usb_dev; NULL if the @usb_dev is being torn down.
* WARNING: referenced at the usb_hcd level, unlocked
*
* FIXME: move offline
*/
static inline struct wusbhc *wusbhc_get_by_usb_dev(struct usb_device *usb_dev)
{
struct usb_hcd *usb_hcd;
if (usb_dev->devnum > 1 && !usb_dev->wusb) {
/* every device on this bus but the root hub must be WUSB */
dev_err(&usb_dev->dev, "devnum %d wusb %d\n", usb_dev->devnum,
usb_dev->wusb);
BUG();	/* condition already checked above */
}
usb_hcd = usb_hcd_get_by_usb_dev(usb_dev);
if (usb_hcd == NULL)
return NULL;
BUG_ON(usb_hcd->wireless == 0);
return usb_hcd_to_wusbhc(usb_hcd);
}
static inline void wusbhc_put(struct wusbhc *wusbhc)
{
usb_put_hcd(&wusbhc->usb_hcd);
}
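/*
 * Illustrative sketch only (not part of the original header): the typical
 * lookup/use/release pattern for the two helpers above.  The function name
 * is hypothetical.
 */
static inline u8 wusbhc_example_ports_max(struct usb_device *usb_dev)
{
	struct wusbhc *wusbhc = wusbhc_get_by_usb_dev(usb_dev);
	u8 ports_max;

	if (wusbhc == NULL)
		return 0;	/* @usb_dev is being torn down */
	ports_max = wusbhc->ports_max;
	wusbhc_put(wusbhc);
	return ports_max;
}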
int wusbhc_start(struct wusbhc *wusbhc);
void wusbhc_stop(struct wusbhc *wusbhc);
extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *);
/* Device connect handling */
extern int wusbhc_devconnect_create(struct wusbhc *);
extern void wusbhc_devconnect_destroy(struct wusbhc *);
extern int wusbhc_devconnect_start(struct wusbhc *wusbhc);
extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc);
extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr,
struct wusb_dn_hdr *dn_hdr, size_t size);
extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port);
extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
void *priv);
extern int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
u8 addr);
/* Wireless USB fake Root Hub methods */
extern int wusbhc_rh_create(struct wusbhc *);
extern void wusbhc_rh_destroy(struct wusbhc *);
extern int wusbhc_rh_status_data(struct usb_hcd *, char *);
extern int wusbhc_rh_control(struct usb_hcd *, u16, u16, u16, char *, u16);
extern int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned);
/* MMC handling */
extern int wusbhc_mmcie_create(struct wusbhc *);
extern void wusbhc_mmcie_destroy(struct wusbhc *);
extern int wusbhc_mmcie_set(struct wusbhc *, u8 interval, u8 repeat_cnt,
struct wuie_hdr *);
extern void wusbhc_mmcie_rm(struct wusbhc *, struct wuie_hdr *);
/* Bandwidth reservation */
int wusbhc_rsv_establish(struct wusbhc *wusbhc);
void wusbhc_rsv_terminate(struct wusbhc *wusbhc);
/*
* I've always said
* I wanted a wedding in a church...
*
* but lately I've been thinking about
* the Botanical Gardens.
*
* We could do it by the tulips.
* It'll be beautiful
*
* --Security!
*/
extern int wusb_dev_sec_add(struct wusbhc *, struct usb_device *,
struct wusb_dev *);
extern void wusb_dev_sec_rm(struct wusb_dev *);
extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *,
struct wusb_ckhdid *ck);
void wusbhc_gtk_rekey(struct wusbhc *wusbhc);
int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev);
/* WUSB Cluster ID handling */
extern u8 wusb_cluster_id_get(void);
extern void wusb_cluster_id_put(u8);
/*
* wusb_port_by_idx - return the port associated to a zero-based port index
*
* NOTE: valid without locking as long as wusbhc is referenced (as the
* number of ports doesn't change). The data pointed to has to
* be verified though :)
*/
static inline struct wusb_port *wusb_port_by_idx(struct wusbhc *wusbhc,
u8 port_idx)
{
return &wusbhc->port[port_idx];
}
/*
* wusb_port_no_to_idx - Convert port number (per usb_dev->portnum) to
* a port_idx.
*
* Port numbers in the USB stack are 1-based!
*
* NOTE: only valid for WUSB devices!!!
*/
static inline u8 wusb_port_no_to_idx(u8 port_no)
{
return port_no - 1;
}
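/*
 * Illustrative sketch only (not part of the original header): combining the
 * two port helpers above to go from a device's 1-based port number to its
 * slot in the fake root hub.  The function name is hypothetical.
 */
static inline struct wusb_port *wusb_port_example_for_dev(struct wusbhc *wusbhc,
							  struct usb_device *usb_dev)
{
	u8 port_idx = wusb_port_no_to_idx(usb_dev->portnum);

	/* Valid while @wusbhc stays referenced; ports_max never changes. */
	return wusb_port_by_idx(wusbhc, port_idx);
}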
extern struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *,
struct usb_device *);
/*
* Return a referenced wusb_dev given a @usb_dev
*
* Returns NULL if the usb_dev is being torn down.
*
* FIXME: move offline
*/
static inline
struct wusb_dev *wusb_dev_get_by_usb_dev(struct usb_device *usb_dev)
{
struct wusbhc *wusbhc;
struct wusb_dev *wusb_dev;
wusbhc = wusbhc_get_by_usb_dev(usb_dev);
if (wusbhc == NULL)
return NULL;
mutex_lock(&wusbhc->mutex);
wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
mutex_unlock(&wusbhc->mutex);
wusbhc_put(wusbhc);
return wusb_dev;
}
/* Misc */
extern struct workqueue_struct *wusbd;
#endif /* #ifndef __WUSBHC_H__ */