staging: fwserial: Add TTY-over-Firewire serial driver

This patch provides the kernel driver for high-speed TTY
communication over the IEEE 1394 bus.

Signed-off-by: Peter Hurley <peter@hurleysoftware.com>
Acked-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Peter Hurley 2012-11-02 08:16:33 -04:00 committed by Greg Kroah-Hartman
parent fd985e1def
commit 7355ba3445
9 changed files with 3824 additions and 0 deletions

View file

@ -146,4 +146,6 @@ source "drivers/staging/dgrp/Kconfig"
source "drivers/staging/sb105x/Kconfig"
source "drivers/staging/fwserial/Kconfig"
endif # STAGING

View file

@ -65,3 +65,4 @@ obj-$(CONFIG_CED1401) += ced1401/
obj-$(CONFIG_DRM_IMX) += imx-drm/
obj-$(CONFIG_DGRP) += dgrp/
obj-$(CONFIG_SB105X) += sb105x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/

View file

@ -0,0 +1,9 @@
# Staging driver: exposes IEEE 1394 (Firewire) peers as high-speed TTY devices.
config FIREWIRE_SERIAL
tristate "TTY over Firewire"
depends on FIREWIRE
help
This enables TTY over IEEE 1394, providing high-speed serial
connectivity to cabled peers.
To compile this driver as a module, say M here: the module will
be called firewire-serial.

View file

@ -0,0 +1,2 @@
# firewire-serial module: core TTY driver (fwserial.o) + DMA-able FIFO helper
obj-$(CONFIG_FIREWIRE_SERIAL) += firewire-serial.o
firewire-serial-objs := fwserial.o dma_fifo.o

View file

@ -0,0 +1,37 @@
TODOs
-----
1. Implement retries for RCODE_BUSY, RCODE_NO_ACK and RCODE_SEND_ERROR
- I/O is handled asynchronously which presents some issues when error
conditions occur.
2. Implement _robust_ console on top of this. The existing prototype console
driver is not ready for the big leagues yet.
3. Expose means of controlling attach/detach of peers via sysfs. Include
GUID-to-port matching/whitelist/blacklist.
-- Issues with firewire stack --
1. This driver uses the same unregistered vendor id that the firewire core does
(0xd00d1e). Perhaps this could be exposed as a define in
firewire-constants.h?
2. MAX_ASYNC_PAYLOAD needs to be publicly exposed by core/ohci
- otherwise how will this driver know the max size of address window to
open for one packet write?
3. Maybe device_max_receive() and link_speed_to_max_payload() should be
taken up by the firewire core?
4. To avoid dropping rx data while still limiting the maximum buffering,
the size of the AR context must be known. How to expose this to drivers?
5. Explore if bigger AR context will reduce RCODE_BUSY responses
(or auto-grow to certain max size -- but this would require major surgery
as the current AR is contiguously mapped)
-- Issues with TTY core --
1. Hack for alternate device name scheme
- because udev no longer allows device renaming, devices should have
their proper names on creation. This is an issue for creating the
fwloop<n> device with the fwtty<n> devices because although duplicating
roughly the same operations as tty_port_register_device() isn't difficult,
access to the tty_class & tty_fops is restricted in scope.
This is currently being worked around in create_loop_device() by
extracting the tty_class ptr and tty_fops ptr from the previously created
tty devices. Perhaps an add'l api can be added -- eg.,
tty_{port_}register_named_device().

View file

@ -0,0 +1,310 @@
/*
* DMA-able FIFO implementation
*
* Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/bug.h>
#include "dma_fifo.h"
/* tracing compiles out entirely unless DEBUG_TRACING is defined */
#ifdef DEBUG_TRACING
#define df_trace(s, args...) pr_debug(s, ##args)
#else
#define df_trace(s, args...)
#endif
/*
 * FAIL() - record corruption if @condition is true, WARN with @format,
 * and yield the corruption state as the expression value (gcc statement
 * expression).
 * Note: fifo->corrupt is overwritten with !!(condition) on every call, so
 * stickiness relies on each entry point checking fifo->corrupt on entry
 * and bailing with -ENXIO before reaching another FAIL().
 */
#define FAIL(fifo, condition, format...) ({ \
fifo->corrupt = !!(condition); \
if (unlikely(fifo->corrupt)) { \
__WARN_printf(format); \
} \
unlikely(fifo->corrupt); \
})
/*
 * Private helper: true iff @check lies strictly inside the open interval
 * (@lo, @hi), treating the values as positions on the unsigned
 * wrap-around number line (so the interval may straddle 0).
 */
static bool addr_check(unsigned check, unsigned lo, unsigned hi)
{
	unsigned offset = check - (lo + 1);
	unsigned span = (hi - 1) - lo;

	return offset < span;
}
/**
 * dma_fifo_init: initialize the fifo to a valid but inoperative state
 * @fifo: address of in-place "struct dma_fifo" object
 *
 * The fifo is unusable (I/O fns return -ENOENT) until dma_fifo_alloc()
 * provides backing storage.
 */
void dma_fifo_init(struct dma_fifo *fifo)
{
/* zero first; INIT_LIST_HEAD() must follow to make the list head valid */
memset(fifo, 0, sizeof(*fifo));
INIT_LIST_HEAD(&fifo->pending);
}
/**
 * dma_fifo_alloc - initialize and allocate dma_fifo
 * @fifo: address of in-place "struct dma_fifo" object
 * @size: 'apparent' size, in bytes, of fifo
 * @align: dma alignment to maintain (should be at least cpu cache alignment),
 *         must be power of 2
 * @tx_limit: maximum # of bytes transmittable per dma (rounded down to
 *            multiple of alignment, but at least align size)
 * @open_limit: maximum # of outstanding dma transactions allowed
 * @gfp_mask: get_free_pages mask, passed to kmalloc()
 *
 * The 'apparent' size will be rounded up to next greater aligned size.
 * Returns 0 if no error, otherwise an error code
 */
int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned align,
		   int tx_limit, int open_limit, gfp_t gfp_mask)
{
	int capacity;

	/* reject non-power-of-2 alignment and negative sizes/limits up front */
	if (!is_power_of_2(align) || size < 0 || tx_limit < 0 || open_limit < 0)
		return -EINVAL;

	size = round_up(size, align);
	/*
	 * Hidden capacity beyond the 'apparent' size: one cache line per
	 * possible in-flight dma (re-alignment waste) plus DMA_FIFO_GUARD
	 * lines to limit dma fragmentation when wrapping (see dma_fifo.h).
	 */
	capacity = size + align * open_limit + align * DMA_FIFO_GUARD;
	fifo->data = kmalloc(capacity, gfp_mask);
	if (!fifo->data)
		return -ENOMEM;

	fifo->in = 0;
	fifo->out = 0;
	fifo->done = 0;
	fifo->size = size;
	fifo->avail = size;
	fifo->align = align;
	/* tx_limit is aligned down, but never less than one cache line */
	fifo->tx_limit = max_t(int, round_down(tx_limit, align), align);
	fifo->open = 0;
	fifo->open_limit = open_limit;
	fifo->guard = size + align * open_limit;
	fifo->capacity = capacity;
	fifo->corrupt = 0;
	return 0;
}
/**
 * dma_fifo_free - frees the fifo
 * @fifo: address of in-place "struct dma_fifo" to free
 *
 * Also reinits the fifo to a valid but inoperative state. This
 * allows the fifo to be reused with a different target requiring
 * different fifo parameters.
 */
void dma_fifo_free(struct dma_fifo *fifo)
{
struct dma_pending *pending, *next;
/* no-op if storage was never allocated or was already freed */
if (fifo->data == NULL)
return;
/* markers are caller-owned (see dma_fifo_out_pend()); just unlink them */
list_for_each_entry_safe(pending, next, &fifo->pending, link)
list_del_init(&pending->link);
kfree(fifo->data);
fifo->data = NULL;
}
/**
 * dma_fifo_reset - dumps the fifo contents and reinits for reuse
 * @fifo: address of in-place "struct dma_fifo" to reset
 *
 * Keeps the backing storage and the configured parameters (size, align,
 * tx_limit, open_limit); only the buffered data and in-flight state are
 * discarded.
 */
void dma_fifo_reset(struct dma_fifo *fifo)
{
struct dma_pending *pending, *next;
if (fifo->data == NULL)
return;
/* abandon any outstanding dma transactions */
list_for_each_entry_safe(pending, next, &fifo->pending, link)
list_del_init(&pending->link);
fifo->in = 0;
fifo->out = 0;
fifo->done = 0;
fifo->avail = fifo->size;
fifo->open = 0;
/* a reset also clears previously detected corruption */
fifo->corrupt = 0;
}
/**
 * dma_fifo_in - copies data into the fifo
 * @fifo: address of in-place "struct dma_fifo" to write to
 * @src: buffer to copy from
 * @n: # of bytes to copy
 *
 * Returns the # of bytes actually copied, which can be less than requested if
 * the fifo becomes full. If < 0, return is error code.
 */
int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n)
{
int ofs, l;
if (fifo->data == NULL)
return -ENOENT;
if (fifo->corrupt)
return -ENXIO;
/* clamp to the 'apparent' free space */
if (n > fifo->avail)
n = fifo->avail;
if (n <= 0)
return 0;
/* copy in up to two pieces; the input marker wraps at capacity, not size */
ofs = fifo->in % fifo->capacity;
l = min(n, fifo->capacity - ofs);
memcpy(fifo->data + ofs, src, l);
memcpy(fifo->data, src + l, n - l); /* 0-length copy if no wrap */
/* new input must not overrun the region still owned by in-flight dma */
if (FAIL(fifo, addr_check(fifo->done, fifo->in, fifo->in + n) ||
fifo->avail < n,
"fifo corrupt: in:%u out:%u done:%u n:%d avail:%d",
fifo->in, fifo->out, fifo->done, n, fifo->avail))
return -ENXIO;
fifo->in += n;
fifo->avail -= n;
df_trace("in:%u out:%u done:%u n:%d avail:%d", fifo->in, fifo->out,
fifo->done, n, fifo->avail);
return n;
}
/**
 * dma_fifo_out_pend - gets address/len of next avail read and marks as pended
 * @fifo: address of in-place "struct dma_fifo" to read from
 * @pended: address of structure to fill with read address/len
 * The data/len fields will be NULL/0 if no dma is pended.
 * The marker must stay valid until dma_fifo_out_complete() consumes it.
 *
 * Returns the # of used bytes remaining in fifo (ie, if > 0, more data
 * remains in the fifo that was not pended). If < 0, return is error code.
 */
int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended)
{
unsigned len, n, ofs, l, limit;
if (fifo->data == NULL)
return -ENOENT;
if (fifo->corrupt)
return -ENXIO;
pended->len = 0;
pended->data = NULL;
pended->out = fifo->out;
len = fifo->in - fifo->out;
if (!len)
return -ENODATA;
if (fifo->open == fifo->open_limit)
return -EAGAIN;
n = len;
ofs = fifo->out % fifo->capacity;
l = fifo->capacity - ofs;
/* a single dma must be contiguous: end-of-buffer also caps its size */
limit = min_t(unsigned, l, fifo->tx_limit);
if (n > limit) {
/* data remains unpended; advance out by exactly what is pended */
n = limit;
fifo->out += limit;
} else if (ofs + n > fifo->guard) {
/* inside guard area: wrap in/out to buffer start (see dma_fifo.h) */
fifo->out += l;
fifo->in = fifo->out;
} else {
/* all avail data pended: re-align in/out to the next cache line */
fifo->out += round_up(n, fifo->align);
fifo->in = fifo->out;
}
df_trace("in: %u out: %u done: %u n: %d len: %u avail: %d", fifo->in,
fifo->out, fifo->done, n, len, fifo->avail);
pended->len = n;
pended->data = fifo->data + ofs;
pended->next = fifo->out;
/* the pending list preserves pend order for in-order completion */
list_add_tail(&pended->link, &fifo->pending);
++fifo->open;
if (FAIL(fifo, fifo->open > fifo->open_limit,
"past open limit:%d (limit:%d)",
fifo->open, fifo->open_limit))
return -ENXIO;
if (FAIL(fifo, fifo->out & (fifo->align - 1),
"fifo out unaligned:%u (align:%u)",
fifo->out, fifo->align))
return -ENXIO;
return len - n;
}
/**
 * dma_fifo_out_complete - marks pended dma as completed
 * @fifo: address of in-place "struct dma_fifo" which was read from
 * @complete: address of structure for previously pended dma to mark completed
 *
 * Completions may be reported out-of-order, but 'done' and 'avail' only
 * advance in the original pend order; a transaction that is never
 * completed will eventually stall the fifo.
 */
int dma_fifo_out_complete(struct dma_fifo *fifo, struct dma_pending *complete)
{
struct dma_pending *pending, *next, *tmp;
if (fifo->data == NULL)
return -ENOENT;
if (fifo->corrupt)
return -ENXIO;
/* completing with nothing outstanding is a caller error */
if (list_empty(&fifo->pending) && fifo->open == 0)
return -EINVAL;
if (FAIL(fifo, list_empty(&fifo->pending) != (fifo->open == 0),
"pending list disagrees with open count:%d",
fifo->open))
return -ENXIO;
/*
 * Clone the caller's marker into the fifo region it described (those
 * bytes were already transmitted), then tag the clone as completed via
 * bit 0 of its data ptr.  NOTE(review): presumably this lets the
 * caller's dma_pending go out of scope before completion order catches
 * up -- confirm against callers in fwserial.c.
 */
tmp = complete->data;
*tmp = *complete;
list_replace(&complete->link, &tmp->link);
dp_mark_completed(tmp);
/* Only update the fifo in the original pended order */
list_for_each_entry_safe(pending, next, &fifo->pending, link) {
if (!dp_is_completed(pending)) {
df_trace("still pending: saved out: %u len: %d",
pending->out, pending->len);
break;
}
/* each retired marker must describe the region starting at 'done' */
if (FAIL(fifo, pending->out != fifo->done ||
addr_check(fifo->in, fifo->done, pending->next),
"in:%u out:%u done:%u saved:%u next:%u",
fifo->in, fifo->out, fifo->done, pending->out,
pending->next))
return -ENXIO;
list_del_init(&pending->link);
fifo->done = pending->next;
fifo->avail += pending->len;
--fifo->open;
df_trace("in: %u out: %u done: %u len: %u avail: %d", fifo->in,
fifo->out, fifo->done, pending->len, fifo->avail);
}
if (FAIL(fifo, fifo->open < 0, "open dma:%d < 0", fifo->open))
return -ENXIO;
if (FAIL(fifo, fifo->avail > fifo->size, "fifo avail:%d > size:%d",
fifo->avail, fifo->size))
return -ENXIO;
return 0;
}

View file

@ -0,0 +1,130 @@
/*
* DMA-able FIFO interface
*
* Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _DMA_FIFO_H_
#define _DMA_FIFO_H_
/**
* The design basis for the DMA FIFO is to provide an output side that
* complies with the streaming DMA API design that can be DMA'd from directly
* (without additional copying), coupled with an input side that maintains a
* logically consistent 'apparent' size (ie, bytes in + bytes avail is static
* for the lifetime of the FIFO).
*
* DMA output transactions originate on a cache line boundary and can be
* variably-sized. DMA output transactions can be retired out-of-order but
* the FIFO will only advance the output in the original input sequence.
* This means the FIFO will eventually stall if a transaction is never retired.
*
* Chunking the output side into cache line multiples means that some FIFO
* memory is unused. For example, if all the avail input has been pended out,
* then the in and out markers are re-aligned to the next cache line.
* The maximum possible waste is
* (cache line alignment - 1) * (max outstanding dma transactions)
* This potential waste requires additional hidden capacity within the FIFO
* to be able to accept input while the 'apparent' size has not been reached.
*
* Additional cache lines (ie, guard area) are used to minimize DMA
* fragmentation when wrapping at the end of the FIFO. Input is allowed into the
* guard area, but the in and out FIFO markers are wrapped when DMA is pended.
*/
#define DMA_FIFO_GUARD 3 /* # of cache lines to reserve for the guard area */
struct dma_fifo {
unsigned in; /* input marker; advanced by dma_fifo_in() */
unsigned out; /* updated when dma is pended */
unsigned done; /* updated upon dma completion */
/* NOTE(review): anonymous struct wrapper around the bitfield appears
 * redundant -- 'corrupt' is accessed as fifo->corrupt either way */
struct {
unsigned corrupt:1;
};
int size; /* 'apparent' size of fifo */
int guard; /* ofs of guard area */
int capacity; /* size + reserved */
int avail; /* # of unused bytes in fifo */
unsigned align; /* must be power of 2 */
int tx_limit; /* max # of bytes per dma transaction */
int open_limit; /* max # of outstanding allowed */
int open; /* # of outstanding dma transactions */
struct list_head pending; /* fifo markers for outstanding dma */
void *data; /* backing storage, kmalloc'd by dma_fifo_alloc() */
};
/* caller-provided marker describing one pended (in-flight) dma transaction */
struct dma_pending {
struct list_head link; /* link in dma_fifo.pending, kept in pend order */
void *data; /* start of region to dma; bit 0 doubles as completed flag */
unsigned len; /* # of bytes in this transaction */
unsigned next; /* value of fifo 'out' after this transaction */
unsigned out; /* value of fifo 'out' when this was pended */
};
/*
 * Tag a pended marker as completed by setting bit 0 of its data pointer
 * (valid because dma regions are cache-line aligned, so bit 0 of a real
 * data address is always clear).  Cast to char * for the increment:
 * arithmetic on void * is a GNU extension, not standard C.
 */
static inline void dp_mark_completed(struct dma_pending *dp)
{
	dp->data = (char *)dp->data + 1;
}
/* true if this marker was tagged completed by dp_mark_completed() */
static inline bool dp_is_completed(struct dma_pending *dp)
{
	unsigned long addr = (unsigned long)dp->data;

	return addr & 1UL;
}
extern void dma_fifo_init(struct dma_fifo *fifo);
extern int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned align,
int tx_limit, int open_limit, gfp_t gfp_mask);
extern void dma_fifo_free(struct dma_fifo *fifo);
extern void dma_fifo_reset(struct dma_fifo *fifo);
extern int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n);
extern int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended);
extern int dma_fifo_out_complete(struct dma_fifo *fifo,
struct dma_pending *complete);
/* returns the # of used bytes in the fifo (includes bytes pended for dma,
 * since 'avail' is only restored by dma_fifo_out_complete()) */
static inline int dma_fifo_level(struct dma_fifo *fifo)
{
return fifo->size - fifo->avail;
}
/* returns the # of bytes ready for output in the fifo (not yet pended) */
static inline int dma_fifo_out_level(struct dma_fifo *fifo)
{
return fifo->in - fifo->out;
}
/* returns the # of unused bytes in the fifo ('apparent'-size accounting) */
static inline int dma_fifo_avail(struct dma_fifo *fifo)
{
return fifo->avail;
}
/* returns true if fifo has max # of outstanding dmas
 * (dma_fifo_out_pend() would return -EAGAIN) */
static inline bool dma_fifo_busy(struct dma_fifo *fifo)
{
return fifo->open == fifo->open_limit;
}
/* changes the max size of dma returned from dma_fifo_out_pend()
 * (aligned down, but never below one cache line); always returns 0 */
static inline int dma_fifo_change_tx_limit(struct dma_fifo *fifo, int tx_limit)
{
tx_limit = round_down(tx_limit, fifo->align);
fifo->tx_limit = max_t(int, tx_limit, fifo->align);
return 0;
}
#endif /* _DMA_FIFO_H_ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,387 @@
#ifndef _FIREWIRE_FWSERIAL_H
#define _FIREWIRE_FWSERIAL_H
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/list.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "dma_fifo.h"
#ifdef FWTTY_PROFILING
#define DISTRIBUTION_MAX_SIZE 8192
#define DISTRIBUTION_MAX_INDEX (ilog2(DISTRIBUTION_MAX_SIZE) + 1)
/* bucket 'val' into stat[] by power-of-2 magnitude; bucket 0 is val == 0,
 * sizes >= DISTRIBUTION_MAX_SIZE all land in the last bucket */
static inline void profile_size_distrib(unsigned stat[], unsigned val)
{
int n = (val) ? min(ilog2(val) + 1, DISTRIBUTION_MAX_INDEX) : 0;
++stat[n];
}
#else
#define DISTRIBUTION_MAX_INDEX 0
#define profile_size_distrib(st, n)
#endif
/* Parameters for both VIRT_CABLE_PLUG & VIRT_CABLE_PLUG_RSP mgmt codes */
struct virt_plug_params {
__be32 status_hi; /* hi/lo halves of a 64-bit bus addr -- presumably the
 * sender's status_addr (see fwtty_peer); confirm */
__be32 status_lo;
__be32 fifo_hi; /* hi/lo halves of the sender's fifo_addr -- confirm */
__be32 fifo_lo;
__be32 fifo_len;
};
/* parameter block for fwtty_peer.work functions (one work item at a time) */
struct peer_work_params {
union {
struct virt_plug_params plug_req;
};
};
/**
 * fwtty_peer: structure representing local & remote unit devices
 * @unit: unit child device of fw_device node
 * @serial: back pointer to associated fw_serial aggregate
 * @guid: unique 64-bit guid for this unit device
 * @generation: most recent bus generation
 * @node_id: most recent node_id
 * @speed: link speed of peer (0 = S100, 2 = S400, ... 5 = S3200)
 * @max_payload: max bytes per async write -- NOTE(review): presumably
 * derived from @speed via link_speed_to_max_payload(); confirm
 * @mgmt_addr: bus addr region to write mgmt packets to
 * @status_addr: bus addr register to write line status to
 * @fifo_addr: bus addr region to write serial output to
 * @fifo_len: max length for single write to fifo_addr
 * @list: link for insertion into fw_serial's peer_list
 * @rcu: for deferring peer reclamation
 * @lock: spinlock to synchronize changes to state & port fields
 * @work: only one work item can be queued at any one time
 * Note: pending work is canceled prior to removal, so this
 * peer is valid for at least the lifetime of the work function
 * @work_params: parameter block for work functions
 * @timer: timer for resetting peer state if remote request times out
 * @state: current state
 * @connect: work item for auto-connecting
 * @connect_retries: # of connections already attempted
 * @port: associated tty_port (usable if state == FWPS_ATTACHED)
 */
struct fwtty_peer {
struct fw_unit *unit;
struct fw_serial *serial;
u64 guid;
int generation;
int node_id;
unsigned speed;
int max_payload;
u64 mgmt_addr;
/* these are usable only if state == FWPS_ATTACHED */
u64 status_addr;
u64 fifo_addr;
int fifo_len;
struct list_head list;
struct rcu_head rcu;
spinlock_t lock;
struct work_struct work;
struct peer_work_params work_params;
struct timer_list timer;
int state;
struct delayed_work connect;
int connect_retries;
struct fwtty_port *port;
};
#define to_peer(ptr, field) (container_of(ptr, struct fwtty_peer, field))
/* state values for fwtty_peer.state field */
enum fwtty_peer_state {
FWPS_GONE,
FWPS_NOT_ATTACHED,
FWPS_ATTACHED,
FWPS_PLUG_PENDING,
FWPS_PLUG_RESPONDING,
FWPS_UNPLUG_PENDING,
FWPS_UNPLUG_RESPONDING,
FWPS_NO_MGMT_ADDR = -1, /* sentinel, distinct from all states above */
};
#define CONNECT_RETRY_DELAY HZ
#define MAX_CONNECT_RETRIES 10
/* must be holding peer lock for these state funclets */

/* transition peer to 'new' state; no side effects beyond the assignment */
static inline void peer_set_state(struct fwtty_peer *peer, int new)
{
peer->state = new;
}
/*
 * Detach the port and fall back to FWPS_NOT_ATTACHED.  Returns the
 * previously attached port (NULL if none) so the caller can clean up.
 * Caller must hold the peer lock.
 */
static inline struct fwtty_port *peer_revert_state(struct fwtty_peer *peer)
{
struct fwtty_port *port = peer->port;
peer->port = NULL;
peer_set_state(peer, FWPS_NOT_ATTACHED);
return port;
}
/* wire format of a mgmt packet (written to the peer's mgmt_addr region) */
struct fwserial_mgmt_pkt {
struct {
__be16 len; /* NOTE(review): presumably total pkt length -- confirm */
__be16 code; /* FWSC_* code, possibly or'd with FWSC_RSP_* flags */
} hdr;
union {
struct virt_plug_params plug_req;
struct virt_plug_params plug_rsp;
};
} __packed;
/* fwserial_mgmt_packet codes */
#define FWSC_RSP_OK 0x0000
#define FWSC_RSP_NACK 0x8000
#define FWSC_CODE_MASK 0x0fff
#define FWSC_VIRT_CABLE_PLUG 1
#define FWSC_VIRT_CABLE_UNPLUG 2
#define FWSC_VIRT_CABLE_PLUG_RSP 3
#define FWSC_VIRT_CABLE_UNPLUG_RSP 4
/* 1 min. plug timeout -- suitable for userland authorization */
#define VIRT_CABLE_PLUG_TIMEOUT (60 * HZ)
/* per-port statistics (reported in /proc/tty/driver/firewire_serial) */
struct stats {
unsigned xchars;
unsigned dropped;
unsigned tx_stall;
unsigned fifo_errs;
unsigned sent;
unsigned lost;
unsigned throttled;
unsigned watermark;
/* size distributions, bucketed by ilog2 (see profile_size_distrib()) */
unsigned reads[DISTRIBUTION_MAX_INDEX + 1];
unsigned writes[DISTRIBUTION_MAX_INDEX + 1];
unsigned txns[DISTRIBUTION_MAX_INDEX + 1];
unsigned unthrottle[DISTRIBUTION_MAX_INDEX + 1];
};
/* callbacks into an attached fw_console (see fwtty_bind_console()) */
struct fwconsole_ops {
void (*notify)(int code, void *data); /* code is a FWCON_NOTIFY_* value */
void (*stats)(struct stats *stats, void *data);
void (*proc_show)(struct seq_file *m, void *data);
};
/* codes for console ops notify */
#define FWCON_NOTIFY_ATTACH 1
#define FWCON_NOTIFY_DETACH 2
/*
 * One chunk of buffered rx data, queued on fwtty_port.buf_list when the
 * tty buffer fills before the ldisc throttles the sender.
 */
struct buffered_rx {
	struct list_head list;
	size_t n;		/* # of bytes in data[] */
	unsigned char data[];	/* C99 flexible array member (was data[0],
				 * a GNU zero-length-array extension) */
};
/**
* fwtty_port: structure used to track/represent underlying tty_port
* @port: underlying tty_port
* @device: tty device
* @index: index into port_table for this particular port
* note: minor = index + FWSERIAL_TTY_START_MINOR
* @serial: back pointer to the containing fw_serial
* @rx_handler: bus address handler for unique addr region used by remotes
* to communicate with this port. Every port uses
* fwtty_port_handler() for per port transactions.
* @fwcon_ops: ops for attached fw_console (if any)
* @con_data: private data for fw_console
* @wait_tx: waitqueue for sleeping until writer/drain completes tx
* @emit_breaks: delayed work responsible for generating breaks when the
* break line status is active
* @cps : characters per second computed from the termios settings
* @break_last: timestamp in jiffies from last emit_breaks
* @hangup: work responsible for HUPing when carrier is dropped/lost
* @mstatus: loose virtualization of LSR/MSR
* bits 15..0 correspond to TIOCM_* bits
* bits 19..16 reserved for mctrl
* bit 20 OOB_TX_THROTTLE
* bits 23..21 reserved
* bits 31..24 correspond to UART_LSR_* bits
* @lock: spinlock for protecting concurrent access to fields below it
* @mctrl: loose virtualization of MCR
* bits 15..0 correspond to TIOCM_* bits
* bit 16 OOB_RX_THROTTLE
* bits 19..17 reserved
* bits 31..20 reserved for mstatus
* @drain: delayed work scheduled to ensure that writes are flushed.
* The work can race with the writer but concurrent sending is
* prevented with the IN_TX flag. Scheduled under lock to
* limit scheduling when fifo has just been drained.
* @push: work responsible for pushing buffered rx to the ldisc.
* rx can become buffered if the tty buffer is filled before the
* ldisc throttles the sender.
* @buf_list: list of buffered rx yet to be sent to ldisc
* @buffered: byte count of buffered rx
* @tx_fifo: fifo used to store & block-up writes for dma to remote
* @max_payload: max bytes transmittable per dma (based on peer's max_payload)
* @status_mask: UART_LSR_* bitmask significant to rx (based on termios)
* @ignore_mask: UART_LSR_* bitmask of states to ignore (also based on termios)
* @break_ctl: if set, port is 'sending break' to remote
* @write_only: self-explanatory
* @overrun: previous rx was lost (partially or completely)
* @loopback: if set, port is in loopback mode
* @flags: atomic bit flags
* bit 0: IN_TX - gate to allow only one cpu to send from the dma fifo
* at a time.
* bit 1: STOP_TX - force tx to exit while sending
* @peer: rcu-pointer to associated fwtty_peer (if attached)
* NULL if no peer attached
* @icount: predefined statistics reported by the TIOCGICOUNT ioctl
* @stats: additional statistics reported in /proc/tty/driver/firewire_serial
*/
struct fwtty_port {
struct tty_port port;
struct device *device;
unsigned index;
struct fw_serial *serial;
struct fw_address_handler rx_handler;
struct fwconsole_ops *fwcon_ops;
void *con_data;
wait_queue_head_t wait_tx;
struct delayed_work emit_breaks;
unsigned cps;
unsigned long break_last;
struct work_struct hangup;
unsigned mstatus;
/* fields below are protected by 'lock' (see kernel-doc comment above) */
spinlock_t lock;
unsigned mctrl;
struct delayed_work drain;
struct work_struct push;
struct list_head buf_list;
int buffered;
struct dma_fifo tx_fifo;
int max_payload;
unsigned status_mask;
unsigned ignore_mask;
unsigned break_ctl:1,
write_only:1,
overrun:1,
loopback:1;
unsigned long flags; /* atomic bits: IN_TX, STOP_TX, BUFFERING_RX */
struct fwtty_peer *peer; /* rcu-protected; NULL when no peer attached */
struct async_icount icount;
struct stats stats;
};
#define to_port(ptr, field) (container_of(ptr, struct fwtty_port, field))
/* bit #s for flags field */
#define IN_TX 0
#define STOP_TX 1
#define BUFFERING_RX 2
/* bitmasks for special mctrl/mstatus bits */
#define OOB_RX_THROTTLE 0x00010000
#define MCTRL_RSRVD 0x000e0000
#define OOB_TX_THROTTLE 0x00100000
#define MSTATUS_RSRVD 0x00e00000
#define MCTRL_MASK (TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 | TIOCM_OUT2 | \
TIOCM_LOOP | OOB_RX_THROTTLE | MCTRL_RSRVD)
/* XXX even every 1/50th secs. may be unnecessarily accurate */
/* delay in jiffies between brk emits */
#define FREQ_BREAKS (HZ / 50)
/* Ports are allocated in blocks of num_ports for each fw_card */
#define MAX_CARD_PORTS 32 /* max # of ports per card */
#define MAX_TOTAL_PORTS 64 /* max # of ports total */
/* tuning parameters */
#define FWTTY_PORT_TXFIFO_LEN 4096
#define FWTTY_PORT_MAX_PEND_DMA 8 /* costs a cache line per pend */
#define DRAIN_THRESHOLD 1024
#define MAX_ASYNC_PAYLOAD 4096 /* ohci-defined limit */
#define WRITER_MINIMUM 128
/* TODO: how to set watermark to AR context size? see fwtty_rx() */
#define HIGH_WATERMARK 32768 /* AR context is 32K */
/*
* Size of bus addr region above 4GB used per port as the recv addr
* - must be at least as big as the MAX_ASYNC_PAYLOAD
*/
#define FWTTY_PORT_RXFIFO_LEN MAX_ASYNC_PAYLOAD
/**
 * fw_serial: aggregate used to associate tty ports with specific fw_card
 * @card: fw_card associated with this fw_serial device (1:1 association)
 * @kref: reference-counted multi-port management allows delayed destroy
 * @self: local unit device as 'peer'. Not valid until local unit device
 * is enumerated.
 * @list: link for insertion into fwserial_list
 * @peer_list: list of local & remote unit devices attached to this card
 * @ports: fixed array of tty_ports provided by this serial device
 */
struct fw_serial {
struct fw_card *card;
struct kref kref;
struct fwtty_peer *self;
struct list_head list;
struct list_head peer_list;
struct fwtty_port *ports[MAX_CARD_PORTS]; /* at most MAX_CARD_PORTS/card */
};
#define to_serial(ptr, field) (container_of(ptr, struct fw_serial, field))
#define TTY_DEV_NAME "fwtty" /* ttyFW was taken */
static const char tty_dev_name[] = TTY_DEV_NAME;
static const char loop_dev_name[] = "fwloop";
extern bool limit_bw;
struct tty_driver *fwtty_driver;
#define driver_err(s, v...) pr_err(KBUILD_MODNAME ": " s, ##v)
struct fwtty_port *fwtty_port_get(unsigned index);
void fwtty_port_put(struct fwtty_port *port);
/*
 * Attach console ops/data to a port.  con_data is stored before
 * fwcon_ops -- NOTE(review): ordering looks deliberate (consumers would
 * test fwcon_ops then read con_data) but there is no memory barrier;
 * confirm callers serialize against readers.
 */
static inline void fwtty_bind_console(struct fwtty_port *port,
struct fwconsole_ops *fwcon_ops,
void *data)
{
port->con_data = data;
port->fwcon_ops = fwcon_ops;
}
/*
 * Returns the max send async payload size in bytes based on the unit device
 * link speed - if set to limit bandwidth to max 20%, use lookup table
 */
static inline int link_speed_to_max_payload(unsigned speed)
{
/* per-speed payload caps used when limit_bw is set, S100..S3200 */
static const int max_async[] = { 307, 614, 1229, 2458, 4916, 9832, };
/* table must have exactly one entry per scode */
BUILD_BUG_ON(ARRAY_SIZE(max_async) - 1 != SCODE_3200);
/* clamp out-of-range scodes rather than indexing past the table */
speed = clamp(speed, (unsigned) SCODE_100, (unsigned) SCODE_3200);
if (limit_bw)
return max_async[speed];
else
return 1 << (speed + 9); /* full payload: 512 << speed */
}
#endif /* _FIREWIRE_FWSERIAL_H */