staging:iio: remove ability to escalate events.

Whilst it is possible to output events to say that buffers have passed
a particular level, there is no obvious reason to actually do so.

The upshot of this patch is that buffers will only ever have
one threshold turned on at a time.

For now sca3000 has its ring buffer effectively disabled.
This is fixed later in the series.

Signed-off-by: Jonathan Cameron <jic23@cam.ac.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Jonathan Cameron, 2011-05-18 14:40:54 +01:00, committed by Greg Kroah-Hartman
parent 298cd976e0
commit 939606d5cc
7 changed files with 16 additions and 96 deletions
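For context, the "escalation" being removed worked as follows: a ring
buffer held a single shared event pointer, and if a queued threshold
event (say 50% full) had not yet been read, a later threshold crossing
upgraded that queued event in place rather than queueing a second one.
A toy userland model of the old and new behaviours (invented names;
this is not the kernel API):

#include <stdio.h>

enum ring_event { RING_NONE, RING_50_FULL, RING_75_FULL, RING_100_FULL };

/* one-slot queue standing in for the shared event pointer */
static enum ring_event pending = RING_NONE;

/* old path: upgrade the queued, unread event in place */
static void push_or_escalate(enum ring_event ev)
{
        if (pending == RING_NONE)
                pending = ev;
        else if (ev > pending)
                pending = ev;           /* escalate, e.g. 50% -> 75% */
}

/* new path: push only; a queued event is never rewritten */
static void push(enum ring_event ev)
{
        if (pending == RING_NONE)
                pending = ev;
}

int main(void)
{
        push_or_escalate(RING_50_FULL);
        push_or_escalate(RING_75_FULL);
        printf("old model reports %d\n", pending);      /* 2: 75% full */

        pending = RING_NONE;
        push(RING_50_FULL);
        push(RING_75_FULL);
        printf("new model reports %d\n", pending);      /* 1: 50% full */
        return 0;
}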


@@ -45,9 +45,12 @@
  * Currently does not provide timestamps. As the hardware doesn't add them they
  * can only be inferred approximately from ring buffer events such as 50% full
  * and knowledge of when buffer was last emptied. This is left to userspace.
+ *
+ * Temporarily deliberately broken.
  **/
 static int sca3000_read_first_n_hw_rb(struct iio_ring_buffer *r,
-                                      size_t count, u8 **data, int *dead_offset)
+                                      size_t count, char __user *buf,
+                                      int *dead_offset)
 {
         struct iio_hw_ring_buffer *hw_ring = iio_to_hw_ring_buf(r);
         struct iio_dev *indio_dev = hw_ring->private;
@@ -56,6 +59,8 @@ static int sca3000_read_first_n_hw_rb(struct iio_ring_buffer *r,
         s16 *samples;
         int ret, i, num_available, num_read = 0;
         int bytes_per_sample = 1;
+        u8 *datas;
+        u8 **data = &datas;
 
         if (st->bpse == 11)
                 bytes_per_sample = 2;
@@ -353,9 +358,9 @@ void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
 void sca3000_ring_int_process(u8 val, struct iio_ring_buffer *ring)
 {
         if (val & SCA3000_INT_STATUS_THREE_QUARTERS)
-                iio_push_or_escallate_ring_event(ring,
-                                                 IIO_EVENT_CODE_RING_75_FULL,
-                                                 0);
+                iio_push_ring_event(ring,
+                                    IIO_EVENT_CODE_RING_75_FULL,
+                                    0);
         else if (val & SCA3000_INT_STATUS_HALF)
                 iio_push_ring_event(ring,
                                     IIO_EVENT_CODE_RING_50_FULL, 0);
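As the sca3000 comment above notes, timestamping is left to userspace:
wait for the one enabled threshold event, drain the ring, and bracket
the drained samples between that event and the previous drain. A rough
sketch of such a reader, assuming read()-based event and ring chrdevs;
the device paths are hypothetical and the event structure is assumed
to mirror the staging tree's struct iio_event_data:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* assumed to match struct iio_event_data in the staging tree */
struct iio_event_data {
        int id;
        int64_t timestamp;
};

int main(void)
{
        int ev_fd = open("/dev/iio_ring_event0", O_RDONLY);     /* hypothetical */
        int rb_fd = open("/dev/iio_ring_access0", O_RDONLY);    /* hypothetical */
        struct iio_event_data ev;
        uint8_t buf[4096];
        ssize_t n;

        if (ev_fd < 0 || rb_fd < 0)
                return 1;
        /* blocks until the single enabled threshold (e.g. 50% full) fires */
        while (read(ev_fd, &ev, sizeof(ev)) == sizeof(ev)) {
                n = read(rb_fd, buf, sizeof(buf));
                printf("event %d: drained %zd bytes\n", ev.id, n);
                /* samples fall between the previous drain and this event */
        }
        close(ev_fd);
        close(rb_fd);
        return 0;
}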


@@ -45,23 +45,10 @@ struct iio_event_data {
  * struct iio_detected_event_list - list element for events that have occurred
  * @list:              linked list header
  * @ev:                the event itself
- * @shared_pointer:    used when the event is shared - i.e. can be escallated
- *                     on demand (eg ring buffer 50%->100% full)
  */
 struct iio_detected_event_list {
         struct list_head                list;
         struct iio_event_data           ev;
-        struct iio_shared_ev_pointer    *shared_pointer;
 };
-
-/**
- * struct iio_shared_ev_pointer - allows shared events to identify if currently
- *                                in the detected event list
- * @ev_p:      pointer to detected event list element (null if not in list)
- * @lock:      protect this element to prevent simultaneous edit and remove
- */
-struct iio_shared_ev_pointer {
-        struct iio_detected_event_list  *ev_p;
-        spinlock_t                      lock;
-};
 
 /**


@@ -394,22 +394,10 @@ int iio_push_event(struct iio_dev *dev_info,
  * @ev_int:            the event interface to which we are pushing the event
  * @ev_code:           the outgoing event code
  * @timestamp:         timestamp of the event
- * @shared_pointer_p:  the shared event pointer
  **/
 int __iio_push_event(struct iio_event_interface *ev_int,
-                     int ev_code,
-                     s64 timestamp,
-                     struct iio_shared_ev_pointer*
-                     shared_pointer_p);
-
-/**
- * __iio_change_event() - change an event code in case of event escalation
- * @ev:                the event to be changed
- * @ev_code:   new event code
- * @timestamp: new timestamp
- **/
-void __iio_change_event(struct iio_detected_event_list *ev,
-                        int ev_code,
-                        s64 timestamp);
+                     int ev_code,
+                     s64 timestamp);
 
 /**
  * iio_setup_ev_int() - configure an event interface (chrdev)


@@ -82,15 +82,6 @@ static const char * const iio_chan_info_postfix[] = {
         [IIO_CHAN_INFO_CALIBBIAS_SHARED/2] = "calibbias",
 };
 
-void __iio_change_event(struct iio_detected_event_list *ev,
-                        int ev_code,
-                        s64 timestamp)
-{
-        ev->ev.id = ev_code;
-        ev->ev.timestamp = timestamp;
-}
-EXPORT_SYMBOL(__iio_change_event);
-
 /* Used both in the interrupt line put events and the ring buffer ones */
 
 /* Note that in it's current form someone has to be listening before events
@@ -99,9 +90,7 @@ EXPORT_SYMBOL(__iio_change_event);
  */
 int __iio_push_event(struct iio_event_interface *ev_int,
                      int ev_code,
-                     s64 timestamp,
-                     struct iio_shared_ev_pointer *
-                     shared_pointer_p)
+                     s64 timestamp)
 {
         struct iio_detected_event_list *ev;
         int ret = 0;
@@ -121,9 +110,6 @@ int __iio_push_event(struct iio_event_interface *ev_int,
         }
         ev->ev.id = ev_code;
         ev->ev.timestamp = timestamp;
-        ev->shared_pointer = shared_pointer_p;
-        if (ev->shared_pointer)
-                shared_pointer_p->ev_p = ev;
 
         list_add_tail(&ev->list, &ev_int->det_events.list);
         ev_int->current_events++;
@@ -143,7 +129,7 @@ int iio_push_event(struct iio_dev *dev_info,
                    s64 timestamp)
 {
         return __iio_push_event(&dev_info->event_interfaces[ev_line],
-                                ev_code, timestamp, NULL);
+                                ev_code, timestamp);
 }
 EXPORT_SYMBOL(iio_push_event);
@@ -311,18 +297,6 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
         list_del(&el->list);
         ev_int->current_events--;
         mutex_unlock(&ev_int->event_list_lock);
-        /*
-         * Possible concurency issue if an update of this event is on its way
-         * through. May lead to new event being removed whilst the reported
-         * event was the unescalated event. In typical use case this is not a
-         * problem as userspace will say read half the buffer due to a 50%
-         * full event which would make the correct 100% full incorrect anyway.
-         */
-        if (el->shared_pointer) {
-                spin_lock(&el->shared_pointer->lock);
-                (el->shared_pointer->ev_p) = NULL;
-                spin_unlock(&el->shared_pointer->lock);
-        }
 
         kfree(el);
 
         return len;
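The block deleted above was the reader's half of the shared-pointer
protocol: once an event has been copied to userspace and freed, ev_p
must be cleared so an in-flight escalation cannot write through a
dangling pointer. A single-threaded sketch of that invariant (invented
names; the real code needed the spinlock because reader and writer run
concurrently):

#include <stdio.h>
#include <stdlib.h>

struct event { int id; };
struct shared_ptr { struct event *ev_p; };

/* the writer side: upgrade the queued event if it is still queued */
static void escalate(struct shared_ptr *sp, int code)
{
        if (sp->ev_p)
                sp->ev_p->id = code;
        else
                printf("event already consumed; push a fresh one instead\n");
}

int main(void)
{
        struct shared_ptr sp;
        struct event *ev = malloc(sizeof(*ev));

        ev->id = 50;
        sp.ev_p = ev;           /* queued and escalatable */
        escalate(&sp, 75);      /* safe: id becomes 75 */

        sp.ev_p = NULL;         /* reader consumes the event ... */
        free(ev);               /* ... and frees it */
        escalate(&sp, 100);     /* safe only because ev_p was cleared */
        return 0;
}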


@@ -27,28 +27,11 @@ int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
                         s64 timestamp)
 {
         return __iio_push_event(&ring_buf->ev_int,
-                                event_code,
-                                timestamp,
-                                &ring_buf->shared_ev_pointer);
+                                event_code,
+                                timestamp);
 }
 EXPORT_SYMBOL(iio_push_ring_event);
 
-int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
-                                     int event_code,
-                                     s64 timestamp)
-{
-        if (ring_buf->shared_ev_pointer.ev_p)
-                __iio_change_event(ring_buf->shared_ev_pointer.ev_p,
-                                   event_code,
-                                   timestamp);
-        else
-                return iio_push_ring_event(ring_buf,
-                                           event_code,
-                                           timestamp);
-        return 0;
-}
-EXPORT_SYMBOL(iio_push_or_escallate_ring_event);
-
 /**
  * iio_ring_open() - chrdev file open for ring buffer access
  *
@@ -228,8 +211,6 @@ void iio_ring_buffer_init(struct iio_ring_buffer *ring,
         ring->indio_dev = dev_info;
         ring->ev_int.private = ring;
         ring->access_handler.private = ring;
-        ring->shared_ev_pointer.ev_p = NULL;
-        spin_lock_init(&ring->shared_ev_pointer.lock);
 }
 EXPORT_SYMBOL(iio_ring_buffer_init);


@@ -107,8 +107,6 @@ struct iio_ring_access_funcs {
  * @scan_timestamp:    [INTERN] does the scan mode include a timestamp
  * @access_handler:    [INTERN] chrdev access handling
  * @ev_int:            [INTERN] chrdev interface for the event chrdev
- * @shared_ev_pointer: [INTERN] the shared event pointer to allow escalation of
- *                     events
  * @access:            [DRIVER] ring access functions associated with the
  *                     implementation.
  * @preenable:         [DRIVER] function to run prior to marking ring enabled
@@ -133,7 +131,6 @@ struct iio_ring_buffer {
         bool                            scan_timestamp;
         struct iio_handler              access_handler;
         struct iio_event_interface      ev_int;
-        struct iio_shared_ev_pointer    shared_ev_pointer;
         struct iio_ring_access_funcs    access;
         int                             (*preenable)(struct iio_dev *);
         int                             (*postenable)(struct iio_dev *);


@@ -123,14 +123,6 @@ static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
                  */
                 if (change_test_ptr == ring->read_p)
                         ring->read_p = temp_ptr;
-
-                spin_lock(&ring->buf.shared_ev_pointer.lock);
-                ret = iio_push_or_escallate_ring_event(&ring->buf,
-                                IIO_EVENT_CODE_RING_100_FULL, timestamp);
-                spin_unlock(&ring->buf.shared_ev_pointer.lock);
-                if (ret)
-                        goto error_ret;
         }
         /* investigate if our event barrier has been passed */
         /* There are definite 'issues' with this and chances of
@@ -140,15 +132,11 @@ static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
         if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
                 ring->half_p = ring->data;
         if (ring->half_p == ring->read_p) {
-                spin_lock(&ring->buf.shared_ev_pointer.lock);
                 code = IIO_EVENT_CODE_RING_50_FULL;
                 ret = __iio_push_event(&ring->buf.ev_int,
                                        code,
-                                       timestamp,
-                                       &ring->buf.shared_ev_pointer);
-                spin_unlock(&ring->buf.shared_ev_pointer.lock);
+                                       timestamp);
         }
 error_ret:
         return ret;
 }