USB: make HCDs responsible for managing endpoint queues

This patch (as954) implements a suggestion of David Brownell's.  Now
the host controller drivers are responsible for linking and unlinking
URBs to/from their endpoint queues.  This eliminates the possibility of
strange situations where usbcore thinks an URB is linked but the HCD
thinks it isn't.  It also means HCDs no longer have to check for URBs
being dequeued before they were fully enqueued.

In addition to the core changes, this requires changing every host
controller driver and the root-hub URB handler.  For the most part the
required changes are fairly small; drivers have to call
usb_hcd_link_urb_to_ep() in their urb_enqueue method,
usb_hcd_check_unlink_urb() in their urb_dequeue method, and
usb_hcd_unlink_urb_from_ep() before giving URBs back.  A few HCDs make
matters more complicated by the way they split up the flow of control.
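
For the common case, the resulting pattern looks roughly like the
following minimal sketch (struct foo_hcd, hcd_to_foo(), foo_to_hcd(),
foo->lock, and the foo_hw_*() helpers are hypothetical placeholders,
not any real driver's API):

	static int foo_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
			gfp_t mem_flags)
	{
		struct foo_hcd *foo = hcd_to_foo(hcd);
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&foo->lock, flags);

		/* put the URB on its endpoint's queue first ... */
		rc = usb_hcd_link_urb_to_ep(hcd, urb);
		if (rc)
			goto done;

		/* ... then hand it to the hardware, undoing the link on failure */
		rc = foo_hw_start(foo, urb);
		if (rc)
			usb_hcd_unlink_urb_from_ep(hcd, urb);
	done:
		spin_unlock_irqrestore(&foo->lock, flags);
		return rc;
	}

	static int foo_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
			int status)
	{
		struct foo_hcd *foo = hcd_to_foo(hcd);
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&foo->lock, flags);

		/* validate the unlink and record @status in the URB */
		rc = usb_hcd_check_unlink_urb(hcd, urb, status);
		if (rc == 0)
			foo_hw_cancel(foo, urb);	/* giveback happens later */
		spin_unlock_irqrestore(&foo->lock, flags);
		return rc;
	}

	/* completion path; called with foo->lock held and IRQs disabled */
	static void foo_finish_urb(struct foo_hcd *foo, struct urb *urb)
	{
		/* remove from the endpoint queue before giving the URB back */
		usb_hcd_unlink_urb_from_ep(foo_to_hcd(foo), urb);
		spin_unlock(&foo->lock);
		usb_hcd_giveback_urb(foo_to_hcd(foo), urb);	/* can reenter the HCD */
		spin_lock(&foo->lock);
	}

As in the real drivers below, the private lock is dropped around
usb_hcd_giveback_urb() because the URB's completion handler may
resubmit and reenter the HCD.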

In addition, some method interfaces have changed.  The endpoint argument
to urb_enqueue is now redundant, so it has been removed.  The unlink
status is required by usb_hcd_check_unlink_urb(), so it has been added
to urb_dequeue.
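
Concretely, the hc_driver method prototypes change from

	int	(*urb_enqueue)(struct usb_hcd *hcd,
				struct usb_host_endpoint *ep,
				struct urb *urb, gfp_t mem_flags);
	int	(*urb_dequeue)(struct usb_hcd *hcd, struct urb *urb);

to

	int	(*urb_enqueue)(struct usb_hcd *hcd,
				struct urb *urb, gfp_t mem_flags);
	int	(*urb_dequeue)(struct usb_hcd *hcd,
				struct urb *urb, int status);

and drivers that still need the endpoint can reach it as urb->ep.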

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
CC: David Brownell <david-b@pacbell.net>
CC: Olav Kongas <ok@artecdesign.ee>
CC: Tony Olech <tony.olech@elandigitalsystems.com>
CC: Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
commit e9df41c5c5 (parent b0e396e309)
Alan Stern, 2007-08-08 11:48:02 -04:00; committed by Greg Kroah-Hartman
13 changed files with 420 additions and 271 deletions

drivers/usb/core/hcd.c

@ -356,11 +356,17 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
const u8 *bufp = tbuf;
int len = 0;
int patch_wakeup = 0;
int status = 0;
int status;
int n;
might_sleep();
spin_lock_irq(&hcd_root_hub_lock);
status = usb_hcd_link_urb_to_ep(hcd, urb);
spin_unlock_irq(&hcd_root_hub_lock);
if (status)
return status;
cmd = (struct usb_ctrlrequest *) urb->setup_packet;
typeReq = (cmd->bRequestType << 8) | cmd->bRequest;
wValue = le16_to_cpu (cmd->wValue);
@ -525,10 +531,9 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
/* any errors get returned through the urb completion */
spin_lock_irq(&hcd_root_hub_lock);
spin_lock(&urb->lock);
if (urb->status == -EINPROGRESS)
urb->status = status;
spin_unlock(&urb->lock);
usb_hcd_unlink_urb_from_ep(hcd, urb);
/* This peculiar use of spinlocks echoes what real HC drivers do.
* Avoiding calls to local_irq_disable/enable makes the code
@ -571,26 +576,21 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
spin_lock_irqsave(&hcd_root_hub_lock, flags);
urb = hcd->status_urb;
if (urb) {
spin_lock(&urb->lock);
if (urb->status == -EINPROGRESS) {
hcd->poll_pending = 0;
hcd->status_urb = NULL;
urb->status = 0;
urb->hcpriv = NULL;
urb->actual_length = length;
memcpy(urb->transfer_buffer, buffer, length);
} else /* urb has been unlinked */
length = 0;
spin_unlock(&urb->lock);
hcd->poll_pending = 0;
hcd->status_urb = NULL;
urb->status = 0;
urb->hcpriv = NULL;
urb->actual_length = length;
memcpy(urb->transfer_buffer, buffer, length);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock(&hcd_root_hub_lock);
usb_hcd_giveback_urb(hcd, urb);
spin_lock(&hcd_root_hub_lock);
} else
} else {
length = 0;
if (length <= 0)
hcd->poll_pending = 1;
}
spin_unlock_irqrestore(&hcd_root_hub_lock, flags);
}
@ -619,24 +619,26 @@ static int rh_queue_status (struct usb_hcd *hcd, struct urb *urb)
int len = 1 + (urb->dev->maxchild / 8);
spin_lock_irqsave (&hcd_root_hub_lock, flags);
if (urb->status != -EINPROGRESS) /* already unlinked */
retval = urb->status;
else if (hcd->status_urb || urb->transfer_buffer_length < len) {
if (hcd->status_urb || urb->transfer_buffer_length < len) {
dev_dbg (hcd->self.controller, "not queuing rh status urb\n");
retval = -EINVAL;
} else {
hcd->status_urb = urb;
urb->hcpriv = hcd; /* indicate it's queued */
if (!hcd->uses_new_polling)
mod_timer (&hcd->rh_timer,
(jiffies/(HZ/4) + 1) * (HZ/4));
/* If a status change has already occurred, report it ASAP */
else if (hcd->poll_pending)
mod_timer (&hcd->rh_timer, jiffies);
retval = 0;
goto done;
}
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval)
goto done;
hcd->status_urb = urb;
urb->hcpriv = hcd; /* indicate it's queued */
if (!hcd->uses_new_polling)
mod_timer(&hcd->rh_timer, (jiffies/(HZ/4) + 1) * (HZ/4));
/* If a status change has already occurred, report it ASAP */
else if (hcd->poll_pending)
mod_timer(&hcd->rh_timer, jiffies);
retval = 0;
done:
spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
return retval;
}
@ -655,11 +657,16 @@ static int rh_urb_enqueue (struct usb_hcd *hcd, struct urb *urb)
/* Unlinks of root-hub control URBs are legal, but they don't do anything
* since these URBs always execute synchronously.
*/
static int usb_rh_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
static int usb_rh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
unsigned long flags;
int rc;
spin_lock_irqsave(&hcd_root_hub_lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto done;
if (usb_endpoint_num(&urb->ep->desc) == 0) { /* Control URB */
; /* Do nothing */
@ -669,14 +676,16 @@ static int usb_rh_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
if (urb == hcd->status_urb) {
hcd->status_urb = NULL;
urb->hcpriv = NULL;
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock(&hcd_root_hub_lock);
usb_hcd_giveback_urb(hcd, urb);
spin_lock(&hcd_root_hub_lock);
}
}
done:
spin_unlock_irqrestore(&hcd_root_hub_lock, flags);
return 0;
return rc;
}
@ -977,12 +986,26 @@ EXPORT_SYMBOL (usb_calc_bus_time);
/*-------------------------------------------------------------------------*/
static int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb)
/**
* usb_hcd_link_urb_to_ep - add an URB to its endpoint queue
* @hcd: host controller to which @urb was submitted
* @urb: URB being submitted
*
* Host controller drivers should call this routine in their enqueue()
* method. The HCD's private spinlock must be held and interrupts must
* be disabled. The actions carried out here are required for URB
* submission, as well as for endpoint shutdown and for usb_kill_urb.
*
* Returns 0 for no error, otherwise a negative error code (in which case
* the enqueue() method must fail). If no error occurs but enqueue() fails
* anyway, it must call usb_hcd_unlink_urb_from_ep() before releasing
* the private spinlock and returning.
*/
int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb)
{
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&hcd_urb_list_lock, flags);
spin_lock(&hcd_urb_list_lock);
/* Check that the URB isn't being killed */
if (unlikely(urb->reject)) {
@ -1009,48 +1032,48 @@ static int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb)
goto done;
}
done:
spin_unlock_irqrestore(&hcd_urb_list_lock, flags);
spin_unlock(&hcd_urb_list_lock);
return rc;
}
EXPORT_SYMBOL_GPL(usb_hcd_link_urb_to_ep);
static int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
/**
* usb_hcd_check_unlink_urb - check whether an URB may be unlinked
* @hcd: host controller to which @urb was submitted
* @urb: URB being checked for unlinkability
* @status: error code to store in @urb if the unlink succeeds
*
* Host controller drivers should call this routine in their dequeue()
* method. The HCD's private spinlock must be held and interrupts must
* be disabled. The actions carried out here are required for making
sure that an unlink is valid.
*
* Returns 0 for no error, otherwise a negative error code (in which case
* the dequeue() method must fail). The possible error codes are:
*
* -EIDRM: @urb was not submitted or has already completed.
* The completion function may not have been called yet.
*
* -EBUSY: @urb has already been unlinked.
*/
int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
int status)
{
unsigned long flags;
struct list_head *tmp;
int rc = 0;
/*
* we contend for urb->status with the hcd core,
* which changes it while returning the urb.
*
* Caller guaranteed that the urb pointer hasn't been freed, and
* that it was submitted. But as a rule it can't know whether or
* not it's already been unlinked ... so we respect the reversed
* lock sequence needed for the usb_hcd_giveback_urb() code paths
* (urb lock, then hcd_urb_list_lock) in case some other CPU is now
* unlinking it.
*/
spin_lock_irqsave(&urb->lock, flags);
spin_lock(&hcd_urb_list_lock);
/* insist the urb is still queued */
list_for_each(tmp, &urb->ep->urb_list) {
if (tmp == &urb->urb_list)
break;
}
if (tmp != &urb->urb_list) {
rc = -EIDRM;
goto done;
}
if (tmp != &urb->urb_list)
return -EIDRM;
/* Any status except -EINPROGRESS means something already started to
* unlink this URB from the hardware. So there's no more work to do.
*/
if (urb->status != -EINPROGRESS) {
rc = -EBUSY;
goto done;
}
if (urb->status != -EINPROGRESS)
return -EBUSY;
urb->status = status;
/* IRQ setup can easily be broken so that USB controllers
@ -1065,21 +1088,28 @@ static int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
}
done:
spin_unlock(&hcd_urb_list_lock);
spin_unlock_irqrestore (&urb->lock, flags);
return rc;
return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_check_unlink_urb);
static void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
/**
* usb_hcd_unlink_urb_from_ep - remove an URB from its endpoint queue
* @hcd: host controller to which @urb was submitted
* @urb: URB being unlinked
*
* Host controller drivers should call this routine before calling
* usb_hcd_giveback_urb(). The HCD's private spinlock must be held and
* interrupts must be disabled. The actions carried out here are required
* for URB completion.
*/
void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
{
unsigned long flags;
/* clear all state linking urb to this dev (and hcd) */
spin_lock_irqsave(&hcd_urb_list_lock, flags);
spin_lock(&hcd_urb_list_lock);
list_del_init(&urb->urb_list);
spin_unlock_irqrestore(&hcd_urb_list_lock, flags);
spin_unlock(&hcd_urb_list_lock);
}
EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep);
static void map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
@ -1153,20 +1183,15 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
* URBs must be submitted in process context with interrupts
* enabled.
*/
status = usb_hcd_link_urb_to_ep(hcd, urb);
if (!status) {
map_urb_for_dma(hcd, urb);
if (is_root_hub(urb->dev))
status = rh_urb_enqueue(hcd, urb);
else
status = hcd->driver->urb_enqueue(hcd, urb->ep, urb,
mem_flags);
}
map_urb_for_dma(hcd, urb);
if (is_root_hub(urb->dev))
status = rh_urb_enqueue(hcd, urb);
else
status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
if (unlikely(status)) {
usbmon_urb_submit_error(&hcd->self, urb, status);
unmap_urb_for_dma(hcd, urb);
usb_hcd_unlink_urb_from_ep(hcd, urb);
INIT_LIST_HEAD(&urb->urb_list);
atomic_dec(&urb->use_count);
if (urb->reject)
@ -1183,24 +1208,19 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
* soon as practical. we've already set up the urb's return status,
* but we can't know if the callback completed already.
*/
static int
unlink1 (struct usb_hcd *hcd, struct urb *urb)
static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status)
{
int value;
if (is_root_hub(urb->dev))
value = usb_rh_urb_dequeue (hcd, urb);
value = usb_rh_urb_dequeue(hcd, urb, status);
else {
/* The only reason an HCD might fail this call is if
* it has not yet fully queued the urb to begin with.
* Such failures should be harmless. */
value = hcd->driver->urb_dequeue (hcd, urb);
value = hcd->driver->urb_dequeue(hcd, urb, status);
}
if (value != 0)
dev_dbg (hcd->self.controller, "dequeue %p --> %d\n",
urb, value);
return value;
}
@ -1216,14 +1236,11 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
int retval;
hcd = bus_to_hcd(urb->dev->bus);
retval = usb_hcd_check_unlink_urb(hcd, urb, status);
if (!retval)
retval = unlink1(hcd, urb);
retval = unlink1(hcd, urb, status);
if (retval == 0)
retval = -EINPROGRESS;
else if (retval != -EIDRM)
else if (retval != -EIDRM && retval != -EBUSY)
dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n",
urb, retval);
return retval;
@ -1245,7 +1262,6 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
*/
void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb)
{
usb_hcd_unlink_urb_from_ep(hcd, urb);
unmap_urb_for_dma(hcd, urb);
usbmon_urb_complete (&hcd->self, urb);
usb_unanchor_urb(urb);
@ -1282,7 +1298,6 @@ void usb_hcd_endpoint_disable (struct usb_device *udev,
rescan:
spin_lock_irq(&hcd_urb_list_lock);
list_for_each_entry (urb, &ep->urb_list, urb_list) {
int tmp;
int is_in;
/* the urb may already have been unlinked */
@ -1292,34 +1307,26 @@ void usb_hcd_endpoint_disable (struct usb_device *udev,
is_in = usb_urb_dir_in(urb);
spin_unlock(&hcd_urb_list_lock);
spin_lock (&urb->lock);
tmp = urb->status;
if (tmp == -EINPROGRESS)
urb->status = -ESHUTDOWN;
spin_unlock (&urb->lock);
/* kick hcd */
unlink1(hcd, urb, -ESHUTDOWN);
dev_dbg (hcd->self.controller,
"shutdown urb %p ep%d%s%s\n",
urb, usb_endpoint_num(&ep->desc),
is_in ? "in" : "out",
({ char *s;
/* kick hcd unless it's already returning this */
if (tmp == -EINPROGRESS) {
unlink1 (hcd, urb);
dev_dbg (hcd->self.controller,
"shutdown urb %p ep%d%s%s\n",
urb, usb_endpoint_num(&ep->desc),
is_in ? "in" : "out",
({ char *s;
switch (usb_endpoint_type(&ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
s = ""; break;
case USB_ENDPOINT_XFER_BULK:
s = "-bulk"; break;
case USB_ENDPOINT_XFER_INT:
s = "-intr"; break;
default:
s = "-iso"; break;
};
s;
}));
}
switch (usb_endpoint_type(&ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
s = ""; break;
case USB_ENDPOINT_XFER_BULK:
s = "-bulk"; break;
case USB_ENDPOINT_XFER_INT:
s = "-intr"; break;
default:
s = "-iso"; break;
};
s;
}));
usb_put_urb (urb);
/* list contents may have changed */

drivers/usb/core/hcd.h

@ -189,11 +189,10 @@ struct hc_driver {
int (*get_frame_number) (struct usb_hcd *hcd);
/* manage i/o requests, device state */
int (*urb_enqueue) (struct usb_hcd *hcd,
struct usb_host_endpoint *ep,
struct urb *urb,
gfp_t mem_flags);
int (*urb_dequeue) (struct usb_hcd *hcd, struct urb *urb);
int (*urb_enqueue)(struct usb_hcd *hcd,
struct urb *urb, gfp_t mem_flags);
int (*urb_dequeue)(struct usb_hcd *hcd,
struct urb *urb, int status);
/* hw synch, freeing endpoint resources that urb_dequeue can't */
void (*endpoint_disable)(struct usb_hcd *hcd,
@ -211,6 +210,11 @@ struct hc_driver {
/* Needed only if port-change IRQs are level-triggered */
};
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
int status);
extern void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb);
extern int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags);
extern int usb_hcd_unlink_urb (struct urb *urb, int status);
extern void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb);

drivers/usb/gadget/dummy_hcd.c

@ -962,13 +962,13 @@ static struct platform_driver dummy_udc_driver = {
static int dummy_urb_enqueue (
struct usb_hcd *hcd,
struct usb_host_endpoint *ep,
struct urb *urb,
gfp_t mem_flags
) {
struct dummy *dum;
struct urbp *urbp;
unsigned long flags;
int rc;
if (!urb->transfer_buffer && urb->transfer_buffer_length)
return -EINVAL;
@ -980,6 +980,11 @@ static int dummy_urb_enqueue (
dum = hcd_to_dummy (hcd);
spin_lock_irqsave (&dum->lock, flags);
rc = usb_hcd_link_urb_to_ep(hcd, urb);
if (rc) {
kfree(urbp);
goto done;
}
if (!dum->udev) {
dum->udev = urb->dev;
@ -997,22 +1002,28 @@ static int dummy_urb_enqueue (
mod_timer (&dum->timer, jiffies + 1);
spin_unlock_irqrestore (&dum->lock, flags);
return 0;
done:
return rc;
}
static int dummy_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct dummy *dum;
unsigned long flags;
int rc;
/* giveback happens automatically in timer callback,
* so make sure the callback happens */
dum = hcd_to_dummy (hcd);
spin_lock_irqsave (&dum->lock, flags);
if (dum->rh_state != DUMMY_RH_RUNNING && !list_empty(&dum->urbp_list))
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (!rc && dum->rh_state != DUMMY_RH_RUNNING &&
!list_empty(&dum->urbp_list))
mod_timer (&dum->timer, jiffies);
spin_unlock_irqrestore (&dum->lock, flags);
return 0;
return rc;
}
static void maybe_set_status (struct urb *urb, int status)
@ -1511,6 +1522,7 @@ static void dummy_timer (unsigned long _dum)
if (ep)
ep->already_seen = ep->setup_stage = 0;
usb_hcd_unlink_urb_from_ep(dummy_to_hcd(dum), urb);
spin_unlock (&dum->lock);
usb_hcd_giveback_urb (dummy_to_hcd(dum), urb);
spin_lock (&dum->lock);

drivers/usb/host/ehci-hcd.c

@ -719,7 +719,6 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
*/
static int ehci_urb_enqueue (
struct usb_hcd *hcd,
struct usb_host_endpoint *ep,
struct urb *urb,
gfp_t mem_flags
) {
@ -734,12 +733,12 @@ static int ehci_urb_enqueue (
default:
if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
return -ENOMEM;
return submit_async (ehci, ep, urb, &qtd_list, mem_flags);
return submit_async(ehci, urb, &qtd_list, mem_flags);
case PIPE_INTERRUPT:
if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
return -ENOMEM;
return intr_submit (ehci, ep, urb, &qtd_list, mem_flags);
return intr_submit(ehci, urb, &qtd_list, mem_flags);
case PIPE_ISOCHRONOUS:
if (urb->dev->speed == USB_SPEED_HIGH)
@ -777,13 +776,18 @@ static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
* completions normally happen asynchronously
*/
static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
struct ehci_qh *qh;
unsigned long flags;
int rc;
spin_lock_irqsave (&ehci->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto done;
switch (usb_pipetype (urb->pipe)) {
// case PIPE_CONTROL:
// case PIPE_BULK:
@ -838,7 +842,7 @@ static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
}
done:
spin_unlock_irqrestore (&ehci->lock, flags);
return 0;
return rc;
}
/*-------------------------------------------------------------------------*/

drivers/usb/host/ehci-q.c

@ -262,6 +262,7 @@ __acquires(ehci->lock)
#endif
/* complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
spin_unlock (&ehci->lock);
usb_hcd_giveback_urb (ehci_to_hcd(ehci), urb);
spin_lock (&ehci->lock);
@ -913,7 +914,6 @@ static struct ehci_qh *qh_append_tds (
static int
submit_async (
struct ehci_hcd *ehci,
struct usb_host_endpoint *ep,
struct urb *urb,
struct list_head *qtd_list,
gfp_t mem_flags
@ -922,10 +922,10 @@ submit_async (
int epnum;
unsigned long flags;
struct ehci_qh *qh = NULL;
int rc = 0;
int rc;
qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
epnum = ep->desc.bEndpointAddress;
epnum = urb->ep->desc.bEndpointAddress;
#ifdef EHCI_URB_TRACE
ehci_dbg (ehci,
@ -933,7 +933,7 @@ submit_async (
__FUNCTION__, urb->dev->devpath, urb,
epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
urb->transfer_buffer_length,
qtd, ep->hcpriv);
qtd, urb->ep->hcpriv);
#endif
spin_lock_irqsave (&ehci->lock, flags);
@ -942,9 +942,13 @@ submit_async (
rc = -ESHUTDOWN;
goto done;
}
rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
if (unlikely(rc))
goto done;
qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);
qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
if (unlikely(qh == NULL)) {
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
rc = -ENOMEM;
goto done;
}

drivers/usb/host/ehci-sched.c

@ -797,7 +797,6 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
static int intr_submit (
struct ehci_hcd *ehci,
struct usb_host_endpoint *ep,
struct urb *urb,
struct list_head *qtd_list,
gfp_t mem_flags
@ -805,23 +804,26 @@ static int intr_submit (
unsigned epnum;
unsigned long flags;
struct ehci_qh *qh;
int status = 0;
int status;
struct list_head empty;
/* get endpoint and transfer/schedule data */
epnum = ep->desc.bEndpointAddress;
epnum = urb->ep->desc.bEndpointAddress;
spin_lock_irqsave (&ehci->lock, flags);
if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
&ehci_to_hcd(ehci)->flags))) {
status = -ESHUTDOWN;
goto done;
goto done_not_linked;
}
status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
if (unlikely(status))
goto done_not_linked;
/* get qh and force any scheduling errors */
INIT_LIST_HEAD (&empty);
qh = qh_append_tds (ehci, urb, &empty, epnum, &ep->hcpriv);
qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
if (qh == NULL) {
status = -ENOMEM;
goto done;
@ -832,13 +834,16 @@ static int intr_submit (
}
/* then queue the urb's tds to the qh */
qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);
qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
BUG_ON (qh == NULL);
/* ... update usbfs periodic stats */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
done:
if (unlikely(status))
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
if (status)
qtd_list_free (ehci, urb, qtd_list);
@ -1686,12 +1691,19 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
/* schedule ... need to lock */
spin_lock_irqsave (&ehci->lock, flags);
if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
&ehci_to_hcd(ehci)->flags)))
&ehci_to_hcd(ehci)->flags))) {
status = -ESHUTDOWN;
else
status = iso_stream_schedule (ehci, urb, stream);
goto done_not_linked;
}
status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
if (unlikely(status))
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
if (likely (status == 0))
itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
else
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
done:
@ -2049,12 +2061,19 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
/* schedule ... need to lock */
spin_lock_irqsave (&ehci->lock, flags);
if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
&ehci_to_hcd(ehci)->flags)))
&ehci_to_hcd(ehci)->flags))) {
status = -ESHUTDOWN;
else
status = iso_stream_schedule (ehci, urb, stream);
goto done_not_linked;
}
status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
if (unlikely(status))
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
if (status == 0)
sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
else
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
done:

drivers/usb/host/isp116x-hcd.c

@ -290,6 +290,7 @@ __releases(isp116x->lock) __acquires(isp116x->lock)
urb_dbg(urb, "Finish");
usb_hcd_unlink_urb_from_ep(isp116x_to_hcd(isp116x), urb);
spin_unlock(&isp116x->lock);
usb_hcd_giveback_urb(isp116x_to_hcd(isp116x), urb);
spin_lock(&isp116x->lock);
@ -673,7 +674,7 @@ static int balance(struct isp116x *isp116x, u16 period, u16 load)
/*-----------------------------------------------------------------*/
static int isp116x_urb_enqueue(struct usb_hcd *hcd,
struct usb_host_endpoint *hep, struct urb *urb,
struct urb *urb,
gfp_t mem_flags)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
@ -682,6 +683,7 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
int is_out = !usb_pipein(pipe);
int type = usb_pipetype(pipe);
int epnum = usb_pipeendpoint(pipe);
struct usb_host_endpoint *hep = urb->ep;
struct isp116x_ep *ep = NULL;
unsigned long flags;
int i;
@ -705,7 +707,12 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
if (!HC_IS_RUNNING(hcd->state)) {
kfree(ep);
ret = -ENODEV;
goto fail;
goto fail_not_linked;
}
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret) {
kfree(ep);
goto fail_not_linked;
}
if (hep->hcpriv)
@ -818,6 +825,9 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
start_atl_transfers(isp116x);
fail:
if (ret)
usb_hcd_unlink_urb_from_ep(hcd, urb);
fail_not_linked:
spin_unlock_irqrestore(&isp116x->lock, flags);
return ret;
}
@ -825,20 +835,21 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
/*
Dequeue URBs.
*/
static int isp116x_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
static int isp116x_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
int status)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
struct usb_host_endpoint *hep;
struct isp116x_ep *ep, *ep_act;
unsigned long flags;
int rc;
spin_lock_irqsave(&isp116x->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto done;
hep = urb->hcpriv;
/* URB already unlinked (or never linked)? */
if (!hep) {
spin_unlock_irqrestore(&isp116x->lock, flags);
return 0;
}
ep = hep->hcpriv;
WARN_ON(hep != ep->hep);
@ -856,9 +867,9 @@ static int isp116x_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
if (urb)
finish_request(isp116x, ep, urb);
done:
spin_unlock_irqrestore(&isp116x->lock, flags);
return 0;
return rc;
}
static void isp116x_endpoint_disable(struct usb_hcd *hcd,

drivers/usb/host/ohci-hcd.c

@ -117,7 +117,6 @@ MODULE_PARM_DESC (no_handshake, "true (not default) disables BIOS handshake");
*/
static int ohci_urb_enqueue (
struct usb_hcd *hcd,
struct usb_host_endpoint *ep,
struct urb *urb,
gfp_t mem_flags
) {
@ -134,7 +133,7 @@ static int ohci_urb_enqueue (
#endif
/* every endpoint has a ed, locate and maybe (re)initialize it */
if (! (ed = ed_get (ohci, ep, urb->dev, pipe, urb->interval)))
if (! (ed = ed_get (ohci, urb->ep, urb->dev, pipe, urb->interval)))
return -ENOMEM;
/* for the private part of the URB we need the number of TDs (size) */
@ -199,22 +198,17 @@ static int ohci_urb_enqueue (
retval = -ENODEV;
goto fail;
}
/* in case of unlink-during-submit */
spin_lock (&urb->lock);
if (urb->status != -EINPROGRESS) {
spin_unlock (&urb->lock);
urb->hcpriv = urb_priv;
finish_urb (ohci, urb);
retval = 0;
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval)
goto fail;
}
/* schedule the ed if needed */
if (ed->state == ED_IDLE) {
retval = ed_schedule (ohci, ed);
if (retval < 0)
goto fail0;
if (retval < 0) {
usb_hcd_unlink_urb_from_ep(hcd, urb);
goto fail;
}
if (ed->type == PIPE_ISOCHRONOUS) {
u16 frame = ohci_frame_no(ohci);
@ -238,8 +232,6 @@ static int ohci_urb_enqueue (
urb->hcpriv = urb_priv;
td_submit_urb (ohci, urb);
fail0:
spin_unlock (&urb->lock);
fail:
if (retval)
urb_free_priv (ohci, urb_priv);
@ -253,17 +245,21 @@ static int ohci_urb_enqueue (
* asynchronously, and we might be dealing with an urb that's
* partially transferred, or an ED with other urbs being unlinked.
*/
static int ohci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
static int ohci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
unsigned long flags;
int rc;
#ifdef OHCI_VERBOSE_DEBUG
urb_print (urb, "UNLINK", 1);
#endif
spin_lock_irqsave (&ohci->lock, flags);
if (HC_IS_RUNNING(hcd->state)) {
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc) {
; /* Do nothing */
} else if (HC_IS_RUNNING(hcd->state)) {
urb_priv_t *urb_priv;
/* Unless an IRQ completed the unlink while it was being
@ -284,7 +280,7 @@ static int ohci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
finish_urb (ohci, urb);
}
spin_unlock_irqrestore (&ohci->lock, flags);
return 0;
return rc;
}
/*-------------------------------------------------------------------------*/

drivers/usb/host/ohci-q.c

@ -74,6 +74,7 @@ __acquires(ohci->lock)
#endif
/* urb->complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
spin_unlock (&ohci->lock);
usb_hcd_giveback_urb (ohci_to_hcd(ohci), urb);
spin_lock (&ohci->lock);

drivers/usb/host/r8a66597-hcd.c

@ -784,6 +784,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
if (urb) {
urb->status = -ENODEV;
urb->hcpriv = NULL;
usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597),
urb);
spin_unlock(&r8a66597->lock);
usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb);
spin_lock(&r8a66597->lock);
@ -1131,6 +1134,8 @@ static void done(struct r8a66597 *r8a66597, struct r8a66597_td *td,
urb->start_frame = r8a66597_get_frame(hcd);
urb->hcpriv = NULL;
usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
spin_unlock(&r8a66597->lock);
usb_hcd_giveback_urb(hcd, urb);
spin_lock(&r8a66597->lock);
@ -1722,21 +1727,25 @@ static struct r8a66597_td *r8a66597_make_td(struct r8a66597 *r8a66597,
}
static int r8a66597_urb_enqueue(struct usb_hcd *hcd,
struct usb_host_endpoint *hep,
struct urb *urb,
gfp_t mem_flags)
{
struct usb_host_endpoint *hep = urb->ep;
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
struct r8a66597_td *td = NULL;
int ret = 0, request = 0;
int ret, request = 0;
unsigned long flags;
spin_lock_irqsave(&r8a66597->lock, flags);
if (!get_urb_to_r8a66597_dev(r8a66597, urb)) {
ret = -ENODEV;
goto error;
goto error_not_linked;
}
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
goto error_not_linked;
if (!hep->hcpriv) {
hep->hcpriv = kzalloc(sizeof(struct r8a66597_pipe),
GFP_ATOMIC);
@ -1761,15 +1770,7 @@ static int r8a66597_urb_enqueue(struct usb_hcd *hcd,
if (list_empty(&r8a66597->pipe_queue[td->pipenum]))
request = 1;
list_add_tail(&td->queue, &r8a66597->pipe_queue[td->pipenum]);
spin_lock(&urb->lock);
if (urb->status != -EINPROGRESS) {
spin_unlock(&urb->lock);
ret = -EPIPE;
goto error;
}
urb->hcpriv = td;
spin_unlock(&urb->lock);
if (request) {
ret = start_transfer(r8a66597, td);
@ -1781,17 +1782,26 @@ static int r8a66597_urb_enqueue(struct usb_hcd *hcd,
set_td_timer(r8a66597, td);
error:
if (ret)
usb_hcd_unlink_urb_from_ep(hcd, urb);
error_not_linked:
spin_unlock_irqrestore(&r8a66597->lock, flags);
return ret;
}
static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
int status)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
struct r8a66597_td *td;
unsigned long flags;
int rc;
spin_lock_irqsave(&r8a66597->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto done;
if (urb->hcpriv) {
td = urb->hcpriv;
pipe_stop(r8a66597, td->pipe);
@ -1799,8 +1809,9 @@ static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
disable_irq_empty(r8a66597, td->pipenum);
done(r8a66597, td, td->pipenum, urb);
}
done:
spin_unlock_irqrestore(&r8a66597->lock, flags);
return 0;
return rc;
}
static void r8a66597_endpoint_disable(struct usb_hcd *hcd,

drivers/usb/host/sl811-hcd.c

@ -441,6 +441,7 @@ static void finish_request(
urb->hcpriv = NULL;
spin_unlock(&urb->lock);
usb_hcd_unlink_urb_from_ep(sl811_to_hcd(sl811), urb);
spin_unlock(&sl811->lock);
usb_hcd_giveback_urb(sl811_to_hcd(sl811), urb);
spin_lock(&sl811->lock);
@ -807,7 +808,6 @@ static int balance(struct sl811 *sl811, u16 period, u16 load)
static int sl811h_urb_enqueue(
struct usb_hcd *hcd,
struct usb_host_endpoint *hep,
struct urb *urb,
gfp_t mem_flags
) {
@ -820,7 +820,8 @@ static int sl811h_urb_enqueue(
struct sl811h_ep *ep = NULL;
unsigned long flags;
int i;
int retval = 0;
int retval;
struct usb_host_endpoint *hep = urb->ep;
#ifdef DISABLE_ISO
if (type == PIPE_ISOCHRONOUS)
@ -838,7 +839,12 @@ static int sl811h_urb_enqueue(
|| !HC_IS_RUNNING(hcd->state)) {
retval = -ENODEV;
kfree(ep);
goto fail;
goto fail_not_linked;
}
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval) {
kfree(ep);
goto fail_not_linked;
}
if (hep->hcpriv) {
@ -965,23 +971,27 @@ static int sl811h_urb_enqueue(
start_transfer(sl811);
sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
fail:
if (retval)
usb_hcd_unlink_urb_from_ep(hcd, urb);
fail_not_linked:
spin_unlock_irqrestore(&sl811->lock, flags);
return retval;
}
static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct sl811 *sl811 = hcd_to_sl811(hcd);
struct usb_host_endpoint *hep;
unsigned long flags;
struct sl811h_ep *ep;
int retval = 0;
int retval;
spin_lock_irqsave(&sl811->lock, flags);
hep = urb->hcpriv;
if (!hep)
retval = usb_hcd_check_unlink_urb(hcd, urb, status);
if (retval)
goto fail;
hep = urb->hcpriv;
ep = hep->hcpriv;
if (ep) {
/* finish right away if this urb can't be active ...
@ -1029,8 +1039,8 @@ static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
VDBG("dequeue, urb %p active %s; wait4irq\n", urb,
(sl811->active_a == ep) ? "A" : "B");
} else
fail:
retval = -EINVAL;
fail:
spin_unlock_irqrestore(&sl811->lock, flags);
return retval;
}

drivers/usb/host/u132-hcd.c

@ -521,6 +521,7 @@ static void u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp,
urb->status = status;
urb->hcpriv = NULL;
spin_lock_irqsave(&endp->queue_lock.slock, irqs);
usb_hcd_unlink_urb_from_ep(hcd, urb);
endp->queue_next += 1;
if (ENDP_QUEUE_SIZE > --endp->queue_size) {
endp->active = 0;
@ -561,6 +562,7 @@ static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp,
urb->status = status;
urb->hcpriv = NULL;
spin_lock_irqsave(&endp->queue_lock.slock, irqs);
usb_hcd_unlink_urb_from_ep(hcd, urb);
endp->queue_next += 1;
if (ENDP_QUEUE_SIZE > --endp->queue_size) {
endp->active = 0;
@ -1876,20 +1878,32 @@ static int u132_hcd_reset(struct usb_hcd *hcd)
}
static int create_endpoint_and_queue_int(struct u132 *u132,
struct u132_udev *udev, struct usb_host_endpoint *hep, struct urb *urb,
struct u132_udev *udev, struct urb *urb,
struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
gfp_t mem_flags)
{
struct u132_ring *ring;
unsigned long irqs;
u8 endp_number = ++u132->num_endpoints;
struct u132_endp *endp = hep->hcpriv = u132->endp[endp_number - 1] =
kmalloc(sizeof(struct u132_endp), mem_flags);
int rc;
u8 endp_number;
struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
if (!endp) {
return -ENOMEM;
}
spin_lock_init(&endp->queue_lock.slock);
spin_lock_irqsave(&endp->queue_lock.slock, irqs);
rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
if (rc) {
spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
kfree(endp);
return rc;
}
endp_number = ++u132->num_endpoints;
urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
spin_lock_init(&endp->queue_lock.slock);
INIT_LIST_HEAD(&endp->urb_more);
ring = endp->ring = &u132->ring[0];
if (ring->curr_endp) {
@ -1905,7 +1919,7 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
endp->delayed = 0;
endp->endp_number = endp_number;
endp->u132 = u132;
endp->hep = hep;
endp->hep = urb->ep;
endp->pipetype = usb_pipetype(urb->pipe);
u132_endp_init_kref(u132, endp);
if (usb_pipein(urb->pipe)) {
@ -1924,7 +1938,6 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
u132_udev_get_kref(u132, udev);
}
urb->hcpriv = u132;
spin_lock_irqsave(&endp->queue_lock.slock, irqs);
endp->delayed = 1;
endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
endp->udev_number = address;
@ -1939,8 +1952,8 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
return 0;
}
static int queue_int_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
struct usb_host_endpoint *hep, struct urb *urb,
static int queue_int_on_old_endpoint(struct u132 *u132,
struct u132_udev *udev, struct urb *urb,
struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
u8 usb_endp, u8 address)
{
@ -1964,21 +1977,33 @@ static int queue_int_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
}
static int create_endpoint_and_queue_bulk(struct u132 *u132,
struct u132_udev *udev, struct usb_host_endpoint *hep, struct urb *urb,
struct u132_udev *udev, struct urb *urb,
struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
gfp_t mem_flags)
{
int ring_number;
struct u132_ring *ring;
unsigned long irqs;
u8 endp_number = ++u132->num_endpoints;
struct u132_endp *endp = hep->hcpriv = u132->endp[endp_number - 1] =
kmalloc(sizeof(struct u132_endp), mem_flags);
int rc;
u8 endp_number;
struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
if (!endp) {
return -ENOMEM;
}
spin_lock_init(&endp->queue_lock.slock);
spin_lock_irqsave(&endp->queue_lock.slock, irqs);
rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
if (rc) {
spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
kfree(endp);
return rc;
}
endp_number = ++u132->num_endpoints;
urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
spin_lock_init(&endp->queue_lock.slock);
INIT_LIST_HEAD(&endp->urb_more);
endp->dequeueing = 0;
endp->edset_flush = 0;
@ -1986,7 +2011,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
endp->delayed = 0;
endp->endp_number = endp_number;
endp->u132 = u132;
endp->hep = hep;
endp->hep = urb->ep;
endp->pipetype = usb_pipetype(urb->pipe);
u132_endp_init_kref(u132, endp);
if (usb_pipein(urb->pipe)) {
@ -2015,7 +2040,6 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
}
ring->length += 1;
urb->hcpriv = u132;
spin_lock_irqsave(&endp->queue_lock.slock, irqs);
endp->udev_number = address;
endp->usb_addr = usb_addr;
endp->usb_endp = usb_endp;
@ -2029,7 +2053,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
}
static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
struct usb_host_endpoint *hep, struct urb *urb,
struct urb *urb,
struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
u8 usb_endp, u8 address)
{
@ -2051,19 +2075,32 @@ static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
}
static int create_endpoint_and_queue_control(struct u132 *u132,
struct usb_host_endpoint *hep, struct urb *urb,
struct urb *urb,
struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp,
gfp_t mem_flags)
{
struct u132_ring *ring;
u8 endp_number = ++u132->num_endpoints;
struct u132_endp *endp = hep->hcpriv = u132->endp[endp_number - 1] =
kmalloc(sizeof(struct u132_endp), mem_flags);
unsigned long irqs;
int rc;
u8 endp_number;
struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
if (!endp) {
return -ENOMEM;
}
spin_lock_init(&endp->queue_lock.slock);
spin_lock_irqsave(&endp->queue_lock.slock, irqs);
rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
if (rc) {
spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
kfree(endp);
return rc;
}
endp_number = ++u132->num_endpoints;
urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
spin_lock_init(&endp->queue_lock.slock);
INIT_LIST_HEAD(&endp->urb_more);
ring = endp->ring = &u132->ring[0];
if (ring->curr_endp) {
@ -2079,11 +2116,10 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
endp->delayed = 0;
endp->endp_number = endp_number;
endp->u132 = u132;
endp->hep = hep;
endp->hep = urb->ep;
u132_endp_init_kref(u132, endp);
u132_endp_get_kref(u132, endp);
if (usb_addr == 0) {
unsigned long irqs;
u8 address = u132->addr[usb_addr].address;
struct u132_udev *udev = &u132->udev[address];
endp->udev_number = address;
@ -2097,7 +2133,6 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
udev->endp_number_in[usb_endp] = endp_number;
udev->endp_number_out[usb_endp] = endp_number;
urb->hcpriv = u132;
spin_lock_irqsave(&endp->queue_lock.slock, irqs);
endp->queue_size = 1;
endp->queue_last = 0;
endp->queue_next = 0;
@ -2106,7 +2141,6 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
u132_endp_queue_work(u132, endp, 0);
return 0;
} else { /*(usb_addr > 0) */
unsigned long irqs;
u8 address = u132->addr[usb_addr].address;
struct u132_udev *udev = &u132->udev[address];
endp->udev_number = address;
@ -2120,7 +2154,6 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
udev->endp_number_in[usb_endp] = endp_number;
udev->endp_number_out[usb_endp] = endp_number;
urb->hcpriv = u132;
spin_lock_irqsave(&endp->queue_lock.slock, irqs);
endp->queue_size = 1;
endp->queue_last = 0;
endp->queue_next = 0;
@ -2132,7 +2165,7 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
}
static int queue_control_on_old_endpoint(struct u132 *u132,
struct usb_host_endpoint *hep, struct urb *urb,
struct urb *urb,
struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
u8 usb_endp)
{
@ -2232,8 +2265,8 @@ static int queue_control_on_old_endpoint(struct u132 *u132,
}
}
static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
struct urb *urb, gfp_t mem_flags)
static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct u132 *u132 = hcd_to_u132(hcd);
if (irqs_disabled()) {
@ -2258,16 +2291,24 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
u8 address = u132->addr[usb_addr].address;
struct u132_udev *udev = &u132->udev[address];
struct u132_endp *endp = hep->hcpriv;
struct u132_endp *endp = urb->ep->hcpriv;
urb->actual_length = 0;
if (endp) {
unsigned long irqs;
int retval;
spin_lock_irqsave(&endp->queue_lock.slock,
irqs);
retval = queue_int_on_old_endpoint(u132, udev,
hep, urb, usb_dev, endp, usb_addr,
usb_endp, address);
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval == 0) {
retval = queue_int_on_old_endpoint(
u132, udev, urb,
usb_dev, endp,
usb_addr, usb_endp,
address);
if (retval)
usb_hcd_unlink_urb_from_ep(
hcd, urb);
}
spin_unlock_irqrestore(&endp->queue_lock.slock,
irqs);
if (retval) {
@ -2282,8 +2323,8 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
return -EINVAL;
} else { /*(endp == NULL) */
return create_endpoint_and_queue_int(u132, udev,
hep, urb, usb_dev, usb_addr, usb_endp,
address, mem_flags);
urb, usb_dev, usb_addr,
usb_endp, address, mem_flags);
}
} else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
dev_err(&u132->platform_dev->dev, "the hardware does no"
@ -2292,16 +2333,24 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
} else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
u8 address = u132->addr[usb_addr].address;
struct u132_udev *udev = &u132->udev[address];
struct u132_endp *endp = hep->hcpriv;
struct u132_endp *endp = urb->ep->hcpriv;
urb->actual_length = 0;
if (endp) {
unsigned long irqs;
int retval;
spin_lock_irqsave(&endp->queue_lock.slock,
irqs);
retval = queue_bulk_on_old_endpoint(u132, udev,
hep, urb, usb_dev, endp, usb_addr,
usb_endp, address);
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval == 0) {
retval = queue_bulk_on_old_endpoint(
u132, udev, urb,
usb_dev, endp,
usb_addr, usb_endp,
address);
if (retval)
usb_hcd_unlink_urb_from_ep(
hcd, urb);
}
spin_unlock_irqrestore(&endp->queue_lock.slock,
irqs);
if (retval) {
@ -2314,10 +2363,10 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
return -EINVAL;
} else
return create_endpoint_and_queue_bulk(u132,
udev, hep, urb, usb_dev, usb_addr,
udev, urb, usb_dev, usb_addr,
usb_endp, address, mem_flags);
} else {
struct u132_endp *endp = hep->hcpriv;
struct u132_endp *endp = urb->ep->hcpriv;
u16 urb_size = 8;
u8 *b = urb->setup_packet;
int i = 0;
@ -2340,9 +2389,16 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
int retval;
spin_lock_irqsave(&endp->queue_lock.slock,
irqs);
retval = queue_control_on_old_endpoint(u132,
hep, urb, usb_dev, endp, usb_addr,
usb_endp);
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval == 0) {
retval = queue_control_on_old_endpoint(
u132, urb, usb_dev,
endp, usb_addr,
usb_endp);
if (retval)
usb_hcd_unlink_urb_from_ep(
hcd, urb);
}
spin_unlock_irqrestore(&endp->queue_lock.slock,
irqs);
if (retval) {
@ -2355,7 +2411,7 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
return -EINVAL;
} else
return create_endpoint_and_queue_control(u132,
hep, urb, usb_dev, usb_addr, usb_endp,
urb, usb_dev, usb_addr, usb_endp,
mem_flags);
}
}
@ -2390,10 +2446,17 @@ static int dequeue_from_overflow_chain(struct u132 *u132,
}
static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
struct urb *urb)
struct urb *urb, int status)
{
unsigned long irqs;
int rc;
spin_lock_irqsave(&endp->queue_lock.slock, irqs);
rc = usb_hcd_check_unlink_urb(u132_to_hcd(u132), urb, status);
if (rc) {
spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
return rc;
}
if (endp->queue_size == 0) {
dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]"
"=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb,
@ -2438,6 +2501,8 @@ static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
}
if (urb_slot) {
struct usb_hcd *hcd = u132_to_hcd(u132);
usb_hcd_unlink_urb_from_ep(hcd, urb);
endp->queue_size -= 1;
if (list_empty(&endp->urb_more)) {
spin_unlock_irqrestore(&endp->queue_lock.slock,
@ -2467,7 +2532,10 @@ static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
return -EINVAL;
} else {
int retval = dequeue_from_overflow_chain(u132, endp,
int retval;
usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132), urb);
retval = dequeue_from_overflow_chain(u132, endp,
urb);
spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
return retval;
@ -2475,7 +2543,7 @@ static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
}
}
static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct u132 *u132 = hcd_to_u132(hcd);
if (u132->going > 2) {
@ -2490,11 +2558,11 @@ static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
if (usb_pipein(urb->pipe)) {
u8 endp_number = udev->endp_number_in[usb_endp];
struct u132_endp *endp = u132->endp[endp_number - 1];
return u132_endp_urb_dequeue(u132, endp, urb);
return u132_endp_urb_dequeue(u132, endp, urb, status);
} else {
u8 endp_number = udev->endp_number_out[usb_endp];
struct u132_endp *endp = u132->endp[endp_number - 1];
return u132_endp_urb_dequeue(u132, endp, urb);
return u132_endp_urb_dequeue(u132, endp, urb, status);
}
}
}

drivers/usb/host/uhci-q.c

@ -1376,7 +1376,6 @@ static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
}
static int uhci_urb_enqueue(struct usb_hcd *hcd,
struct usb_host_endpoint *hep,
struct urb *urb, gfp_t mem_flags)
{
int ret;
@ -1387,19 +1386,19 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
spin_lock_irqsave(&uhci->lock, flags);
ret = urb->status;
if (ret != -EINPROGRESS) /* URB already unlinked! */
goto done;
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
goto done_not_linked;
ret = -ENOMEM;
urbp = uhci_alloc_urb_priv(uhci, urb);
if (!urbp)
goto done;
if (hep->hcpriv)
qh = (struct uhci_qh *) hep->hcpriv;
if (urb->ep->hcpriv)
qh = urb->ep->hcpriv;
else {
qh = uhci_alloc_qh(uhci, urb->dev, hep);
qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
if (!qh)
goto err_no_qh;
}
@ -1440,27 +1439,29 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
err_submit_failed:
if (qh->state == QH_STATE_IDLE)
uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */
err_no_qh:
uhci_free_urb_priv(uhci, urbp);
done:
if (ret)
usb_hcd_unlink_urb_from_ep(hcd, urb);
done_not_linked:
spin_unlock_irqrestore(&uhci->lock, flags);
return ret;
}
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned long flags;
struct urb_priv *urbp;
struct uhci_qh *qh;
int rc;
spin_lock_irqsave(&uhci->lock, flags);
urbp = urb->hcpriv;
if (!urbp) /* URB was never linked! */
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto done;
qh = urbp->qh;
qh = ((struct urb_priv *) urb->hcpriv)->qh;
/* Remove Isochronous TDs from the frame list ASAP */
if (qh->type == USB_ENDPOINT_XFER_ISOC) {
@ -1477,7 +1478,7 @@ static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
done:
spin_unlock_irqrestore(&uhci->lock, flags);
return 0;
return rc;
}
/*
@ -1529,6 +1530,7 @@ __acquires(uhci->lock)
}
uhci_free_urb_priv(uhci, urbp);
usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);
spin_unlock(&uhci->lock);
usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb);