2014-10-02 02:54:14 +00:00
|
|
|
/*
|
|
|
|
* Greybus connections
|
|
|
|
*
|
|
|
|
* Copyright 2014 Google Inc.
|
2014-12-12 18:08:42 +00:00
|
|
|
* Copyright 2014 Linaro Ltd.
|
2014-10-02 02:54:14 +00:00
|
|
|
*
|
|
|
|
* Released under the GPLv2 only.
|
|
|
|
*/
|
|
|
|
|
2015-07-23 08:50:02 +00:00
|
|
|
#include <linux/workqueue.h>
|
|
|
|
|
2014-10-02 02:54:14 +00:00
|
|
|
#include "greybus.h"
|
|
|
|
|
2014-10-03 19:14:22 +00:00
|
|
|
static DEFINE_SPINLOCK(gb_connections_lock);
|
|
|
|
|
2015-06-09 22:42:58 +00:00
|
|
|
/* This is only used at initialization time; no locking is required. */
|
|
|
|
static struct gb_connection *
|
2015-07-01 06:43:56 +00:00
|
|
|
gb_connection_intf_find(struct gb_interface *intf, u16 cport_id)
|
2015-06-09 22:42:58 +00:00
|
|
|
{
|
2015-11-03 17:03:23 +00:00
|
|
|
struct gb_host_device *hd = intf->hd;
|
2015-06-09 22:42:58 +00:00
|
|
|
struct gb_connection *connection;
|
|
|
|
|
2015-11-25 14:59:12 +00:00
|
|
|
list_for_each_entry(connection, &hd->connections, hd_links) {
|
|
|
|
if (connection->intf == intf &&
|
2015-07-01 06:43:56 +00:00
|
|
|
connection->intf_cport_id == cport_id)
|
2015-06-09 22:42:58 +00:00
|
|
|
return connection;
|
2015-11-25 14:59:12 +00:00
|
|
|
}
|
|
|
|
|
2015-06-09 22:42:58 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-06-08 17:05:11 +00:00
|
|
|
static struct gb_connection *
|
2015-11-03 17:03:23 +00:00
|
|
|
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
|
2014-10-06 11:53:08 +00:00
|
|
|
{
|
2015-06-09 22:42:58 +00:00
|
|
|
struct gb_connection *connection;
|
2015-03-02 08:55:26 +00:00
|
|
|
unsigned long flags;
|
2014-10-06 11:53:08 +00:00
|
|
|
|
2015-03-02 08:55:26 +00:00
|
|
|
spin_lock_irqsave(&gb_connections_lock, flags);
|
2014-11-17 14:08:44 +00:00
|
|
|
list_for_each_entry(connection, &hd->connections, hd_links)
|
|
|
|
if (connection->hd_cport_id == cport_id)
|
2014-10-06 17:26:02 +00:00
|
|
|
goto found;
|
|
|
|
connection = NULL;
|
2015-06-09 22:42:58 +00:00
|
|
|
found:
|
2015-03-02 08:55:26 +00:00
|
|
|
spin_unlock_irqrestore(&gb_connections_lock, flags);
|
2014-10-06 11:53:08 +00:00
|
|
|
|
|
|
|
return connection;
|
|
|
|
}
|
|
|
|
|
2014-11-20 22:09:18 +00:00
|
|
|
/*
|
|
|
|
* Callback from the host driver to let us know that data has been
|
2014-12-12 22:10:17 +00:00
|
|
|
* received on the bundle.
|
2014-11-20 22:09:18 +00:00
|
|
|
*/
|
2015-11-03 17:03:23 +00:00
|
|
|
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
|
2014-11-18 00:08:37 +00:00
|
|
|
u8 *data, size_t length)
|
|
|
|
{
|
|
|
|
struct gb_connection *connection;
|
|
|
|
|
2015-05-20 11:18:00 +00:00
|
|
|
connection = gb_connection_hd_find(hd, cport_id);
|
2014-11-18 00:08:37 +00:00
|
|
|
if (!connection) {
|
2015-11-25 14:59:02 +00:00
|
|
|
dev_err(&hd->dev,
|
2014-11-18 00:08:37 +00:00
|
|
|
"nonexistent connection (%zu bytes dropped)\n", length);
|
|
|
|
return;
|
|
|
|
}
|
2014-11-18 19:26:50 +00:00
|
|
|
gb_connection_recv(connection, data, length);
|
2014-11-18 00:08:37 +00:00
|
|
|
}
|
2014-11-20 22:09:18 +00:00
|
|
|
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
|
2014-11-18 00:08:37 +00:00
|
|
|
|
2015-10-16 23:56:38 +00:00
|
|
|
static DEFINE_MUTEX(connection_mutex);
|
2014-10-24 09:34:46 +00:00
|
|
|
|
2015-10-16 23:56:38 +00:00
|
|
|
static void gb_connection_kref_release(struct kref *kref)
|
2014-10-24 09:34:46 +00:00
|
|
|
{
|
2015-10-16 23:56:38 +00:00
|
|
|
struct gb_connection *connection;
|
2014-10-24 09:34:46 +00:00
|
|
|
|
2015-10-16 23:56:38 +00:00
|
|
|
connection = container_of(kref, struct gb_connection, kref);
|
2015-07-23 08:50:02 +00:00
|
|
|
destroy_workqueue(connection->wq);
|
2014-10-24 09:34:46 +00:00
|
|
|
kfree(connection);
|
2015-10-16 23:56:38 +00:00
|
|
|
mutex_unlock(&connection_mutex);
|
2014-10-24 09:34:46 +00:00
|
|
|
}
|
|
|
|
|
2015-07-24 10:02:19 +00:00
|
|
|
int svc_update_connection(struct gb_interface *intf,
|
|
|
|
struct gb_connection *connection)
|
|
|
|
{
|
|
|
|
struct gb_bundle *bundle;
|
|
|
|
|
|
|
|
bundle = gb_bundle_create(intf, GB_SVC_BUNDLE_ID, GREYBUS_CLASS_SVC);
|
|
|
|
if (!bundle)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
connection->bundle = bundle;
|
|
|
|
|
|
|
|
spin_lock_irq(&gb_connections_lock);
|
|
|
|
list_add(&connection->bundle_links, &bundle->connections);
|
|
|
|
spin_unlock_irq(&gb_connections_lock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-10-02 02:54:14 +00:00
|
|
|
/*
|
2015-11-25 14:59:11 +00:00
|
|
|
* gb_connection_create() - create a Greybus connection
|
|
|
|
* @hd: host device of the connection
|
|
|
|
* @hd_cport_id: host-device cport id, or -1 for dynamic allocation
|
|
|
|
* @intf: remote interface, or NULL for static connections
|
|
|
|
* @bundle: remote-interface bundle (may be NULL)
|
|
|
|
* @cport_id: remote-interface cport id, or 0 for static connections
|
|
|
|
* @protocol_id: protocol id
|
|
|
|
*
|
|
|
|
* Create a Greybus connection, representing the bidirectional link
|
2014-10-02 02:54:14 +00:00
|
|
|
* between a CPort on a (local) Greybus host device and a CPort on
|
2015-11-25 14:59:11 +00:00
|
|
|
* another Greybus interface.
|
2014-10-02 02:54:14 +00:00
|
|
|
*
|
2014-10-02 02:54:15 +00:00
|
|
|
* A connection also maintains the state of operations sent over the
|
|
|
|
* connection.
|
|
|
|
*
|
2015-11-25 14:59:11 +00:00
|
|
|
* Return: A pointer to the new connection if successful, or NULL otherwise.
|
2014-10-02 02:54:14 +00:00
|
|
|
*/
|
2015-11-25 14:59:11 +00:00
|
|
|
static struct gb_connection *
gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		     struct gb_interface *intf,
		     struct gb_bundle *bundle, int cport_id,
		     u8 protocol_id)
{
	struct gb_connection *connection;
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;
	int retval;
	u8 major = 0;
	u8 minor = 1;

	/*
	 * If a manifest tries to reuse a cport, reject it. We
	 * initialize connections serially so we don't need to worry
	 * about holding the connection lock.
	 */
	if (bundle && gb_connection_intf_find(bundle->intf, cport_id)) {
		dev_err(&bundle->dev, "cport 0x%04hx already connected\n",
			cport_id);
		return NULL;
	}

	/*
	 * Translate the requested host cport into an ida range: a
	 * negative hd_cport_id means "allocate any free cport",
	 * otherwise pin the allocation to exactly the requested id.
	 */
	if (hd_cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (hd_cport_id < hd->num_cports) {
		ida_start = hd_cport_id;
		ida_end = hd_cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", hd_cport_id);
		return NULL;
	}

	hd_cport_id = ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
	if (hd_cport_id < 0)
		return NULL;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection)
		goto err_remove_ida;

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;

	connection->protocol_id = protocol_id;
	connection->major = major;
	connection->minor = minor;

	connection->bundle = bundle;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	/* Per-connection ordered workqueue, named after hd and cport. */
	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq)
		goto err_free_connection;

	kref_init(&connection->kref);

	/* Link into the host device (and optionally bundle) lists. */
	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	retval = gb_connection_bind_protocol(connection);
	if (retval) {
		dev_err(&hd->dev, "%d: failed to bind protocol: %d\n",
			cport_id, retval);
		/*
		 * The connection is fully linked by now, so tear it
		 * down through the normal destroy path.
		 */
		gb_connection_destroy(connection);
		return NULL;
	}

	return connection;

err_free_connection:
	kfree(connection);
err_remove_ida:
	ida_simple_remove(id_map, hd_cport_id);

	return NULL;
}
|
|
|
|
|
2015-11-25 14:59:11 +00:00
|
|
|
/*
 * Create a static connection: no remote interface or bundle (see
 * gb_connection_create() kernel-doc), with a fixed host cport id.
 */
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd,
				u16 hd_cport_id, u8 protocol_id)
{
	/* intf/bundle NULL and remote cport 0 mark it as static. */
	return gb_connection_create(hd, hd_cport_id, NULL, NULL, 0,
					protocol_id);
}
|
|
|
|
|
|
|
|
/*
 * Create a connection to cport @cport_id on remote interface @intf;
 * the host-side cport id is allocated dynamically (hd_cport_id == -1,
 * see gb_connection_create()).
 */
struct gb_connection *
gb_connection_create_dynamic(struct gb_interface *intf,
				struct gb_bundle *bundle,
				u16 cport_id, u8 protocol_id)
{
	return gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
					protocol_id);
}
|
|
|
|
|
2015-09-17 11:17:26 +00:00
|
|
|
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
|
|
|
|
{
|
2015-11-03 17:03:23 +00:00
|
|
|
struct gb_host_device *hd = connection->hd;
|
2015-09-17 11:17:26 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!hd->driver->cport_enable)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
|
|
|
|
if (ret) {
|
2015-11-25 14:59:02 +00:00
|
|
|
dev_err(&hd->dev,
|
2015-10-16 23:56:23 +00:00
|
|
|
"failed to enable host cport: %d\n", ret);
|
2015-09-17 11:17:26 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gb_connection_hd_cport_disable(struct gb_connection *connection)
|
|
|
|
{
|
2015-11-03 17:03:23 +00:00
|
|
|
struct gb_host_device *hd = connection->hd;
|
2015-09-17 11:17:26 +00:00
|
|
|
|
|
|
|
if (!hd->driver->cport_disable)
|
|
|
|
return;
|
|
|
|
|
|
|
|
hd->driver->cport_disable(hd, connection->hd_cport_id);
|
|
|
|
}
|
|
|
|
|
2015-07-14 13:43:31 +00:00
|
|
|
/*
|
|
|
|
* Cancel all active operations on a connection.
|
|
|
|
*
|
|
|
|
* Should only be called during connection tear down.
|
|
|
|
*/
|
|
|
|
static void gb_connection_cancel_operations(struct gb_connection *connection,
						int errno)
{
	struct gb_operation *operation;

	spin_lock_irq(&connection->lock);
	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
						struct gb_operation, links);
		/*
		 * Take a reference so the operation cannot go away
		 * while we drop the connection lock to cancel it.
		 */
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		/* Incoming and outgoing operations cancel differently. */
		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		/* Reacquire the lock before re-examining the list. */
		spin_lock_irq(&connection->lock);
	}
	spin_unlock_irq(&connection->lock);
}
|
|
|
|
|
2015-09-17 11:17:21 +00:00
|
|
|
/*
|
|
|
|
* Request the SVC to create a connection from AP's cport to interface's
|
|
|
|
* cport.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
gb_connection_svc_connection_create(struct gb_connection *connection)
|
|
|
|
{
|
2015-11-03 17:03:23 +00:00
|
|
|
struct gb_host_device *hd = connection->hd;
|
2015-10-07 19:40:24 +00:00
|
|
|
struct gb_interface *intf;
|
2015-09-17 11:17:21 +00:00
|
|
|
int ret;
|
|
|
|
|
2015-11-25 14:59:13 +00:00
|
|
|
if (gb_connection_is_static(connection))
|
2015-09-17 11:17:21 +00:00
|
|
|
return 0;
|
|
|
|
|
2015-10-07 19:40:24 +00:00
|
|
|
intf = connection->bundle->intf;
|
2015-09-17 11:17:21 +00:00
|
|
|
ret = gb_svc_connection_create(hd->svc,
|
2015-11-25 14:59:09 +00:00
|
|
|
hd->svc->ap_intf_id,
|
2015-09-17 11:17:21 +00:00
|
|
|
connection->hd_cport_id,
|
2015-10-07 19:40:24 +00:00
|
|
|
intf->interface_id,
|
|
|
|
connection->intf_cport_id,
|
|
|
|
intf->boot_over_unipro);
|
2015-09-17 11:17:21 +00:00
|
|
|
if (ret) {
|
2015-10-16 23:56:23 +00:00
|
|
|
dev_err(&connection->bundle->dev,
|
|
|
|
"failed to create svc connection: %d\n", ret);
|
2015-09-17 11:17:21 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-09-07 10:31:22 +00:00
|
|
|
static void
|
|
|
|
gb_connection_svc_connection_destroy(struct gb_connection *connection)
|
|
|
|
{
|
2015-11-25 14:59:13 +00:00
|
|
|
if (gb_connection_is_static(connection))
|
2015-09-07 10:31:22 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
gb_svc_connection_destroy(connection->hd->svc,
|
2015-11-25 14:59:09 +00:00
|
|
|
connection->hd->svc->ap_intf_id,
|
2015-09-07 10:31:22 +00:00
|
|
|
connection->hd_cport_id,
|
|
|
|
connection->bundle->intf->interface_id,
|
|
|
|
connection->intf_cport_id);
|
|
|
|
}
|
|
|
|
|
2015-09-17 11:17:24 +00:00
|
|
|
/* Inform Interface about active CPorts */
|
|
|
|
static int gb_connection_control_connected(struct gb_connection *connection)
|
|
|
|
{
|
|
|
|
struct gb_protocol *protocol = connection->protocol;
|
|
|
|
struct gb_control *control;
|
|
|
|
u16 cport_id = connection->intf_cport_id;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_CONNECTED)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
control = connection->bundle->intf->control;
|
|
|
|
|
|
|
|
ret = gb_control_connected_operation(control, cport_id);
|
|
|
|
if (ret) {
|
2015-10-16 23:56:23 +00:00
|
|
|
dev_err(&connection->bundle->dev,
|
|
|
|
"failed to connect cport: %d\n", ret);
|
2015-09-17 11:17:24 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-09-17 11:17:23 +00:00
|
|
|
/* Inform Interface about inactive CPorts */
|
|
|
|
static void
|
|
|
|
gb_connection_control_disconnected(struct gb_connection *connection)
|
2015-08-11 02:05:56 +00:00
|
|
|
{
|
2015-09-17 11:17:23 +00:00
|
|
|
struct gb_protocol *protocol = connection->protocol;
|
2015-08-11 02:05:56 +00:00
|
|
|
struct gb_control *control;
|
2015-09-17 11:17:23 +00:00
|
|
|
u16 cport_id = connection->intf_cport_id;
|
2015-08-11 02:05:56 +00:00
|
|
|
int ret;
|
|
|
|
|
2015-09-17 11:17:23 +00:00
|
|
|
if (protocol->flags & GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED)
|
2015-08-11 02:05:56 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
control = connection->bundle->intf->control;
|
|
|
|
|
|
|
|
ret = gb_control_disconnected_operation(control, cport_id);
|
2015-09-17 11:17:23 +00:00
|
|
|
if (ret) {
|
2015-10-16 23:56:23 +00:00
|
|
|
dev_warn(&connection->bundle->dev,
|
|
|
|
"failed to disconnect cport: %d\n", ret);
|
2015-09-17 11:17:23 +00:00
|
|
|
}
|
2015-08-11 02:05:56 +00:00
|
|
|
}
|
|
|
|
|
2015-09-17 11:17:25 +00:00
|
|
|
/*
|
|
|
|
* Request protocol version supported by the module. We don't need to do
|
|
|
|
* this for SVC as that is initiated by the SVC.
|
|
|
|
*/
|
|
|
|
static int gb_connection_protocol_get_version(struct gb_connection *connection)
|
|
|
|
{
|
|
|
|
struct gb_protocol *protocol = connection->protocol;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (protocol->flags & GB_PROTOCOL_SKIP_VERSION)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = gb_protocol_get_version(connection);
|
|
|
|
if (ret) {
|
2015-10-16 23:56:23 +00:00
|
|
|
dev_err(&connection->bundle->dev,
|
|
|
|
"failed to get protocol version: %d\n", ret);
|
2015-09-17 11:17:25 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-08-31 11:51:09 +00:00
|
|
|
static int gb_connection_init(struct gb_connection *connection)
{
	struct gb_protocol *protocol = connection->protocol;
	int ret;

	/* Enable the host-side cport first. */
	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	/* Have the SVC route AP cport <-> interface cport. */
	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_disable;

	/* Tell the remote interface its cport is now in use. */
	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_svc_destroy;

	/* Need to enable the connection to initialize it */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_ENABLED;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_protocol_get_version(connection);
	if (ret)
		goto err_disconnect;

	/* Finally let the bound protocol do its own setup. */
	ret = protocol->connection_init(connection);
	if (ret)
		goto err_disconnect;

	return 0;

err_disconnect:
	/*
	 * Flag the connection as errored under the lock so no further
	 * traffic is processed while it is unwound.
	 */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_ERROR;
	spin_unlock_irq(&connection->lock);

	gb_connection_control_disconnected(connection);
err_svc_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_disable:
	gb_connection_hd_cport_disable(connection);

	return ret;
}
|
2014-10-21 04:01:04 +00:00
|
|
|
|
2015-08-31 11:51:13 +00:00
|
|
|
static void gb_connection_exit(struct gb_connection *connection)
{
	/* Nothing to undo if no protocol was ever bound. */
	if (!connection->protocol)
		return;

	/*
	 * Only enabled connections are torn down; move to DESTROYING
	 * under the lock so concurrent users observe the transition.
	 */
	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		return;
	}
	connection->state = GB_CONNECTION_STATE_DESTROYING;
	spin_unlock_irq(&connection->lock);

	/* Abort any in-flight operations before notifying anyone. */
	gb_connection_cancel_operations(connection, -ESHUTDOWN);

	/* Unwind in reverse order of gb_connection_init(). */
	connection->protocol->connection_exit(connection);
	gb_connection_control_disconnected(connection);
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_disable(connection);
}
|
2015-07-09 05:26:30 +00:00
|
|
|
|
2015-08-31 11:51:13 +00:00
|
|
|
/*
|
|
|
|
* Tear down a previously set up connection.
|
|
|
|
*/
|
|
|
|
void gb_connection_destroy(struct gb_connection *connection)
{
	struct ida *id_map;

	if (WARN_ON(!connection))
		return;

	/* Shut the connection down first (no-op unless enabled). */
	gb_connection_exit(connection);

	/* Unlink from the host-device and bundle lists. */
	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	/* Drop the protocol reference taken when it was bound. */
	if (connection->protocol)
		gb_protocol_put(connection->protocol);
	connection->protocol = NULL;

	/* Give the host cport id back to the host device's allocator. */
	id_map = &connection->hd->cport_id_map;
	ida_simple_remove(id_map, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	/*
	 * Final reference drop; gb_connection_kref_release() runs with
	 * connection_mutex held and releases it.
	 */
	kref_put_mutex(&connection->kref, gb_connection_kref_release,
		       &connection_mutex);
}
|
|
|
|
|
2015-10-15 15:10:42 +00:00
|
|
|
void gb_connection_latency_tag_enable(struct gb_connection *connection)
|
|
|
|
{
|
2015-11-03 17:03:23 +00:00
|
|
|
struct gb_host_device *hd = connection->hd;
|
2015-10-15 15:10:42 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!hd->driver->latency_tag_enable)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
|
|
|
|
if (ret) {
|
2015-10-16 23:56:23 +00:00
|
|
|
dev_err(&connection->bundle->dev,
|
2015-10-15 15:10:42 +00:00
|
|
|
"failed to enable latency tag: %d\n", ret);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
|
|
|
|
|
|
|
|
void gb_connection_latency_tag_disable(struct gb_connection *connection)
|
|
|
|
{
|
2015-11-03 17:03:23 +00:00
|
|
|
struct gb_host_device *hd = connection->hd;
|
2015-10-15 15:10:42 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!hd->driver->latency_tag_disable)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
|
|
|
|
if (ret) {
|
2015-10-16 23:56:23 +00:00
|
|
|
dev_err(&connection->bundle->dev,
|
2015-10-15 15:10:42 +00:00
|
|
|
"failed to disable latency tag: %d\n", ret);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
|
|
|
|
|
2015-08-31 11:51:14 +00:00
|
|
|
int gb_connection_bind_protocol(struct gb_connection *connection)
|
2015-08-31 11:51:09 +00:00
|
|
|
{
|
|
|
|
struct gb_protocol *protocol;
|
2015-08-31 11:51:14 +00:00
|
|
|
int ret;
|
2015-08-31 11:51:09 +00:00
|
|
|
|
|
|
|
/* If we already have a protocol bound here, just return */
|
|
|
|
if (connection->protocol)
|
2015-08-31 11:51:14 +00:00
|
|
|
return 0;
|
2015-08-31 11:51:09 +00:00
|
|
|
|
|
|
|
protocol = gb_protocol_get(connection->protocol_id,
|
|
|
|
connection->major,
|
|
|
|
connection->minor);
|
2015-10-13 17:10:28 +00:00
|
|
|
if (!protocol) {
|
2015-11-25 14:59:02 +00:00
|
|
|
dev_warn(&connection->hd->dev,
|
2015-10-13 17:10:28 +00:00
|
|
|
"protocol 0x%02hhx version %hhu.%hhu not found\n",
|
|
|
|
connection->protocol_id,
|
|
|
|
connection->major, connection->minor);
|
2015-08-31 11:51:14 +00:00
|
|
|
return 0;
|
2015-10-13 17:10:28 +00:00
|
|
|
}
|
2015-08-31 11:51:09 +00:00
|
|
|
connection->protocol = protocol;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we have a valid device_id for the interface block, then we have an
|
|
|
|
* active device, so bring up the connection at the same time.
|
|
|
|
*/
|
|
|
|
if ((!connection->bundle &&
|
2015-09-07 10:31:24 +00:00
|
|
|
protocol->flags & GB_PROTOCOL_NO_BUNDLE) ||
|
2015-08-31 11:51:14 +00:00
|
|
|
connection->bundle->intf->device_id != GB_DEVICE_ID_BAD) {
|
|
|
|
ret = gb_connection_init(connection);
|
|
|
|
if (ret) {
|
|
|
|
gb_protocol_put(protocol);
|
|
|
|
connection->protocol = NULL;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2015-08-31 11:51:09 +00:00
|
|
|
}
|