author     Andreas Noever <andreas.noever@gmail.com>         2014-06-03 22:04:08 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2014-06-19 14:07:47 -0700
commit     3364f0c12795713e89ae1209081c60d64bfb4ca1 (patch)
tree       c13a0c83e2a0268bb2af82a34f10732a6b46312b /drivers/thunderbolt/tb.c
parent     520b670216a15fb949e6ec6a1af9b5dd55d219c7 (diff)
thunderbolt: Add support for simple pci tunnels
A PCI downstream port and a PCI upstream port can be connected through a tunnel. To establish the tunnel we have to set up two unidirectional paths between the two ports. Right now we only support paths with two hops (i.e. no chaining) and at most one PCI device per Thunderbolt device.

Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
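The tunnel calls used below are declared in tunnel_pci.h, which this commit includes at the top of tb.c but which falls outside this filtered diffstat. As a reading aid, here is a sketch of that interface reconstructed purely from the call sites in the diff; the exact return types are assumptions, not copied from the header:

/* Sketch of the tunnel_pci.h API as used by tb.c (reconstructed, not verbatim). */
struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
				   struct tb_port *down);    /* NULL on failure */
int tb_pci_activate(struct tb_pci_tunnel *tunnel);           /* nonzero on failure */
void tb_pci_deactivate(struct tb_pci_tunnel *tunnel);
void tb_pci_free(struct tb_pci_tunnel *tunnel);
bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel);        /* a hop has gone away */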
Diffstat (limited to 'drivers/thunderbolt/tb.c')
-rw-r--r--   drivers/thunderbolt/tb.c   135
1 file changed, 135 insertions(+), 0 deletions(-)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 1efcacc7210..177f61df464 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -10,6 +10,7 @@
#include "tb.h"
#include "tb_regs.h"
+#include "tunnel_pci.h"
/* enumeration & hot plug handling */
@@ -51,6 +52,124 @@ static void tb_scan_port(struct tb_port *port)
tb_scan_switch(sw);
}
+/**
+ * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
+ */
+static void tb_free_invalid_tunnels(struct tb *tb)
+{
+ struct tb_pci_tunnel *tunnel;
+ struct tb_pci_tunnel *n;
+ list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
+ {
+ if (tb_pci_is_invalid(tunnel)) {
+ tb_pci_deactivate(tunnel);
+ tb_pci_free(tunnel);
+ }
+ }
+}
+
+/**
+ * tb_find_pci_up_port() - return the first PCIe up port on @sw or NULL
+ */
+static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
+{
+ int i;
+ for (i = 1; i <= sw->config.max_port_number; i++)
+ if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
+ return &sw->ports[i];
+ return NULL;
+}
+
+/**
+ * tb_find_unused_down_port() - return the first inactive PCIe down port on @sw
+ */
+static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
+{
+ int i;
+ int cap;
+ int res;
+ int data;
+ for (i = 1; i <= sw->config.max_port_number; i++) {
+ if (tb_is_upstream_port(&sw->ports[i]))
+ continue;
+ if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
+ continue;
+ cap = tb_find_cap(&sw->ports[i], TB_CFG_PORT, TB_CAP_PCIE);
+ if (cap <= 0)
+ continue;
+ res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
+ if (res < 0)
+ continue;
+ if (data & 0x80000000)
+ continue;
+ return &sw->ports[i];
+ }
+ return NULL;
+}
+
+/**
+ * tb_activate_pcie_devices() - scan for and activate PCIe devices
+ *
+ * This method is somewhat ad hoc. For now it only supports one device
+ * per port and only devices at depth 1.
+ */
+static void tb_activate_pcie_devices(struct tb *tb)
+{
+ int i;
+ int cap;
+ u32 data;
+ struct tb_switch *sw;
+ struct tb_port *up_port;
+ struct tb_port *down_port;
+ struct tb_pci_tunnel *tunnel;
+ /* scan for PCIe devices at depth 1 */
+ for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
+ if (tb_is_upstream_port(&tb->root_switch->ports[i]))
+ continue;
+ if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
+ continue;
+ if (!tb->root_switch->ports[i].remote)
+ continue;
+ sw = tb->root_switch->ports[i].remote->sw;
+ up_port = tb_find_pci_up_port(sw);
+ if (!up_port) {
+ tb_sw_info(sw, "no PCIe devices found, aborting\n");
+ continue;
+ }
+
+ /* check whether port is already activated */
+ cap = tb_find_cap(up_port, TB_CFG_PORT, TB_CAP_PCIE);
+ if (cap <= 0)
+ continue;
+ if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
+ continue;
+ if (data & 0x80000000) {
+ tb_port_info(up_port,
+ "PCIe port already activated, aborting\n");
+ continue;
+ }
+
+ down_port = tb_find_unused_down_port(tb->root_switch);
+ if (!down_port) {
+ tb_port_info(up_port,
+ "All PCIe down ports are occupied, aborting\n");
+ continue;
+ }
+ tunnel = tb_pci_alloc(tb, up_port, down_port);
+ if (!tunnel) {
+ tb_port_info(up_port,
+ "PCIe tunnel allocation failed, aborting\n");
+ continue;
+ }
+
+ if (tb_pci_activate(tunnel)) {
+ tb_port_info(up_port,
+ "PCIe tunnel activation failed, aborting\n");
+ tb_pci_free(tunnel);
+ }
+
+ }
+}
/* hotplug handling */
@@ -101,6 +220,7 @@ static void tb_handle_hotplug(struct work_struct *work)
if (port->remote) {
tb_port_info(port, "unplugged\n");
tb_sw_set_unpplugged(port->remote->sw);
+ tb_free_invalid_tunnels(tb);
tb_switch_free(port->remote->sw);
port->remote = NULL;
} else {
@@ -118,6 +238,10 @@ static void tb_handle_hotplug(struct work_struct *work)
} else if (port->remote->sw->config.depth > 1) {
tb_sw_warn(port->remote->sw,
"hotplug: chaining not supported\n");
+ } else {
+ tb_sw_info(port->remote->sw,
+ "hotplug: activating pcie devices\n");
+ tb_activate_pcie_devices(tb);
}
}
out:
@@ -154,8 +278,17 @@ static void tb_schedule_hotplug_handler(void *data, u64 route, u8 port,
*/
void thunderbolt_shutdown_and_free(struct tb *tb)
{
+ struct tb_pci_tunnel *tunnel;
+ struct tb_pci_tunnel *n;
+
mutex_lock(&tb->lock);
+ /* tunnels are only present after everything has been initialized */
+ list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) {
+ tb_pci_deactivate(tunnel);
+ tb_pci_free(tunnel);
+ }
+
if (tb->root_switch)
tb_switch_free(tb->root_switch);
tb->root_switch = NULL;
@@ -201,6 +334,7 @@ struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
tb->nhi = nhi;
mutex_init(&tb->lock);
mutex_lock(&tb->lock);
+ INIT_LIST_HEAD(&tb->tunnel_list);
tb->wq = alloc_ordered_workqueue("thunderbolt", 0);
if (!tb->wq)
@@ -221,6 +355,7 @@ struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
+ tb_activate_pcie_devices(tb);
/* Allow tb_handle_hotplug to progress events */
tb->hotplug_active = true;
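
A note on the two "data & 0x80000000" tests in the new code: the patch reads one dword at the port's PCIe capability and, per the surrounding comments, treats bit 31 as the "adapter already activated" flag, skipping such ports. Below is a minimal helper sketch of that test, using only calls that appear in the diff; the name tb_pci_port_is_enabled() is illustrative and not part of this patch:

/* Hypothetical helper equivalent to the inline checks in the patch above. */
static bool tb_pci_port_is_enabled(struct tb_port *port)
{
	int cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PCIE);
	u32 data;

	if (cap <= 0)
		return false;
	if (tb_port_read(port, &data, TB_CFG_PORT, cap, 1))
		return false;
	return data & 0x80000000;   /* bit 31: adapter is already activated */
}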