author     Mika Westerberg <mika.westerberg@linux.intel.com>   2021-01-08 17:25:39 +0300
committer  Mika Westerberg <mika.westerberg@linux.intel.com>   2021-03-18 18:25:31 +0300
commit     180b0689425c6fb2b35e69a3316ee38371a782df (patch)
tree       bbfac79fc08f79f6f5ce66a584ed6c1ae1a127af /drivers/net/thunderbolt.c
parent     5cfdd300b7b1e6f2390cf0d71040a8c26297bef7 (diff)
download   linux-180b0689425c6fb2b35e69a3316ee38371a782df.tar.xz
thunderbolt: Allow multiple DMA tunnels over a single XDomain connection
Currently we have had an artificial limitation of a single DMA tunnel per XDomain connection. However, hardware-wise there is no such limit, and a software-based connection manager can take advantage of all the DMA rings available on the host to establish tunnels.

For this reason, make tb_xdomain_[enable|disable]_paths() take the DMA ring and HopID as parameters instead of storing them in struct tb_xdomain. We also add API functions that service drivers can use to allocate the input and output HopIDs of the XDomain connection instead of hard-coding them. Also convert the two existing service drivers over to this API.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
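The usage pattern of the new API is easiest to see in isolation. Below is a minimal sketch of how a service driver might use the per-connection HopID helpers; struct my_service, its fields and my_service_enable() are illustrative names only, while the tb_xdomain_* calls and their argument order mirror the tbnet changes in the diff below. Error handling is condensed; tbnet additionally verifies that tb_xdomain_alloc_in_hopid() returned exactly the HopID it asked for.

/*
 * Sketch of a service driver using the per-connection HopID helpers
 * added by this series.  Only the tb_xdomain_* calls come from the new
 * API; everything else here is illustrative.
 */
#include <linux/thunderbolt.h>

struct my_service {
        struct tb_xdomain *xd;
        int local_path;         /* HopID this end transmits on */
        int remote_path;        /* HopID the remote end transmits on */
};

static int my_service_enable(struct my_service *svc, int remote_path,
                             int transmit_ring, int receive_ring)
{
        int ret;

        /* Ask for any free output HopID (-1 = no preference, as tbnet does). */
        ret = tb_xdomain_alloc_out_hopid(svc->xd, -1);
        if (ret < 0)
                return ret;
        svc->local_path = ret;

        /* Reserve the input HopID the remote end announced it will use. */
        ret = tb_xdomain_alloc_in_hopid(svc->xd, remote_path);
        if (ret < 0)
                goto err_out_hopid;
        svc->remote_path = ret;

        /* Enable both DMA paths of this tunnel over the XDomain link. */
        ret = tb_xdomain_enable_paths(svc->xd, svc->local_path, transmit_ring,
                                      svc->remote_path, receive_ring);
        if (ret)
                goto err_in_hopid;

        return 0;

err_in_hopid:
        tb_xdomain_release_in_hopid(svc->xd, svc->remote_path);
err_out_hopid:
        tb_xdomain_release_out_hopid(svc->xd, svc->local_path);
        return ret;
}

Teardown mirrors this: call tb_xdomain_disable_paths() with the same ring and HopID arguments, then release both HopIDs, as tbnet_tear_down() and tbnet_stop() do in the diff below.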
Diffstat (limited to 'drivers/net/thunderbolt.c')
-rw-r--r--  drivers/net/thunderbolt.c | 49
1 file changed, 38 insertions(+), 11 deletions(-)
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index ed3743dc62b9..5c9ec91b6e78 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -28,7 +28,6 @@
#define TBNET_LOGOUT_TIMEOUT 100
#define TBNET_RING_SIZE 256
-#define TBNET_LOCAL_PATH 0xf
#define TBNET_LOGIN_RETRIES 60
#define TBNET_LOGOUT_RETRIES 5
#define TBNET_MATCH_FRAGS_ID BIT(1)
@@ -154,8 +153,8 @@ struct tbnet_ring {
* @login_sent: ThunderboltIP login message successfully sent
* @login_received: ThunderboltIP login message received from the remote
* host
- * @transmit_path: HopID the other end needs to use building the
- * opposite side path.
+ * @local_transmit_path: HopID we are using to send out packets
+ * @remote_transmit_path: HopID the other end is using to send packets to us
* @connection_lock: Lock serializing access to @login_sent,
* @login_received and @transmit_path.
* @login_retries: Number of login retries currently done
@@ -184,7 +183,8 @@ struct tbnet {
atomic_t command_id;
bool login_sent;
bool login_received;
- u32 transmit_path;
+ int local_transmit_path;
+ int remote_transmit_path;
struct mutex connection_lock;
int login_retries;
struct delayed_work login_work;
@@ -257,7 +257,7 @@ static int tbnet_login_request(struct tbnet *net, u8 sequence)
atomic_inc_return(&net->command_id));
request.proto_version = TBIP_LOGIN_PROTO_VERSION;
- request.transmit_path = TBNET_LOCAL_PATH;
+ request.transmit_path = net->local_transmit_path;
return tb_xdomain_request(xd, &request, sizeof(request),
TB_CFG_PKG_XDOMAIN_RESP, &reply,
@@ -364,10 +364,10 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
mutex_lock(&net->connection_lock);
if (net->login_sent && net->login_received) {
- int retries = TBNET_LOGOUT_RETRIES;
+ int ret, retries = TBNET_LOGOUT_RETRIES;
while (send_logout && retries-- > 0) {
- int ret = tbnet_logout_request(net);
+ ret = tbnet_logout_request(net);
if (ret != -ETIMEDOUT)
break;
}
@@ -377,8 +377,16 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
tbnet_free_buffers(&net->rx_ring);
tbnet_free_buffers(&net->tx_ring);
- if (tb_xdomain_disable_paths(net->xd))
+ ret = tb_xdomain_disable_paths(net->xd,
+ net->local_transmit_path,
+ net->rx_ring.ring->hop,
+ net->remote_transmit_path,
+ net->tx_ring.ring->hop);
+ if (ret)
netdev_warn(net->dev, "failed to disable DMA paths\n");
+
+ tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
+ net->remote_transmit_path = 0;
}
net->login_retries = 0;
@@ -424,7 +432,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
if (!ret) {
mutex_lock(&net->connection_lock);
net->login_received = true;
- net->transmit_path = pkg->transmit_path;
+ net->remote_transmit_path = pkg->transmit_path;
/* If we reached the number of max retries or
* previous logout, schedule another round of
@@ -597,12 +605,18 @@ static void tbnet_connected_work(struct work_struct *work)
if (!connected)
return;
+ ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
+ if (ret != net->remote_transmit_path) {
+ netdev_err(net->dev, "failed to allocate Rx HopID\n");
+ return;
+ }
+
/* Both logins successful so enable the high-speed DMA paths and
* start the network device queue.
*/
- ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH,
+ ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
net->rx_ring.ring->hop,
- net->transmit_path,
+ net->remote_transmit_path,
net->tx_ring.ring->hop);
if (ret) {
netdev_err(net->dev, "failed to enable DMA paths\n");
@@ -629,6 +643,7 @@ err_free_rx_buffers:
err_stop_rings:
tb_ring_stop(net->rx_ring.ring);
tb_ring_stop(net->tx_ring.ring);
+ tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
}
static void tbnet_login_work(struct work_struct *work)
@@ -851,6 +866,7 @@ static int tbnet_open(struct net_device *dev)
struct tb_xdomain *xd = net->xd;
u16 sof_mask, eof_mask;
struct tb_ring *ring;
+ int hopid;
netif_carrier_off(dev);
@@ -862,6 +878,15 @@ static int tbnet_open(struct net_device *dev)
}
net->tx_ring.ring = ring;
+ hopid = tb_xdomain_alloc_out_hopid(xd, -1);
+ if (hopid < 0) {
+ netdev_err(dev, "failed to allocate Tx HopID\n");
+ tb_ring_free(net->tx_ring.ring);
+ net->tx_ring.ring = NULL;
+ return hopid;
+ }
+ net->local_transmit_path = hopid;
+
sof_mask = BIT(TBIP_PDF_FRAME_START);
eof_mask = BIT(TBIP_PDF_FRAME_END);
@@ -893,6 +918,8 @@ static int tbnet_stop(struct net_device *dev)
tb_ring_free(net->rx_ring.ring);
net->rx_ring.ring = NULL;
+
+ tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path);
tb_ring_free(net->tx_ring.ring);
net->tx_ring.ring = NULL;