author		Mathias Nyman <mathias.nyman@linux.intel.com>	2020-07-23 17:45:25 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2020-07-23 18:05:28 +0300
commit		e0aa56dc7b18c80b12a9afd23d90aa6d186f639a (patch)
tree		a3a7f3ce83047b4e566e945391df071ecf46828e	/drivers/usb/host/xhci-dbgcap.c
parent		11e229a7585160c33228a9789405a504b8150525 (diff)
download	linux-e0aa56dc7b18c80b12a9afd23d90aa6d186f639a.tar.xz
xhci: dbc: simplify dbc requests allocation and queueing
Don't pass an endpoint pointer; dbctty should not be aware of struct
dbc_ep, knowing the direction is enough.

Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Link: https://lore.kernel.org/r/20200723144530.9992-23-mathias.nyman@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/usb/host/xhci-dbgcap.c')
-rw-r--r--	drivers/usb/host/xhci-dbgcap.c	43
1 file changed, 28 insertions(+), 15 deletions(-)
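
For orientation, here is a minimal caller sketch of the reworked request API, based only on the signatures visible in the hunks below (dbc_alloc_request(), dbc_ep_queue(), dbc_free_request() and the req->buf/req->length fields). The function name, the 1024-byte buffer size and the error handling are illustrative, not taken from the dbctty code.

/*
 * Illustrative sketch (not part of this patch): allocate and queue one
 * bulk OUT request using the reworked API, where the request carries its
 * xhci_dbc pointer and direction instead of a struct dbc_ep.
 */
#include <linux/slab.h>
#include "xhci-dbgcap.h"

static int dbc_queue_one_out_request(struct xhci_dbc *dbc)
{
	struct dbc_request	*req;
	int			ret;

	/* Direction is now passed explicitly; no struct dbc_ep needed. */
	req = dbc_alloc_request(dbc, BULK_OUT, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->buf = kzalloc(1024, GFP_KERNEL);	/* buffer size is illustrative */
	if (!req->buf) {
		dbc_free_request(req);
		return -ENOMEM;
	}
	req->length = 1024;

	/* Queueing only takes the request; dbc and direction come from it. */
	ret = dbc_ep_queue(req);
	if (ret) {
		kfree(req->buf);
		dbc_free_request(req);
	}

	return ret;
}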
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index af6e3622e53a..e3eec628edb5 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -135,8 +135,7 @@ static void xhci_dbc_giveback(struct dbc_request *req, int status)
__releases(&dbc->lock)
__acquires(&dbc->lock)
{
- struct dbc_ep *dep = req->dep;
- struct xhci_dbc *dbc = dep->dbc;
+ struct xhci_dbc *dbc = req->dbc;
struct device *dev = dbc->dev;
list_del_init(&req->list_pending);
@@ -151,7 +150,7 @@ static void xhci_dbc_giveback(struct dbc_request *req, int status)
dma_unmap_single(dev,
req->dma,
req->length,
- dbc_ep_dma_direction(dep));
+ dbc_ep_dma_direction(req));
/* Give back the transfer request: */
spin_unlock(&dbc->lock);
@@ -187,18 +186,25 @@ static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
}
struct dbc_request *
-dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
+dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
{
struct dbc_request *req;
- req = kzalloc(sizeof(*req), gfp_flags);
+ if (direction != BULK_IN &&
+ direction != BULK_OUT)
+ return NULL;
+
+ if (!dbc)
+ return NULL;
+
+ req = kzalloc(sizeof(*req), flags);
if (!req)
return NULL;
- req->dep = dep;
+ req->dbc = dbc;
INIT_LIST_HEAD(&req->list_pending);
INIT_LIST_HEAD(&req->list_pool);
- req->direction = dep->direction;
+ req->direction = direction;
trace_xhci_dbc_alloc_request(req);
@@ -206,7 +212,7 @@ dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
}
void
-dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
+dbc_free_request(struct dbc_request *req)
{
trace_xhci_dbc_free_request(req);
@@ -242,7 +248,7 @@ static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
u64 addr;
union xhci_trb *trb;
unsigned int num_trbs;
- struct xhci_dbc *dbc = dep->dbc;
+ struct xhci_dbc *dbc = req->dbc;
struct xhci_ring *ring = dep->ring;
u32 length, control, cycle;
@@ -286,11 +292,12 @@ static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
}
static int
-dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
+dbc_ep_do_queue(struct dbc_request *req)
{
int ret;
- struct xhci_dbc *dbc = dep->dbc;
+ struct xhci_dbc *dbc = req->dbc;
struct device *dev = dbc->dev;
+ struct dbc_ep *dep = &dbc->eps[req->direction];
if (!req->length || !req->buf)
return -EINVAL;
@@ -322,16 +329,22 @@ dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
return 0;
}
-int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
- gfp_t gfp_flags)
+int dbc_ep_queue(struct dbc_request *req)
{
unsigned long flags;
- struct xhci_dbc *dbc = dep->dbc;
+ struct xhci_dbc *dbc = req->dbc;
int ret = -ESHUTDOWN;
+ if (!dbc)
+ return -ENODEV;
+
+ if (req->direction != BULK_IN &&
+ req->direction != BULK_OUT)
+ return -EINVAL;
+
spin_lock_irqsave(&dbc->lock, flags);
if (dbc->state == DS_CONFIGURED)
- ret = dbc_ep_do_queue(dep, req);
+ ret = dbc_ep_do_queue(req);
spin_unlock_irqrestore(&dbc->lock, flags);
mod_delayed_work(system_wq, &dbc->event_work, 0);