author    Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>  2019-11-07 18:59:09 +0300
committer David S. Miller <davem@davemloft.net>            2019-11-07 21:41:59 +0300
commit    0e395b3cb1fb82f5d056fd5425025a77da4d4f62
tree      b2450cf1b69e4e8927ab89792c8dd091e2f74bca /drivers/net/ethernet/chelsio/cxgb4/sched.c
parent    4846d5330dafc82990be7ffe1d1b383157268bd9
download  linux-0e395b3cb1fb82f5d056fd5425025a77da4d4f62.tar.xz
cxgb4: add FLOWC based QoS offload
Rework the SCHED API to allow offloading TC-MQPRIO QoS configuration.

The existing QUEUE based rate limiting throttles all queues sharing a
traffic class to the specified max rate limit value, so if multiple
queues share a traffic class, they only get the aggregate specified max
rate limit between them. Introduce FLOWC based rate limiting, where
multiple queues can share a traffic class with each queue getting its
own individual specified max rate limit.

For example, if 2 queues are bound to class 0, which is rate limited to
1 Gbps, then with QUEUE based rate limiting the 2 queues get an
aggregate output of only 1 Gbps. With FLOWC based rate limiting, each
queue gets its own output of up to 1 Gbps; i.e. 2 queues * 1 Gbps rate
limit = 2 Gbps.

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
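For illustration only (not part of this patch), a minimal caller sketch of the
new FLOWC bind path: allocate a FLOW-mode class and bind an ETHOFLD TID to it,
so the queue backing that TID gets its own rate limit. The helper name
example_bind_eotid_to_class, the zero-initialized rate parameters and the
assumption that the cxgb4 sched headers are in scope are illustrative; only
cxgb4_sched_class_alloc(), cxgb4_sched_class_bind(), SCHED_CLASS_MODE_FLOW and
SCHED_FLOWC come from the patch itself.

/* Hypothetical caller sketch (assumes cxgb4.h / sched.h are included).
 * Rate/peak parameters are omitted here and would be filled in from the
 * TC-MQPRIO configuration in real code.
 */
static int example_bind_eotid_to_class(struct net_device *dev, u32 eotid)
{
	struct ch_sched_params cp = { 0 };
	struct ch_sched_flowc fc = { 0 };
	struct sched_class *e;

	/* Request a FLOW mode class; per this patch, only FLOW mode
	 * classes may be shared by multiple FLOWC entries.
	 */
	cp.u.params.mode = SCHED_CLASS_MODE_FLOW;

	e = cxgb4_sched_class_alloc(dev, &cp);
	if (!e)
		return -ENOMEM;

	/* Bind the ETHOFLD TID to the class; this ends up issuing a
	 * FLOWC work request via cxgb4_ethofld_send_flowc().
	 */
	fc.tid = eotid;
	fc.class = e->idx;
	return cxgb4_sched_class_bind(dev, &fc, SCHED_FLOWC);
}

On teardown, once the TID is unbound, the class would be released with
cxgb4_sched_class_free(dev, e->idx), which only clears the class when its
refcount has dropped to zero.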
Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4/sched.c')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c  229
1 file changed, 183 insertions(+), 46 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 60218dc676a8..0a98c4dbb36b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -92,45 +92,69 @@ static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
pf = adap->pf;
vf = 0;
+
+ err = t4_set_params(adap, adap->mbox, pf, vf, 1,
+ &fw_param, &fw_class);
+ break;
+ }
+ case SCHED_FLOWC: {
+ struct sched_flowc_entry *fe;
+
+ fe = (struct sched_flowc_entry *)arg;
+
+ fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
+ err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
+ fe->param.tid, fw_class);
break;
}
default:
err = -ENOTSUPP;
- goto out;
+ break;
}
- err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);
-
-out:
return err;
}
-static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
- const unsigned int qid,
- int *index)
+static void *t4_sched_entry_lookup(struct port_info *pi,
+ enum sched_bind_type type,
+ const u32 val)
{
struct sched_table *s = pi->sched_tbl;
struct sched_class *e, *end;
- struct sched_class *found = NULL;
- int i;
+ void *found = NULL;
- /* Look for a class with matching bound queue parameters */
+ /* Look for an entry with matching @val */
end = &s->tab[s->sched_size];
for (e = &s->tab[0]; e != end; ++e) {
- struct sched_queue_entry *qe;
-
- i = 0;
- if (e->state == SCHED_STATE_UNUSED)
+ if (e->state == SCHED_STATE_UNUSED ||
+ e->bind_type != type)
continue;
- list_for_each_entry(qe, &e->queue_list, list) {
- if (qe->cntxt_id == qid) {
- found = e;
- if (index)
- *index = i;
- break;
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct sched_queue_entry *qe;
+
+ list_for_each_entry(qe, &e->entry_list, list) {
+ if (qe->cntxt_id == val) {
+ found = qe;
+ break;
+ }
}
- i++;
+ break;
+ }
+ case SCHED_FLOWC: {
+ struct sched_flowc_entry *fe;
+
+ list_for_each_entry(fe, &e->entry_list, list) {
+ if (fe->param.tid == val) {
+ found = fe;
+ break;
+ }
+ }
+ break;
+ }
+ default:
+ return NULL;
}
if (found)
@@ -142,35 +166,26 @@ static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
- struct adapter *adap = pi->adapter;
- struct sched_class *e;
struct sched_queue_entry *qe = NULL;
+ struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- unsigned int qid;
- int index = -1;
+ struct sched_class *e;
int err = 0;
if (p->queue < 0 || p->queue >= pi->nqsets)
return -ERANGE;
txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
- qid = txq->q.cntxt_id;
-
- /* Find the existing class that the queue is bound to */
- e = t4_sched_queue_lookup(pi, qid, &index);
- if (e && index >= 0) {
- int i = 0;
- list_for_each_entry(qe, &e->queue_list, list) {
- if (i == index)
- break;
- i++;
- }
+ /* Find the existing entry that the queue is bound to */
+ qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+ if (qe) {
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
false);
if (err)
return err;
+ e = &pi->sched_tbl->tab[qe->param.class];
list_del(&qe->list);
kvfree(qe);
if (atomic_dec_and_test(&e->refcnt)) {
@@ -183,11 +198,11 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
- struct adapter *adap = pi->adapter;
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e;
struct sched_queue_entry *qe = NULL;
+ struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
+ struct sched_class *e;
unsigned int qid;
int err = 0;
@@ -215,7 +230,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
if (err)
goto out_err;
- list_add_tail(&qe->list, &e->queue_list);
+ list_add_tail(&qe->list, &e->entry_list);
+ e->bind_type = SCHED_QUEUE;
atomic_inc(&e->refcnt);
return err;
@@ -224,6 +240,73 @@ out_err:
return err;
}
+static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
+{
+ struct sched_flowc_entry *fe = NULL;
+ struct adapter *adap = pi->adapter;
+ struct sched_class *e;
+ int err = 0;
+
+ if (p->tid < 0 || p->tid >= adap->tids.neotids)
+ return -ERANGE;
+
+ /* Find the existing entry that the flowc is bound to */
+ fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
+ if (fe) {
+ err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
+ false);
+ if (err)
+ return err;
+
+ e = &pi->sched_tbl->tab[fe->param.class];
+ list_del(&fe->list);
+ kvfree(fe);
+ if (atomic_dec_and_test(&e->refcnt)) {
+ e->state = SCHED_STATE_UNUSED;
+ memset(&e->info, 0, sizeof(e->info));
+ }
+ }
+ return err;
+}
+
+static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
+{
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_flowc_entry *fe = NULL;
+ struct adapter *adap = pi->adapter;
+ struct sched_class *e;
+ int err = 0;
+
+ if (p->tid < 0 || p->tid >= adap->tids.neotids)
+ return -ERANGE;
+
+ fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
+ if (!fe)
+ return -ENOMEM;
+
+ /* Unbind flowc from any existing class */
+ err = t4_sched_flowc_unbind(pi, p);
+ if (err)
+ goto out_err;
+
+ /* Bind flowc to specified class */
+ memcpy(&fe->param, p, sizeof(fe->param));
+
+ e = &s->tab[fe->param.class];
+ err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
+ if (err)
+ goto out_err;
+
+ list_add_tail(&fe->list, &e->entry_list);
+ e->bind_type = SCHED_FLOWC;
+ atomic_inc(&e->refcnt);
+ return err;
+
+out_err:
+ kvfree(fe);
+ return err;
+}
+
static void t4_sched_class_unbind_all(struct port_info *pi,
struct sched_class *e,
enum sched_bind_type type)
@@ -235,10 +318,17 @@ static void t4_sched_class_unbind_all(struct port_info *pi,
case SCHED_QUEUE: {
struct sched_queue_entry *qe;
- list_for_each_entry(qe, &e->queue_list, list)
+ list_for_each_entry(qe, &e->entry_list, list)
t4_sched_queue_unbind(pi, &qe->param);
break;
}
+ case SCHED_FLOWC: {
+ struct sched_flowc_entry *fe;
+
+ list_for_each_entry(fe, &e->entry_list, list)
+ t4_sched_flowc_unbind(pi, &fe->param);
+ break;
+ }
default:
break;
}
@@ -262,6 +352,15 @@ static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
err = t4_sched_queue_unbind(pi, qe);
break;
}
+ case SCHED_FLOWC: {
+ struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+ if (bind)
+ err = t4_sched_flowc_bind(pi, fe);
+ else
+ err = t4_sched_flowc_unbind(pi, fe);
+ break;
+ }
default:
err = -ENOTSUPP;
break;
@@ -299,6 +398,12 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
class_id = qe->class;
break;
}
+ case SCHED_FLOWC: {
+ struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+ class_id = fe->class;
+ break;
+ }
default:
return -ENOTSUPP;
}
@@ -340,6 +445,12 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
class_id = qe->class;
break;
}
+ case SCHED_FLOWC: {
+ struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+ class_id = fe->class;
+ break;
+ }
default:
return -ENOTSUPP;
}
@@ -355,10 +466,13 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
const struct ch_sched_params *p)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e, *end;
struct sched_class *found = NULL;
+ struct sched_class *e, *end;
- if (!p) {
+ /* Only allow tc to be shared among SCHED_FLOWC types. For
+ * other types, always allocate a new tc.
+ */
+ if (!p || p->u.params.mode != SCHED_CLASS_MODE_FLOW) {
/* Get any available unused class */
end = &s->tab[s->sched_size];
for (e = &s->tab[0]; e != end; ++e) {
@@ -467,9 +581,32 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
return t4_sched_class_alloc(pi, p);
}
-static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
+/**
+ * cxgb4_sched_class_free - free a scheduling class
+ * @dev: net_device pointer
+ * @classid: class id to free
+ *
+ * Frees a scheduling class if there are no users.
+ */
+void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_class *e;
+
+ e = &s->tab[classid];
+ if (!atomic_read(&e->refcnt)) {
+ e->state = SCHED_STATE_UNUSED;
+ memset(&e->info, 0, sizeof(e->info));
+ }
+}
+
+static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
{
- t4_sched_class_unbind_all(pi, e, SCHED_QUEUE);
+ struct port_info *pi = netdev2pinfo(dev);
+
+ t4_sched_class_unbind_all(pi, e, e->bind_type);
+ cxgb4_sched_class_free(dev, e->idx);
}
struct sched_table *t4_init_sched(unsigned int sched_size)
@@ -487,7 +624,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
memset(&s->tab[i], 0, sizeof(struct sched_class));
s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED;
- INIT_LIST_HEAD(&s->tab[i].queue_list);
+ INIT_LIST_HEAD(&s->tab[i].entry_list);
atomic_set(&s->tab[i].refcnt, 0);
}
return s;
@@ -510,7 +647,7 @@ void t4_cleanup_sched(struct adapter *adap)
e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE)
- t4_sched_class_free(pi, e);
+ t4_sched_class_free(adap->port[j], e);
}
kvfree(s);
}