author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-12 01:27:06 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-12 01:27:06 +0300
commit    70e71ca0af244f48a5dcf56dc435243792e3a495 (patch)
tree      f7d9c4c4d9a857a00043e9bf6aa2d6f533a34778 /drivers/scsi/cxgbi
parent    bae41e45b7400496b9bf0c70c6004419d9987819 (diff)
parent    00c83b01d58068dfeb2e1351cca6fccf2a83fa8f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) New offloading infrastructure and example 'rocker' driver for offloading of switching and routing to hardware. This work was done by a large group of dedicated individuals, not limited to: Scott Feldman, Jiri Pirko, Thomas Graf, John Fastabend, Jamal Hadi Salim, Andy Gospodarek, Florian Fainelli, Roopa Prabhu

 2) Start making the networking stack operate on IOV iterators instead of modifying iov objects in-situ during transfers. Thanks to Al Viro and Herbert Xu.

 3) A set of new netlink interfaces for the TIPC stack, from Richard Alpe.

 4) Remove unnecessary looping during ipv6 routing lookups, from Martin KaFai Lau.

 5) Add PAUSE frame generation support to the gianfar driver, from Matei Pavaluca.

 6) Allow for larger reordering levels in TCP, which are easily achievable in the real world right now, from Eric Dumazet.

 7) Add a variant of napi_schedule that doesn't need to disable CPU interrupts, from Eric Dumazet.

 8) Use a doubly linked list to optimize neigh_parms_release(), from Nicolas Dichtel.

 9) Various enhancements to the kernel BPF verifier, and allow eBPF programs to actually be attached to sockets. From Alexei Starovoitov.

10) Support TSO/LSO in the sunvnet driver, from David L Stevens.

11) Allow controlling ECN usage via routing metrics, from Florian Westphal.

12) Remote checksum offload, from Tom Herbert.

13) Add split-header receive, BQL, and xmit_more support to the amd-xgbe driver, from Thomas Lendacky.

14) Add MPLS support to openvswitch, from Simon Horman.

15) Support wildcard tunnel endpoints in ipv6 tunnels, from Steffen Klassert.

16) Do GRO flushes on a per-device basis using a timer, from Eric Dumazet. This tries to reconcile the conflicting goals of handling bulk vs. RPC-like traffic.

17) Allow userspace to ask for the CPU on which a packet was received/steered, via SO_INCOMING_CPU. From Eric Dumazet. (A userspace sketch follows the shortlog below.)

18) Limit GSO packets to half the current congestion window, from Eric Dumazet.

19) Add a generic helper so that all drivers set their RSS keys in a consistent way, from Eric Dumazet.

20) Add xmit_more support to the enic driver, from Govindarajulu Varadarajan.

21) Add a VLAN packet scheduler action, from Jiri Pirko.

22) Support configurable RSS hash functions via ethtool, from Eyal Perry.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1820 commits)
  Fix race condition between vxlan_sock_add and vxlan_sock_release
  net/macb: fix compilation warning for print_hex_dump() called with skb->mac_header
  net/mlx4: Add support for A0 steering
  net/mlx4: Refactor QUERY_PORT
  net/mlx4_core: Add explicit error message when rule doesn't meet configuration
  net/mlx4: Add A0 hybrid steering
  net/mlx4: Add mlx4_bitmap zone allocator
  net/mlx4: Add a check if there are too many reserved QPs
  net/mlx4: Change QP allocation scheme
  net/mlx4_core: Use tasklet for user-space CQ completion events
  net/mlx4_core: Mask out host side virtualization features for guests
  net/mlx4_en: Set csum level for encapsulated packets
  be2net: Export tunnel offloads only when a VxLAN tunnel is created
  gianfar: Fix dma check map error when DMA_API_DEBUG is enabled
  cxgb4/csiostor: Don't use MASTER_MUST for fw_hello call
  net: fec: only enable mdio interrupt before phy device link up
  net: fec: clear all interrupt events to support i.MX6SX
  net: fec: reset fep link status in suspend function
  net: sock: fix access via invalid file descriptor
  net: introduce helper macro for_each_cmsghdr
  ...
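Item 17 is easy to exercise from userspace. A minimal sketch, assuming a connected socket fd and kernel/libc headers recent enough to define SO_INCOMING_CPU (the option this merge window introduces); print_incoming_cpu is an illustrative helper name, not kernel code:

#include <stdio.h>
#include <sys/socket.h>

/* Sketch for item 17: ask the kernel which CPU last handled receive
 * processing for this socket. SO_INCOMING_CPU is read with a plain
 * getsockopt() at the SOL_SOCKET level. */
static void print_incoming_cpu(int fd)
{
	int cpu = -1;
	socklen_t len = sizeof(cpu);

	if (getsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len) == 0)
		printf("socket %d last steered to CPU %d\n", fd, cpu);
}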
Diffstat (limited to 'drivers/scsi/cxgbi')
 -rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 113
 1 file changed, 58 insertions(+), 55 deletions(-)
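Context for the diff below: the commit converts cxgb4i from the old cxgb4 bitfield macros (FOO(x), and FOO(1) for flags) to the renamed helpers, where FOO_V(x) places a shifted value and FOO_F is the single-bit flag form. A sketch of the naming scheme; the field positions and masks here are invented for illustration, not the real t4_msg.h definitions:

/* Illustrative sketch of the cxgb4 _S/_M/_V/_G/_F macro convention
 * adopted by the diff below; shift/mask values are made up. */
#define ULP_MODE_S      8                                   /* _S: field bit offset   */
#define ULP_MODE_M      0xfU                                /* _M: unshifted mask     */
#define ULP_MODE_V(x)   ((x) << ULP_MODE_S)                 /* _V: encode a value     */
#define ULP_MODE_G(x)   (((x) >> ULP_MODE_S) & ULP_MODE_M)  /* _G: decode a value     */

#define KEEP_ALIVE_S    12
#define KEEP_ALIVE_V(x) ((x) << KEEP_ALIVE_S)
#define KEEP_ALIVE_F    KEEP_ALIVE_V(1U)                    /* _F: one-bit flag form  */

The _F spelling also replaces bare magic numbers such as (1 << 20) and (1 << 22) in the hunks below, giving each bit a name at its point of use.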
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index e6c3f55d9d36..69fbfc89efb6 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -189,18 +189,18 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
unsigned int qid_atid = ((unsigned int)csk->atid) |
(((unsigned int)csk->rss_qid) << 14);
- opt0 = KEEP_ALIVE(1) |
- WND_SCALE(wscale) |
- MSS_IDX(csk->mss_idx) |
- L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
- TX_CHAN(csk->tx_chan) |
- SMAC_SEL(csk->smac_idx) |
- ULP_MODE(ULP_MODE_ISCSI) |
- RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
- opt2 = RX_CHANNEL(0) |
- RSS_QUEUE_VALID |
- (1 << 20) |
- RSS_QUEUE(csk->rss_qid);
+ opt0 = KEEP_ALIVE_F |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(csk->mss_idx) |
+ L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
+ TX_CHAN_V(csk->tx_chan) |
+ SMAC_SEL_V(csk->smac_idx) |
+ ULP_MODE_V(ULP_MODE_ISCSI) |
+ RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
+ opt2 = RX_CHANNEL_V(0) |
+ RSS_QUEUE_VALID_F |
+ (RX_FC_DISABLE_F) |
+ RSS_QUEUE_V(csk->rss_qid);
if (is_t4(lldi->adapter_type)) {
struct cpl_act_open_req *req =
@@ -217,7 +217,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
req->params = cpu_to_be32(cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t));
- opt2 |= 1 << 22;
+ opt2 |= RX_FC_VALID_F;
req->opt2 = cpu_to_be32(opt2);
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -237,7 +237,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
req->local_ip = csk->saddr.sin_addr.s_addr;
req->peer_ip = csk->daddr.sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be64(V_FILTER_TUPLE(
+ req->params = cpu_to_be64(FILTER_TUPLE_V(
cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t)));
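The two hunks above show the T4/T5 split for the IPv4 active open: T4 packs the ntuple into a 32-bit params field and sets RX_FC_VALID_F in opt2, while T5 routes it through FILTER_TUPLE_V into a 64-bit field. A compressed sketch of that split, keeping only what the hunks themselves show (surrounding code elided, not reconstructed):

/* Sketch of the params split in send_act_open_req(), per the hunks. */
if (is_t4(lldi->adapter_type)) {
	req->params = cpu_to_be32(cxgb4_select_ntuple(
				csk->cdev->ports[csk->port_id], csk->l2t));
	opt2 |= RX_FC_VALID_F;		/* T4-only opt2 bit */
} else {
	req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
				csk->cdev->ports[csk->port_id], csk->l2t)));
}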
@@ -272,19 +272,19 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
unsigned int qid_atid = ((unsigned int)csk->atid) |
(((unsigned int)csk->rss_qid) << 14);
- opt0 = KEEP_ALIVE(1) |
- WND_SCALE(wscale) |
- MSS_IDX(csk->mss_idx) |
- L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
- TX_CHAN(csk->tx_chan) |
- SMAC_SEL(csk->smac_idx) |
- ULP_MODE(ULP_MODE_ISCSI) |
- RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
+ opt0 = KEEP_ALIVE_F |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(csk->mss_idx) |
+ L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
+ TX_CHAN_V(csk->tx_chan) |
+ SMAC_SEL_V(csk->smac_idx) |
+ ULP_MODE_V(ULP_MODE_ISCSI) |
+ RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
- opt2 = RX_CHANNEL(0) |
- RSS_QUEUE_VALID |
- RX_FC_DISABLE |
- RSS_QUEUE(csk->rss_qid);
+ opt2 = RX_CHANNEL_V(0) |
+ RSS_QUEUE_VALID_F |
+ RX_FC_DISABLE_F |
+ RSS_QUEUE_V(csk->rss_qid);
if (t4) {
struct cpl_act_open_req6 *req =
@@ -305,7 +305,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
req->opt0 = cpu_to_be64(opt0);
- opt2 |= RX_FC_VALID;
+ opt2 |= RX_FC_VALID_F;
req->opt2 = cpu_to_be32(opt2);
req->params = cpu_to_be32(cxgb4_select_ntuple(
@@ -328,10 +328,10 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
8);
req->opt0 = cpu_to_be64(opt0);
- opt2 |= T5_OPT_2_VALID;
+ opt2 |= T5_OPT_2_VALID_F;
req->opt2 = cpu_to_be32(opt2);
- req->params = cpu_to_be64(V_FILTER_TUPLE(cxgb4_select_ntuple(
+ req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t)));
}
@@ -452,7 +452,8 @@ static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
csk->tid));
- req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
+ req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
+ | RX_FORCE_ACK_F);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
return credits;
}
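With the convention sketched earlier, the credit_dack change above is a pure renaming; old and new spellings encode identical bits. A sketch of the equivalence, with bit positions assumed rather than taken from t4_msg.h:

/* Sketch only: shift values below are assumptions for illustration. */
#define RX_CREDITS_S      0
#define RX_CREDITS_V(x)   ((x) << RX_CREDITS_S)
#define RX_FORCE_ACK_S    28
#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S)
#define RX_FORCE_ACK_F    RX_FORCE_ACK_V(1U)

/* old: RX_CREDITS(credits)   | RX_FORCE_ACK(1)
 * new: RX_CREDITS_V(credits) | RX_FORCE_ACK_F   -- same wire encoding */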
@@ -500,10 +501,10 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
flowc = (struct fw_flowc_wr *)skb->head;
flowc->op_to_nparams =
- htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8));
+ htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(8));
flowc->flowid_len16 =
- htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) |
- FW_WR_FLOWID(csk->tid));
+ htonl(FW_WR_LEN16_V(DIV_ROUND_UP(72, 16)) |
+ FW_WR_FLOWID_V(csk->tid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
@@ -543,30 +544,31 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
{
struct fw_ofld_tx_data_wr *req;
unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
- unsigned int wr_ulp_mode = 0;
+ unsigned int wr_ulp_mode = 0, val;
req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
if (is_ofld_imm(skb)) {
- req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL(1) |
- FW_WR_IMMDLEN(dlen));
- req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) |
- FW_WR_LEN16(credits));
+ req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(dlen));
+ req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
+ FW_WR_LEN16_V(credits));
} else {
req->op_to_immdlen =
- cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL(1) |
- FW_WR_IMMDLEN(0));
+ cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(0));
req->flowid_len16 =
- cpu_to_be32(FW_WR_FLOWID(csk->tid) |
- FW_WR_LEN16(credits));
+ cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
+ FW_WR_LEN16_V(credits));
}
if (submode)
- wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) |
- FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode);
+ wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
+ FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
+ val = skb_peek(&csk->write_queue) ? 0 : 1;
req->tunnel_to_proxy = htonl(wr_ulp_mode |
- FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1));
+ FW_OFLD_TX_DATA_WR_SHOVE_V(val));
req->plen = htonl(len);
if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
@@ -1445,16 +1447,16 @@ static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
INIT_ULPTX_WR(req, wr_len, 0, 0);
if (is_t4(lldi->adapter_type))
- req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
- (ULP_MEMIO_ORDER(1)));
+ req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+ (ULP_MEMIO_ORDER_F));
else
- req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
- (V_T5_ULP_MEMIO_IMM(1)));
- req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
- req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
+ req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+ (T5_ULP_MEMIO_IMM_F));
+ req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
+ req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
- idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM));
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
idata->len = htonl(dlen);
}
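The hunk above preserves the chip-revision split in ulp_mem_io_set_hdr(): both generations issue ULP_TX_MEM_WRITE, but T4 sets an ordering flag while T5 flags immediate (inline) data. A hedged sketch of that dispatch, restructured for readability; only the T4-vs-T5 split mirrors the hunk, the flags' bit values are not shown here:

/* Sketch of the T4-vs-T5 command selection, per the hunk above. */
u32 cmd = ULPTX_CMD_V(ULP_TX_MEM_WRITE);

if (is_t4(lldi->adapter_type))
	cmd |= ULP_MEMIO_ORDER_F;	/* T4: enforce memory-write ordering */
else
	cmd |= T5_ULP_MEMIO_IMM_F;	/* T5+: payload carried inline       */
req->cmd = htonl(cmd);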
@@ -1678,7 +1680,8 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
cdev->itp = &cxgb4i_iscsi_transport;
- cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8;
+ cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
+ << FW_VIID_PFN_S;
pr_info("cdev 0x%p,%s, pfvf %u.\n",
cdev, lldi->ports[0]->name, cdev->pfvf);
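The final hunk swaps a hard-coded "<< 8" for the named FW_VIID_PFN_S shift when rebuilding pfvf from the PF number extracted out of the VI id. A sketch of the getter half of the naming scheme; the VIID field layout below is an assumption, not the real firmware header definition:

/* Sketch of the _G getter used in the final hunk; layout assumed. */
#define FW_VIID_PFN_S    8
#define FW_VIID_PFN_M    0x7U
#define FW_VIID_PFN_G(x) (((x) >> FW_VIID_PFN_S) & FW_VIID_PFN_M)

/* cdev->pfvf re-shifts the extracted PF back into position:
 *   cdev->pfvf = FW_VIID_PFN_G(viid) << FW_VIID_PFN_S;        */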