From 1566e7d6206fed959258a17c694834a4b801a3b5 Mon Sep 17 00:00:00 2001
From: Dexuan Cui
Date: Tue, 14 Jun 2022 13:28:54 -0700
Subject: net: mana: Add the Linux MANA PF driver

This minimal PF driver runs on bare metal. Currently Ethernet TX/RX
works. SR-IOV management is not supported yet.

Signed-off-by: Dexuan Cui
Co-developed-by: Haiyang Zhang
Signed-off-by: Haiyang Zhang
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/microsoft/mana/gdma.h       |  10 ++
 drivers/net/ethernet/microsoft/mana/gdma_main.c  |  39 ++++++-
 drivers/net/ethernet/microsoft/mana/hw_channel.c |  18 ++-
 drivers/net/ethernet/microsoft/mana/hw_channel.h |   5 +
 drivers/net/ethernet/microsoft/mana/mana.h       |  64 +++++++++++
 drivers/net/ethernet/microsoft/mana/mana_en.c    | 135 +++++++++++++++++++++++
 6 files changed, 267 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
index 41ecd156e95f..4a6efe6ada08 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma.h
+++ b/drivers/net/ethernet/microsoft/mana/gdma.h
@@ -348,6 +348,7 @@ struct gdma_context {
 	struct completion	eq_test_event;
 	u32			test_event_eq_id;
 
+	bool			is_pf;
 	void __iomem		*bar0_va;
 	void __iomem		*shm_base;
 	void __iomem		*db_page_base;
@@ -469,6 +470,15 @@ struct gdma_eqe {
 #define GDMA_REG_DB_PAGE_SIZE	0x10
 #define GDMA_REG_SHM_OFFSET	0x18
 
+#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
+#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
+#define GDMA_PF_REG_SHM_OFF		0x70
+
+#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108
+
+#define MANA_PF_DEVICE_ID		0x00B9
+#define MANA_VF_DEVICE_ID		0x00BA
+
 struct gdma_posted_wqe_info {
 	u32 wqe_size_in_bu;
 };
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 49b85ca578b0..5f9240182351 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -18,7 +18,24 @@ static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
 	return readq(g->bar0_va + offset);
 }
 
-static void mana_gd_init_registers(struct pci_dev *pdev)
+static void mana_gd_init_pf_regs(struct pci_dev *pdev)
+{
+	struct gdma_context *gc = pci_get_drvdata(pdev);
+	void __iomem *sriov_base_va;
+	u64 sriov_base_off;
+
+	gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
+	gc->db_page_base = gc->bar0_va +
+			   mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
+	sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
+
+	sriov_base_va = gc->bar0_va + sriov_base_off;
+	gc->shm_base = sriov_base_va +
+		       mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
+}
+
+static void mana_gd_init_vf_regs(struct pci_dev *pdev)
 {
 	struct gdma_context *gc = pci_get_drvdata(pdev);
 
@@ -30,6 +47,16 @@ static void mana_gd_init_registers(struct pci_dev *pdev)
 	gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
 }
 
+static void mana_gd_init_registers(struct pci_dev *pdev)
+{
+	struct gdma_context *gc = pci_get_drvdata(pdev);
+
+	if (gc->is_pf)
+		mana_gd_init_pf_regs(pdev);
+	else
+		mana_gd_init_vf_regs(pdev);
+}
+
 static int mana_gd_query_max_resources(struct pci_dev *pdev)
 {
 	struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -1304,6 +1331,11 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
 	mana_gd_remove_irqs(pdev);
 }
 
+static bool mana_is_pf(unsigned short dev_id)
+{
+	return dev_id == MANA_PF_DEVICE_ID;
+}
+
 static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct gdma_context *gc;
@@ -1340,10 +1372,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!bar0_va)
 		goto free_gc;
 
+	gc->is_pf = mana_is_pf(pdev->device);
 	gc->bar0_va = bar0_va;
 	gc->dev = &pdev->dev;
 
-
 	err = mana_gd_setup(pdev);
 	if (err)
 		goto unmap_bar;
@@ -1438,7 +1470,8 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
 #endif
 
 static const struct pci_device_id mana_id_table[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
 	{ }
 };
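The PF path above discovers its doorbell and shared-memory registers with one
more level of indirection than the VF path: BAR0 holds the offset of an SR-IOV
config region, and the SHM offset is read from within that region. A standalone
sketch (not driver code) of the same address arithmetic, with the MMIO reads
(readl()/readq() in the real driver) stubbed as plain memory loads:

	#include <stdint.h>
	#include <string.h>

	#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
	#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
	#define GDMA_PF_REG_SHM_OFF		0x70
	#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

	struct pf_regs {
		uint32_t db_page_size;
		const uint8_t *db_page_base;
		const uint8_t *shm_base;
	};

	/* Stub "register reads"; the driver does these via readl()/readq(). */
	static uint32_t r32(const uint8_t *bar0, uint64_t off)
	{
		uint32_t v;
		memcpy(&v, bar0 + off, sizeof(v));
		return v;
	}

	static uint64_t r64(const uint8_t *bar0, uint64_t off)
	{
		uint64_t v;
		memcpy(&v, bar0 + off, sizeof(v));
		return v;
	}

	static void init_pf_regs(const uint8_t *bar0, struct pf_regs *regs)
	{
		/* Doorbell size/base come from fixed BAR0 offsets. */
		regs->db_page_size = r32(bar0, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
		regs->db_page_base = bar0 + r64(bar0, GDMA_PF_REG_DB_PAGE_OFF);

		/* SHM base is doubly indirect: BAR0 gives the SR-IOV config
		 * region's offset; the SHM offset is read relative to it.
		 */
		uint64_t sriov_base_off = r64(bar0, GDMA_SRIOV_REG_CFG_BASE_OFF);

		regs->shm_base = bar0 + sriov_base_off +
				 r64(bar0, sriov_base_off + GDMA_PF_REG_SHM_OFF);
	}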
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 078d6a5a0768..543a5d5c304f 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -158,6 +158,14 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
 			hwc->rxq->msg_buf->gpa_mkey = val;
 			hwc->txq->msg_buf->gpa_mkey = val;
 			break;
+
+		case HWC_INIT_DATA_PF_DEST_RQ_ID:
+			hwc->pf_dest_vrq_id = val;
+			break;
+
+		case HWC_INIT_DATA_PF_DEST_CQ_ID:
+			hwc->pf_dest_vrcq_id = val;
+			break;
 		}
 
 		break;
@@ -773,10 +781,13 @@ void mana_hwc_destroy_channel(struct gdma_context *gc)
 int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
 			  const void *req, u32 resp_len, void *resp)
 {
+	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
 	struct hwc_work_request *tx_wr;
 	struct hwc_wq *txq = hwc->txq;
 	struct gdma_req_hdr *req_msg;
 	struct hwc_caller_ctx *ctx;
+	u32 dest_vrcq = 0;
+	u32 dest_vrq = 0;
 	u16 msg_id;
 	int err;
 
@@ -803,7 +814,12 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
 
 	tx_wr->msg_size = req_len;
 
-	err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
+	if (gc->is_pf) {
+		dest_vrq = hwc->pf_dest_vrq_id;
+		dest_vrcq = hwc->pf_dest_vrcq_id;
+	}
+
+	err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
 	if (err) {
 		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
 		goto out;
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.h b/drivers/net/ethernet/microsoft/mana/hw_channel.h
index 31c6e83c454a..6a757a6e2732 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.h
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.h
@@ -20,6 +20,8 @@
 #define HWC_INIT_DATA_MAX_NUM_CQS	7
 #define HWC_INIT_DATA_PDID		8
 #define HWC_INIT_DATA_GPA_MKEY		9
+#define HWC_INIT_DATA_PF_DEST_RQ_ID	10
+#define HWC_INIT_DATA_PF_DEST_CQ_ID	11
 
 /* Structures labeled with "HW DATA" are exchanged with the hardware. All of
  * them are naturally aligned and hence don't need __packed.
@@ -178,6 +180,9 @@ struct hw_channel_context {
 	struct semaphore sema;
 	struct gdma_resource inflight_msg_res;
 
+	u32 pf_dest_vrq_id;
+	u32 pf_dest_vrcq_id;
+
 	struct hwc_caller_ctx *caller_ctx;
 };
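The two new HWC_INIT_DATA_* types follow the channel's existing init pattern:
the device reports one (type, value) pair per init event, the event handler
latches each value into the channel context, and the send path consumes them.
A condensed sketch of that flow, with the context reduced to just the two PF
fields (the real handler also latches queue depths, the PDID, the GPA mkey,
and so on):

	#include <stdbool.h>
	#include <stdint.h>

	#define HWC_INIT_DATA_PF_DEST_RQ_ID	10
	#define HWC_INIT_DATA_PF_DEST_CQ_ID	11

	struct hwc_ctx {
		uint32_t pf_dest_vrq_id;
		uint32_t pf_dest_vrcq_id;
	};

	/* Init events arrive as (type, value) pairs; unknown types are
	 * simply ignored, which is what makes the scheme extensible.
	 */
	static void handle_init_data(struct hwc_ctx *hwc, uint32_t type,
				     uint32_t val)
	{
		switch (type) {
		case HWC_INIT_DATA_PF_DEST_RQ_ID:
			hwc->pf_dest_vrq_id = val;
			break;
		case HWC_INIT_DATA_PF_DEST_CQ_ID:
			hwc->pf_dest_vrcq_id = val;
			break;
		default:
			break;
		}
	}

	/* VF requests go out with destination (0, 0); PF requests name the
	 * latched vRQ/vRCQ explicitly, as in mana_hwc_send_request() above.
	 */
	static void pick_destination(const struct hwc_ctx *hwc, bool is_pf,
				     uint32_t *dest_vrq, uint32_t *dest_vrcq)
	{
		*dest_vrq = is_pf ? hwc->pf_dest_vrq_id : 0;
		*dest_vrcq = is_pf ? hwc->pf_dest_vrcq_id : 0;
	}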
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
index d36405af9432..f198b34c232f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -374,6 +374,7 @@ struct mana_port_context {
 	unsigned int num_queues;
 
 	mana_handle_t port_handle;
+	mana_handle_t pf_filter_handle;
 
 	u16 port_idx;
 
@@ -420,6 +421,12 @@ enum mana_command_code {
 	MANA_FENCE_RQ		= 0x20006,
 	MANA_CONFIG_VPORT_RX	= 0x20007,
 	MANA_QUERY_VPORT_CONFIG	= 0x20008,
+
+	/* Privileged commands for the PF mode */
+	MANA_REGISTER_FILTER	= 0x28000,
+	MANA_DEREGISTER_FILTER	= 0x28001,
+	MANA_REGISTER_HW_PORT	= 0x28003,
+	MANA_DEREGISTER_HW_PORT	= 0x28004,
 };
 
 /* Query Device Configuration */
@@ -547,6 +554,63 @@ struct mana_cfg_rx_steer_resp {
 	struct gdma_resp_hdr hdr;
 }; /* HW DATA */
 
+/* Register HW vPort */
+struct mana_register_hw_vport_req {
+	struct gdma_req_hdr hdr;
+	u16 attached_gfid;
+	u8 is_pf_default_vport;
+	u8 reserved1;
+	u8 allow_all_ether_types;
+	u8 reserved2;
+	u8 reserved3;
+	u8 reserved4;
+}; /* HW DATA */
+
+struct mana_register_hw_vport_resp {
+	struct gdma_resp_hdr hdr;
+	mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+/* Deregister HW vPort */
+struct mana_deregister_hw_vport_req {
+	struct gdma_req_hdr hdr;
+	mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+struct mana_deregister_hw_vport_resp {
+	struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Register filter */
+struct mana_register_filter_req {
+	struct gdma_req_hdr hdr;
+	mana_handle_t vport;
+	u8 mac_addr[6];
+	u8 reserved1;
+	u8 reserved2;
+	u8 reserved3;
+	u8 reserved4;
+	u16 reserved5;
+	u32 reserved6;
+	u32 reserved7;
+	u32 reserved8;
+}; /* HW DATA */
+
+struct mana_register_filter_resp {
+	struct gdma_resp_hdr hdr;
+	mana_handle_t filter_handle;
+}; /* HW DATA */
+
+/* Deregister filter */
+struct mana_deregister_filter_req {
+	struct gdma_req_hdr hdr;
+	mana_handle_t filter_handle;
+}; /* HW DATA */
+
+struct mana_deregister_filter_resp {
+	struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
 #define MANA_MAX_NUM_QUEUES 64
 
 #define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
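Because these structs are "HW DATA" and rely on natural alignment rather than
__packed, compile-time layout checks can document the intended wire sizes. A
sketch of such checks; the byte counts (8 and 32 bytes of payload after the
GDMA request header) are hand-derived here from the field lists above, and
they assume sizeof(struct gdma_req_hdr) is a multiple of 8 so the u64 handle
lands without padding; none of this is asserted by the patch itself:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	/* Payload after the header: u16 + 6 * u8 = 8 bytes. */
	static_assert(sizeof(struct mana_register_hw_vport_req) ==
		      sizeof(struct gdma_req_hdr) + 8);

	/* Payload: u64 handle + 6-byte MAC + 4 * u8 + u16 + 3 * u32 = 32. */
	static_assert(sizeof(struct mana_register_filter_req) ==
		      sizeof(struct gdma_req_hdr) + 32);

	/* Natural alignment only matches the wire if the header's size keeps
	 * the u64 vport field aligned; this pins that assumption down.
	 */
	static_assert(offsetof(struct mana_register_filter_req, vport) ==
		      sizeof(struct gdma_req_hdr));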
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index b1d773823232..3ef09e0cdbaa 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -446,6 +446,119 @@ static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
 	return 0;
 }
 
+static int mana_pf_register_hw_vport(struct mana_port_context *apc)
+{
+	struct mana_register_hw_vport_resp resp = {};
+	struct mana_register_hw_vport_req req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
+			     sizeof(req), sizeof(resp));
+	req.attached_gfid = 1;
+	req.is_pf_default_vport = 1;
+	req.allow_all_ether_types = 1;
+
+	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+				sizeof(resp));
+	if (err) {
+		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
+		return err;
+	}
+
+	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
+				   sizeof(resp));
+	if (err || resp.hdr.status) {
+		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
+			   err, resp.hdr.status);
+		return err ? err : -EPROTO;
+	}
+
+	apc->port_handle = resp.hw_vport_handle;
+	return 0;
+}
+
+static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
+{
+	struct mana_deregister_hw_vport_resp resp = {};
+	struct mana_deregister_hw_vport_req req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
+			     sizeof(req), sizeof(resp));
+	req.hw_vport_handle = apc->port_handle;
+
+	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+				sizeof(resp));
+	if (err) {
+		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
+			   err);
+		return;
+	}
+
+	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
+				   sizeof(resp));
+	if (err || resp.hdr.status)
+		netdev_err(apc->ndev,
+			   "Failed to deregister hw vPort: %d, 0x%x\n",
+			   err, resp.hdr.status);
+}
+
+static int mana_pf_register_filter(struct mana_port_context *apc)
+{
+	struct mana_register_filter_resp resp = {};
+	struct mana_register_filter_req req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
+			     sizeof(req), sizeof(resp));
+	req.vport = apc->port_handle;
+	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
+
+	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+				sizeof(resp));
+	if (err) {
+		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
+		return err;
+	}
+
+	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
+				   sizeof(resp));
+	if (err || resp.hdr.status) {
+		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
+			   err, resp.hdr.status);
+		return err ? err : -EPROTO;
+	}
+
+	apc->pf_filter_handle = resp.filter_handle;
+	return 0;
+}
+
+static void mana_pf_deregister_filter(struct mana_port_context *apc)
+{
+	struct mana_deregister_filter_resp resp = {};
+	struct mana_deregister_filter_req req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
+			     sizeof(req), sizeof(resp));
+	req.filter_handle = apc->pf_filter_handle;
+
+	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+				sizeof(resp));
+	if (err) {
+		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
+			   err);
+		return;
+	}
+
+	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
+				   sizeof(resp));
+	if (err || resp.hdr.status)
+		netdev_err(apc->ndev,
+			   "Failed to deregister filter: %d, 0x%x\n",
+			   err, resp.hdr.status);
+}
+
 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 				 u32 proto_minor_ver, u32 proto_micro_ver,
 				 u16 *max_num_vports)
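All four mana_pf_* helpers above share one round-trip shape over the HW
channel; the sketch below condenses it for MANA_REGISTER_HW_PORT, using the
same driver helpers the patch calls (the function name itself is hypothetical,
not in the patch). The -EPROTO fallback separates "transport succeeded but the
firmware reported a bad status" from a plain send failure:

	static int mana_pf_roundtrip_sketch(struct mana_port_context *apc)
	{
		struct mana_register_hw_vport_resp resp = {};
		struct mana_register_hw_vport_req req = {};
		int err;

		/* 1. Stamp the GDMA header: command code, both message sizes. */
		mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
				     sizeof(req), sizeof(resp));

		/* 2. Send via the HW channel and wait for the response. */
		err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
					sizeof(resp));
		if (err)
			return err;		/* transport-level failure */

		/* 3. Check the echoed header, then the firmware status word. */
		err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
					   sizeof(resp));
		if (err || resp.hdr.status)
			return err ? err : -EPROTO;	/* firmware refused */

		/* 4. Only a fully verified response yields a usable handle. */
		apc->port_handle = resp.hw_vport_handle;
		return 0;
	}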
@@ -1653,6 +1766,7 @@ out:
 
 static void mana_destroy_vport(struct mana_port_context *apc)
 {
+	struct gdma_dev *gd = apc->ac->gdma_dev;
 	struct mana_rxq *rxq;
 	u32 rxq_idx;
 
@@ -1666,6 +1780,9 @@ static void mana_destroy_vport(struct mana_port_context *apc)
 	}
 
 	mana_destroy_txq(apc);
+
+	if (gd->gdma_context->is_pf)
+		mana_pf_deregister_hw_vport(apc);
 }
 
 static int mana_create_vport(struct mana_port_context *apc,
@@ -1676,6 +1793,12 @@ static int mana_create_vport(struct mana_port_context *apc,
 
 	apc->default_rxobj = INVALID_MANA_HANDLE;
 
+	if (gd->gdma_context->is_pf) {
+		err = mana_pf_register_hw_vport(apc);
+		if (err)
+			return err;
+	}
+
 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
 	if (err)
 		return err;
@@ -1755,6 +1878,7 @@ reset_apc:
 int mana_alloc_queues(struct net_device *ndev)
 {
 	struct mana_port_context *apc = netdev_priv(ndev);
+	struct gdma_dev *gd = apc->ac->gdma_dev;
 	int err;
 
 	err = mana_create_vport(apc, ndev);
@@ -1781,6 +1905,12 @@ int mana_alloc_queues(struct net_device *ndev)
 	if (err)
 		goto destroy_vport;
 
+	if (gd->gdma_context->is_pf) {
+		err = mana_pf_register_filter(apc);
+		if (err)
+			goto destroy_vport;
+	}
+
 	mana_chn_setxdp(apc, mana_xdp_get(apc));
 
 	return 0;
@@ -1825,6 +1955,7 @@ int mana_attach(struct net_device *ndev)
 static int mana_dealloc_queues(struct net_device *ndev)
 {
 	struct mana_port_context *apc = netdev_priv(ndev);
+	struct gdma_dev *gd = apc->ac->gdma_dev;
 	struct mana_txq *txq;
 	int i, err;
 
@@ -1833,6 +1964,9 @@ static int mana_dealloc_queues(struct net_device *ndev)
 
 	mana_chn_setxdp(apc, NULL);
 
+	if (gd->gdma_context->is_pf)
+		mana_pf_deregister_filter(apc);
+
 	/* No packet can be transmitted now since apc->port_is_up is false.
 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
 	 * a txq because it may not timely see apc->port_is_up being cleared
@@ -1915,6 +2049,7 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
 	apc->max_queues = gc->max_num_queues;
 	apc->num_queues = gc->max_num_queues;
 	apc->port_handle = INVALID_MANA_HANDLE;
+	apc->pf_filter_handle = INVALID_MANA_HANDLE;
 	apc->port_idx = port_idx;
 
 	ndev->netdev_ops = &mana_devops;
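Taken together, the mana_en.c hooks bracket the existing data-path setup
symmetrically: the hw vPort is registered before the common vPort config, the
RX filter only after the queues exist, and teardown runs in reverse. A
standalone sketch of the resulting call order (stubs stand in for the driver
functions named in the patch; error unwinding is elided):

	#include <stdbool.h>
	#include <stdio.h>

	static void pf_register_hw_vport(void)   { puts("mana_pf_register_hw_vport"); }
	static void pf_deregister_hw_vport(void) { puts("mana_pf_deregister_hw_vport"); }
	static void pf_register_filter(void)     { puts("mana_pf_register_filter"); }
	static void pf_deregister_filter(void)   { puts("mana_pf_deregister_filter"); }

	static void alloc_queues(bool is_pf)
	{
		if (is_pf)
			pf_register_hw_vport();	/* before the common config */
		puts("mana_cfg_vport + create TX/RX queues");
		if (is_pf)
			pf_register_filter();	/* last: queues can now receive */
	}

	static void dealloc_queues(bool is_pf)
	{
		if (is_pf)
			pf_deregister_filter();	/* first: port_is_up is already false */
		puts("destroy TX/RX queues");
		if (is_pf)
			pf_deregister_hw_vport();	/* last, in mana_destroy_vport() */
	}

	int main(void)
	{
		alloc_queues(true);	/* PF bring-up */
		dealloc_queues(true);	/* PF teardown mirrors it in reverse */
		return 0;
	}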