/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __MLX5_VNET_H__
#define __MLX5_VNET_H__
#include "mlx5_vdpa.h"
/* Get the mlx5_vdpa_net that embeds the given mlx5_vdpa_dev (its 'mvdev' member). */
#define to_mlx5_vdpa_ndev(__mvdev) \
container_of(__mvdev, struct mlx5_vdpa_net, mvdev)
/* Get the mlx5_vdpa_dev that embeds the given generic vdpa_device (its 'vdev' member). */
#define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev)
/*
 * Numbers of the mlx5 transport/steering objects backing one vdpa net
 * device. All are firmware object IDs obtained at resource creation time.
 */
struct mlx5_vdpa_net_resources {
u32 tisn;	/* TIS (transport interface send) object number */
u32 tdn;	/* transport domain number */
u32 tirn;	/* TIR (transport interface receive) object number */
u32 rqtn;	/* RQT (receive queue table) object number */
bool valid;	/* NOTE(review): presumably set once the objects above exist — confirm in the .c file */
};
/* Number of buckets in the macvlan_hash table below. */
#define MLX5V_MACVLAN_SIZE 256
/*
 * Per-device state for an mlx5 vdpa network device. Embeds the generic
 * mlx5_vdpa_dev as its first member so to_mlx5_vdpa_ndev() can recover
 * it via container_of().
 */
struct mlx5_vdpa_net {
struct mlx5_vdpa_dev mvdev;		/* generic mlx5 vdpa device; must stay embedded for container_of() */
struct mlx5_vdpa_net_resources res;	/* TIS/TIR/RQT/TD object numbers */
struct virtio_net_config config;	/* virtio-net config space exposed to the guest */
struct mlx5_vdpa_virtqueue *vqs;	/* array of virtqueues; length presumably tracked by cur_num_vqs — confirm */
struct vdpa_callback *event_cbs;	/* per-vq event callbacks registered by the vdpa core */
/* Serialize vq resources creation and destruction. This is required
 * since memory map might change and we need to destroy and create
 * resources while driver in operational.
 */
struct rw_semaphore reslock;
struct mlx5_flow_table *rxft;		/* RX flow table holding the steering rules */
struct dentry *rx_dent;			/* debugfs: rx directory */
struct dentry *rx_table_dent;		/* debugfs: rx flow-table entry */
bool setup;				/* NOTE(review): looks like "datapath resources created" flag — verify */
u32 cur_num_vqs;			/* number of currently active virtqueues */
u32 rqt_size;				/* number of entries in the RQT */
bool nb_registered;			/* whether 'nb' is currently registered */
struct notifier_block nb;		/* notifier for link/events; exact chain not visible here */
struct vdpa_callback config_cb;		/* callback invoked on config-space changes */
struct mlx5_vdpa_wq_ent cvq_ent;	/* workqueue entry for control-vq processing */
struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];	/* buckets of struct macvlan_node */
};
/*
 * One MAC/VLAN steering entry, hashed into mlx5_vdpa_net.macvlan_hash.
 * Holds the unicast and multicast flow rules installed for the key.
 */
struct macvlan_node {
struct hlist_node hlist;		/* link in the macvlan_hash bucket */
struct mlx5_flow_handle *ucast_rule;	/* unicast steering rule handle */
struct mlx5_flow_handle *mcast_rule;	/* multicast steering rule handle */
u64 macvlan;				/* hash key; presumably packed MAC+VLAN — confirm encoding at call sites */
};
#endif /* __MLX5_VNET_H__ */