drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */

#ifndef __MLX5_LAG_FS_H__
#define __MLX5_LAG_FS_H__

#include "lib/fs_ttc.h"

struct mlx5_lag_definer {
	struct mlx5_flow_definer *definer;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	/* Each port has ldev->buckets rules and they are arranged in
	 * [port * buckets .. port * buckets + buckets) locations
	 */
	struct mlx5_flow_handle *rules[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS];
};
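
/* Illustrative sketch, not part of the upstream header: the layout comment
 * above implies that the rule for a given (port, bucket) pair sits at index
 * port * buckets + bucket.  The helper below is hypothetical and only spells
 * out that arithmetic; both indices are assumed zero-based, with
 * buckets <= MLX5_LAG_MAX_HASH_BUCKETS.
 */
static inline unsigned int
mlx5_lag_definer_rule_idx(unsigned int port, unsigned int bucket,
			  unsigned int buckets)
{
	return port * buckets + bucket;
}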

struct mlx5_lag_ttc {
	struct mlx5_ttc_table *ttc;
	struct mlx5_lag_definer *definers[MLX5_NUM_TT];
};

struct mlx5_lag_port_sel {
	DECLARE_BITMAP(tt_map, MLX5_NUM_TT);
	bool   tunnel;
	struct mlx5_lag_ttc outer;
	struct mlx5_lag_ttc inner;
};

#ifdef CONFIG_MLX5_ESWITCH

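/* Expected lifecycle, inferred from the prototypes below: _create() builds
 * the port-selection steering objects for the given hash type and port map,
 * _modify() updates the rules when the port map changes, and _destroy()
 * tears them down.
 */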
int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports);
void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev);
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
			     enum netdev_lag_hash hash_type, u8 *ports);

#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
					   enum netdev_lag_hash hash_type,
					   u8 *ports)
{
	return 0;
}

static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports)
{
	return 0;
}

static inline void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_LAG_FS_H__ */