// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/bio.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "target"

static LIST_HEAD(_targets);
static DECLARE_RWSEM(_lock);

static inline struct target_type *__find_target_type(const char *name)
{
	struct target_type *tt;

	list_for_each_entry(tt, &_targets, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct target_type *get_target_type(const char *name)
{
	struct target_type *tt;

	down_read(&_lock);

	tt = __find_target_type(name);
	if (tt && !try_module_get(tt->module))
		tt = NULL;

	up_read(&_lock);
	return tt;
}

static void load_module(const char *name)
{
	request_module("dm-%s", name);
}

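/*
 * Find a target type by name and take a reference on its module.  If the
 * type is not registered yet, try to load the "dm-<name>" module and look
 * it up again.  Returns NULL on failure; a successful lookup must be
 * balanced with dm_put_target_type().
 */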
struct target_type *dm_get_target_type(const char *name)
{
	struct target_type *tt = get_target_type(name);

	if (!tt) {
		load_module(name);
		tt = get_target_type(name);
	}

	return tt;
}

void dm_put_target_type(struct target_type *tt)
{
	down_read(&_lock);
	module_put(tt->module);
	up_read(&_lock);
}

int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param)
{
	struct target_type *tt;

	down_read(&_lock);
	list_for_each_entry(tt, &_targets, list)
		iter_func(tt, param);
	up_read(&_lock);

	return 0;
}

int dm_register_target(struct target_type *tt)
{
	int rv = 0;

	down_write(&_lock);
	if (__find_target_type(tt->name)) {
		DMERR("%s: '%s' target already registered",
		      __func__, tt->name);
		rv = -EEXIST;
	} else {
		list_add(&tt->list, &_targets);
	}
	up_write(&_lock);

	return rv;
}
EXPORT_SYMBOL(dm_register_target);

void dm_unregister_target(struct target_type *tt)
{
	down_write(&_lock);
	if (!__find_target_type(tt->name)) {
		DMCRIT("Unregistering unrecognised target: %s", tt->name);
		BUG();
	}

	list_del(&tt->list);

	up_write(&_lock);
}
EXPORT_SYMBOL(dm_unregister_target);
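
/*
 * Illustrative sketch only: the "example" names below are hypothetical and
 * not part of this file.  A target module normally pairs these two calls
 * in its module init/exit hooks:
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		return dm_register_target(&example_target);
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */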

/*
 * io-err: always fails an I/O, useful for bringing
 * up LVs that have holes in them.
 */
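/*
 * Typical use (illustrative): an all-error device of 2048 sectors can be
 * created with a dmsetup table line such as
 *
 *	dmsetup create bad_dev --table "0 2048 error"
 *
 * Every bio submitted to the resulting device then completes with an
 * I/O error.
 */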
struct io_err_c {
	struct dm_dev *dev;
	sector_t start;
};

static int io_err_get_args(struct dm_target *tt, unsigned int argc, char **args)
{
	unsigned long long start;
	struct io_err_c *ioec;
	char dummy;
	int ret;

	ioec = kmalloc(sizeof(*ioec), GFP_KERNEL);
	if (!ioec) {
		tt->error = "Cannot allocate io_err context";
		return -ENOMEM;
	}

	ret = -EINVAL;
	if (sscanf(args[1], "%llu%c", &start, &dummy) != 1 ||
	    start != (sector_t)start) {
		tt->error = "Invalid device sector";
		goto bad;
	}
	ioec->start = start;

	ret = dm_get_device(tt, args[0], dm_table_get_mode(tt->table), &ioec->dev);
	if (ret) {
		tt->error = "Device lookup failed";
		goto bad;
	}

	tt->private = ioec;

	return 0;

bad:
	kfree(ioec);

	return ret;
}

static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
{
	/*
	 * If we have arguments, assume they are the path to the backing
	 * block device and its mapping start sector (same as dm-linear;
	 * see the illustrative table line after this function).
	 * In this case, get the device so that we can get its limits.
	 */
	if (argc == 2) {
		int ret = io_err_get_args(tt, argc, args);

		if (ret)
			return ret;
	}

	/*
	 * Return error for discards instead of -EOPNOTSUPP
	 */
	tt->num_discard_bios = 1;
	tt->discards_supported = true;

	return 0;
}
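
/*
 * Illustrative two-argument form (the device path below is hypothetical):
 * the error target can also be layered over part of a real device, taking
 * the same <dev> <offset> arguments as dm-linear, e.g.
 *
 *	dmsetup create bad_dev --table "0 2048 error /dev/sdX 0"
 *
 * The backing device is only used to derive queue limits and zone
 * information; all I/O submitted to the mapping still fails.
 */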

static void io_err_dtr(struct dm_target *tt)
{
	struct io_err_c *ioec = tt->private;

	if (ioec) {
		dm_put_device(tt, ioec->dev);
		kfree(ioec);
	}
}

static int io_err_map(struct dm_target *tt, struct bio *bio)
{
	return DM_MAPIO_KILL;
}

static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return DM_MAPIO_KILL;
}

static void io_err_release_clone_rq(struct request *clone,
				    union map_info *map_context)
{
}

#ifdef CONFIG_BLK_DEV_ZONED
static sector_t io_err_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct io_err_c *ioec = ti->private;

	return ioec->start + dm_target_offset(ti, bi_sector);
}

static int io_err_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct io_err_c *ioec = ti->private;

	/*
	 * This should never be called when we do not have a backing device
	 * as that would mean the target is not a zoned one.
	 */
	if (WARN_ON_ONCE(!ioec))
		return -EIO;

	return dm_report_zones(ioec->dev->bdev, ioec->start,
			       io_err_map_sector(ti, args->next_sector),
			       args, nr_zones);
}
#else
#define io_err_report_zones NULL
#endif

static int io_err_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct io_err_c *ioec = ti->private;

	if (!ioec)
		return 0;

	return fn(ti, ioec->dev, ioec->start, ti->len, data);
}

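/*
 * Advertise discard support so that discard bios are mapped (and then
 * failed with an error) instead of being rejected with -EOPNOTSUPP
 * before they reach the target; see the comment in io_err_ctr() above.
 */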
static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	limits->max_hw_discard_sectors = UINT_MAX;
	limits->discard_granularity = 512;
}

static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	return -EIO;
}

static struct target_type error_target = {
	.name = "error",
	.version = {1, 7, 0},
	.features = DM_TARGET_WILDCARD | DM_TARGET_ZONED_HM,
	.ctr  = io_err_ctr,
	.dtr  = io_err_dtr,
	.map  = io_err_map,
	.clone_and_map_rq = io_err_clone_and_map_rq,
	.release_clone_rq = io_err_release_clone_rq,
	.iterate_devices = io_err_iterate_devices,
	.io_hints = io_err_io_hints,
	.direct_access = io_err_dax_direct_access,
	.report_zones = io_err_report_zones,
};

int __init dm_target_init(void)
{
	return dm_register_target(&error_target);
}

void dm_target_exit(void)
{
	dm_unregister_target(&error_target);
}