From 06e932fea1aafbaa62c0c872060de5f28f717814 Mon Sep 17 00:00:00 2001
From: Matthew Sakai
Date: Thu, 16 Nov 2023 21:08:47 -0500
Subject: dm vdo: add the primary vdo structure

Add the data and methods that manage the dm-vdo target itself. This
includes the overall state of the target and its threads, the state of
the logical volumes, startup, shutdown, and statistics.

Co-developed-by: J. corwin Coburn
Signed-off-by: J. corwin Coburn
Co-developed-by: Michael Sclafani
Signed-off-by: Michael Sclafani
Co-developed-by: Sweet Tea Dorminy
Signed-off-by: Sweet Tea Dorminy
Signed-off-by: Matthew Sakai
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-vdo/vdo.c | 1784 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 1784 insertions(+)
 create mode 100644 drivers/md/dm-vdo/vdo.c

(limited to 'drivers/md/dm-vdo/vdo.c')

diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c
new file mode 100644
index 000000000000..b494c11e1d64
--- /dev/null
+++ b/drivers/md/dm-vdo/vdo.c
@@ -0,0 +1,1784 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+/*
+ * This file contains the main entry points for normal operations on a vdo as well as functions for
+ * constructing and destroying vdo instances (in memory).
+ */
+
+/**
+ * DOC:
+ *
+ * A read_only_notifier has a single completion which is used to perform read-only notifications;
+ * however, vdo_enter_read_only_mode() may be called from any thread. A pair of fields, protected
+ * by a spinlock, is used to control the read-only mode entry process. The first field holds the
+ * read-only error. The second is the state field, which may hold any of the four special values
+ * enumerated here.
+ *
+ * When vdo_enter_read_only_mode() is called from some vdo thread, if the read_only_error field
+ * already contains an error (i.e. its value is not VDO_SUCCESS), then some other error has already
+ * initiated the read-only process, and nothing more is done. Otherwise, the new error is stored in
+ * the read_only_error field, and the state field is consulted. If the state is MAY_NOTIFY, it is
+ * set to NOTIFYING, and the notification process begins. If the state is MAY_NOT_NOTIFY, then
+ * notifications are currently disallowed, generally due to the vdo being suspended. In this case,
+ * nothing more will be done until the vdo is resumed, at which point the notification will be
+ * performed. In any other case, the vdo is already read-only, and there is nothing more to do.
+ */
+
+#include "vdo.h"
+
+#include <linux/completion.h>
+#include <linux/device-mapper.h>
+#include <linux/kernel.h>
+#include <linux/lz4.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+#include "block-map.h"
+#include "completion.h"
+#include "data-vio.h"
+#include "dedupe.h"
+#include "encodings.h"
+#include "funnel-workqueue.h"
+#include "io-submitter.h"
+#include "logical-zone.h"
+#include "packer.h"
+#include "physical-zone.h"
+#include "pool-sysfs.h"
+#include "recovery-journal.h"
+#include "slab-depot.h"
+#include "statistics.h"
+#include "status-codes.h"
+#include "vio.h"
+
+enum { PARANOID_THREAD_CONSISTENCY_CHECKS = 0 };
+
+struct sync_completion {
+	struct vdo_completion vdo_completion;
+	struct completion completion;
+};
+
+/*
+ * We don't expect this set to ever get really large, so a linked list is adequate. We can use a
+ * pointer_map if we need to later.
+ */
+struct device_registry {
+	struct list_head links;
+	/* TODO: Convert to rcu per kernel recommendation.
*/ + rwlock_t lock; +}; + +static struct device_registry registry; + +/** + * vdo_initialize_device_registry_once() - Initialize the necessary structures for the device + * registry. + */ +void vdo_initialize_device_registry_once(void) +{ + INIT_LIST_HEAD(®istry.links); + rwlock_init(®istry.lock); +} + +/** vdo_is_equal() - Implements vdo_filter_fn. */ +static bool vdo_is_equal(struct vdo *vdo, const void *context) +{ + return (vdo == context); +} + +/** + * filter_vdos_locked() - Find a vdo in the registry if it exists there. + * @filter: The filter function to apply to devices. + * @context: A bit of context to provide the filter. + * + * Context: Must be called holding the lock. + * + * Return: the vdo object found, if any. + */ +static struct vdo * __must_check filter_vdos_locked(vdo_filter_fn filter, + const void *context) +{ + struct vdo *vdo; + + list_for_each_entry(vdo, ®istry.links, registration) { + if (filter(vdo, context)) + return vdo; + } + + return NULL; +} + +/** + * vdo_find_matching() - Find and return the first (if any) vdo matching a given filter function. + * @filter: The filter function to apply to vdos. + * @context: A bit of context to provide the filter. + */ +struct vdo *vdo_find_matching(vdo_filter_fn filter, const void *context) +{ + struct vdo *vdo; + + read_lock(®istry.lock); + vdo = filter_vdos_locked(filter, context); + read_unlock(®istry.lock); + + return vdo; +} + +static void start_vdo_request_queue(void *ptr) +{ + struct vdo_thread *thread = vdo_get_work_queue_owner(vdo_get_current_work_queue()); + + uds_register_allocating_thread(&thread->allocating_thread, + &thread->vdo->allocations_allowed); +} + +static void finish_vdo_request_queue(void *ptr) +{ + uds_unregister_allocating_thread(); +} + +#ifdef MODULE +#define MODULE_NAME THIS_MODULE->name +#else +#define MODULE_NAME "dm-vdo" +#endif /* MODULE */ + +static const struct vdo_work_queue_type default_queue_type = { + .start = start_vdo_request_queue, + .finish = finish_vdo_request_queue, + .max_priority = VDO_DEFAULT_Q_MAX_PRIORITY, + .default_priority = VDO_DEFAULT_Q_COMPLETION_PRIORITY, +}; + +static const struct vdo_work_queue_type bio_ack_q_type = { + .start = NULL, + .finish = NULL, + .max_priority = BIO_ACK_Q_MAX_PRIORITY, + .default_priority = BIO_ACK_Q_ACK_PRIORITY, +}; + +static const struct vdo_work_queue_type cpu_q_type = { + .start = NULL, + .finish = NULL, + .max_priority = CPU_Q_MAX_PRIORITY, + .default_priority = CPU_Q_MAX_PRIORITY, +}; + +static void uninitialize_thread_config(struct thread_config *config) +{ + uds_free(uds_forget(config->logical_threads)); + uds_free(uds_forget(config->physical_threads)); + uds_free(uds_forget(config->hash_zone_threads)); + uds_free(uds_forget(config->bio_threads)); + memset(config, 0, sizeof(struct thread_config)); +} + +static void assign_thread_ids(struct thread_config *config, + thread_id_t thread_ids[], zone_count_t count) +{ + zone_count_t zone; + + for (zone = 0; zone < count; zone++) + thread_ids[zone] = config->thread_count++; +} + +/** + * initialize_thread_config() - Initialize the thread mapping + * + * If the logical, physical, and hash zone counts are all 0, a single thread will be shared by all + * three plus the packer and recovery journal. Otherwise, there must be at least one of each type, + * and each will have its own thread, as will the packer and recovery journal. + * + * Return: VDO_SUCCESS or an error. 
+ */
+static int __must_check initialize_thread_config(struct thread_count_config counts,
+						 struct thread_config *config)
+{
+	int result;
+	bool single = ((counts.logical_zones + counts.physical_zones + counts.hash_zones) == 0);
+
+	config->bio_thread_count = counts.bio_threads;
+	if (single) {
+		config->logical_zone_count = 1;
+		config->physical_zone_count = 1;
+		config->hash_zone_count = 1;
+	} else {
+		config->logical_zone_count = counts.logical_zones;
+		config->physical_zone_count = counts.physical_zones;
+		config->hash_zone_count = counts.hash_zones;
+	}
+
+	result = uds_allocate(config->logical_zone_count, thread_id_t,
+			      "logical thread array", &config->logical_threads);
+	if (result != VDO_SUCCESS) {
+		uninitialize_thread_config(config);
+		return result;
+	}
+
+	result = uds_allocate(config->physical_zone_count, thread_id_t,
+			      "physical thread array", &config->physical_threads);
+	if (result != VDO_SUCCESS) {
+		uninitialize_thread_config(config);
+		return result;
+	}
+
+	result = uds_allocate(config->hash_zone_count, thread_id_t,
+			      "hash thread array", &config->hash_zone_threads);
+	if (result != VDO_SUCCESS) {
+		uninitialize_thread_config(config);
+		return result;
+	}
+
+	result = uds_allocate(config->bio_thread_count, thread_id_t,
+			      "bio thread array", &config->bio_threads);
+	if (result != VDO_SUCCESS) {
+		uninitialize_thread_config(config);
+		return result;
+	}
+
+	if (single) {
+		config->logical_threads[0] = config->thread_count;
+		config->physical_threads[0] = config->thread_count;
+		config->hash_zone_threads[0] = config->thread_count++;
+	} else {
+		config->admin_thread = config->thread_count;
+		config->journal_thread = config->thread_count++;
+		config->packer_thread = config->thread_count++;
+		assign_thread_ids(config, config->logical_threads, counts.logical_zones);
+		assign_thread_ids(config, config->physical_threads, counts.physical_zones);
+		assign_thread_ids(config, config->hash_zone_threads, counts.hash_zones);
+	}
+
+	config->dedupe_thread = config->thread_count++;
+	config->bio_ack_thread =
+		((counts.bio_ack_threads > 0) ? config->thread_count++ : VDO_INVALID_THREAD_ID);
+	config->cpu_thread = config->thread_count++;
+	assign_thread_ids(config, config->bio_threads, counts.bio_threads);
+	return VDO_SUCCESS;
+}
+
+/**
+ * read_geometry_block() - Synchronously read the geometry block from a vdo's underlying block
+ *                         device.
+ * @vdo: The vdo whose geometry is to be read.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check read_geometry_block(struct vdo *vdo)
+{
+	struct vio *vio;
+	char *block;
+	int result;
+
+	result = uds_allocate(VDO_BLOCK_SIZE, u8, __func__, &block);
+	if (result != VDO_SUCCESS)
+		return result;
+
+	result = create_metadata_vio(vdo, VIO_TYPE_GEOMETRY, VIO_PRIORITY_HIGH, NULL,
+				     block, &vio);
+	if (result != VDO_SUCCESS) {
+		uds_free(block);
+		return result;
+	}
+
+	/*
+	 * This is only safe because, since the geometry has not yet been loaded, the geometry's
+	 * bio_offset field is 0, so the fact that vio_reset_bio() will subtract that offset from
+	 * the supplied pbn is not a problem.
+ */ + result = vio_reset_bio(vio, block, NULL, REQ_OP_READ, + VDO_GEOMETRY_BLOCK_LOCATION); + if (result != VDO_SUCCESS) { + free_vio(uds_forget(vio)); + uds_free(block); + return result; + } + + bio_set_dev(vio->bio, vdo_get_backing_device(vdo)); + submit_bio_wait(vio->bio); + result = blk_status_to_errno(vio->bio->bi_status); + free_vio(uds_forget(vio)); + if (result != 0) { + uds_log_error_strerror(result, "synchronous read failed"); + uds_free(block); + return -EIO; + } + + result = vdo_parse_geometry_block((u8 *) block, &vdo->geometry); + uds_free(block); + return result; +} + +static bool get_zone_thread_name(const thread_id_t thread_ids[], zone_count_t count, + thread_id_t id, const char *prefix, + char *buffer, size_t buffer_length) +{ + if (id >= thread_ids[0]) { + thread_id_t index = id - thread_ids[0]; + + if (index < count) { + snprintf(buffer, buffer_length, "%s%d", prefix, index); + return true; + } + } + + return false; +} + +/** + * get_thread_name() - Format the name of the worker thread desired to support a given work queue. + * @thread_config: The thread configuration. + * @thread_id: The thread id. + * @buffer: Where to put the formatted name. + * @buffer_length: Size of the output buffer. + * + * The physical layer may add a prefix identifying the product; the output from this function + * should just identify the thread. + */ +static void get_thread_name(const struct thread_config *thread_config, + thread_id_t thread_id, char *buffer, size_t buffer_length) +{ + if (thread_id == thread_config->journal_thread) { + if (thread_config->packer_thread == thread_id) { + /* + * This is the "single thread" config where one thread is used for the + * journal, packer, logical, physical, and hash zones. In that case, it is + * known as the "request queue." + */ + snprintf(buffer, buffer_length, "reqQ"); + return; + } + + snprintf(buffer, buffer_length, "journalQ"); + return; + } else if (thread_id == thread_config->admin_thread) { + /* Theoretically this could be different from the journal thread. */ + snprintf(buffer, buffer_length, "adminQ"); + return; + } else if (thread_id == thread_config->packer_thread) { + snprintf(buffer, buffer_length, "packerQ"); + return; + } else if (thread_id == thread_config->dedupe_thread) { + snprintf(buffer, buffer_length, "dedupeQ"); + return; + } else if (thread_id == thread_config->bio_ack_thread) { + snprintf(buffer, buffer_length, "ackQ"); + return; + } else if (thread_id == thread_config->cpu_thread) { + snprintf(buffer, buffer_length, "cpuQ"); + return; + } + + if (get_zone_thread_name(thread_config->logical_threads, + thread_config->logical_zone_count, + thread_id, "logQ", buffer, buffer_length)) + return; + + if (get_zone_thread_name(thread_config->physical_threads, + thread_config->physical_zone_count, + thread_id, "physQ", buffer, buffer_length)) + return; + + if (get_zone_thread_name(thread_config->hash_zone_threads, + thread_config->hash_zone_count, + thread_id, "hashQ", buffer, buffer_length)) + return; + + if (get_zone_thread_name(thread_config->bio_threads, + thread_config->bio_thread_count, + thread_id, "bioQ", buffer, buffer_length)) + return; + + /* Some sort of misconfiguration? */ + snprintf(buffer, buffer_length, "reqQ%d", thread_id); +} + +/** + * vdo_make_thread() - Construct a single vdo work_queue and its associated thread (or threads for + * round-robin queues). + * @vdo: The vdo which owns the thread. + * @thread_id: The id of the thread to create (as determined by the thread_config). 
+ * @type: The description of the work queue for this thread. + * @queue_count: The number of actual threads/queues contained in the "thread". + * @contexts: An array of queue_count contexts, one for each individual queue; may be NULL. + * + * Each "thread" constructed by this method is represented by a unique thread id in the thread + * config, and completions can be enqueued to the queue and run on the threads comprising this + * entity. + * + * Return: VDO_SUCCESS or an error. + */ +int vdo_make_thread(struct vdo *vdo, thread_id_t thread_id, + const struct vdo_work_queue_type *type, + unsigned int queue_count, void *contexts[]) +{ + struct vdo_thread *thread = &vdo->threads[thread_id]; + char queue_name[MAX_VDO_WORK_QUEUE_NAME_LEN]; + + if (type == NULL) + type = &default_queue_type; + + if (thread->queue != NULL) { + return ASSERT(vdo_work_queue_type_is(thread->queue, type), + "already constructed vdo thread %u is of the correct type", + thread_id); + } + + thread->vdo = vdo; + thread->thread_id = thread_id; + get_thread_name(&vdo->thread_config, thread_id, queue_name, sizeof(queue_name)); + return vdo_make_work_queue(vdo->thread_name_prefix, queue_name, thread, + type, queue_count, contexts, &thread->queue); +} + +/** + * register_vdo() - Register a VDO; it must not already be registered. + * @vdo: The vdo to register. + * + * Return: VDO_SUCCESS or an error. + */ +static int register_vdo(struct vdo *vdo) +{ + int result; + + write_lock(®istry.lock); + result = ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL, + "VDO not already registered"); + if (result == VDO_SUCCESS) { + INIT_LIST_HEAD(&vdo->registration); + list_add_tail(&vdo->registration, ®istry.links); + } + write_unlock(®istry.lock); + + return result; +} + +/** + * initialize_vdo() - Do the portion of initializing a vdo which will clean up after itself on + * error. 
+ * @vdo: The vdo being initialized + * @config: The configuration of the vdo + * @instance: The instance number of the vdo + * @reason: The buffer to hold the failure reason on error + */ +static int initialize_vdo(struct vdo *vdo, struct device_config *config, + unsigned int instance, char **reason) +{ + int result; + zone_count_t i; + + vdo->device_config = config; + vdo->starting_sector_offset = config->owning_target->begin; + vdo->instance = instance; + vdo->allocations_allowed = true; + vdo_set_admin_state_code(&vdo->admin.state, VDO_ADMIN_STATE_NEW); + INIT_LIST_HEAD(&vdo->device_config_list); + vdo_initialize_completion(&vdo->admin.completion, vdo, VDO_ADMIN_COMPLETION); + init_completion(&vdo->admin.callback_sync); + mutex_init(&vdo->stats_mutex); + result = read_geometry_block(vdo); + if (result != VDO_SUCCESS) { + *reason = "Could not load geometry block"; + return result; + } + + result = initialize_thread_config(config->thread_counts, &vdo->thread_config); + if (result != VDO_SUCCESS) { + *reason = "Cannot create thread configuration"; + return result; + } + + uds_log_info("zones: %d logical, %d physical, %d hash; total threads: %d", + config->thread_counts.logical_zones, + config->thread_counts.physical_zones, + config->thread_counts.hash_zones, vdo->thread_config.thread_count); + + /* Compression context storage */ + result = uds_allocate(config->thread_counts.cpu_threads, char *, "LZ4 context", + &vdo->compression_context); + if (result != VDO_SUCCESS) { + *reason = "cannot allocate LZ4 context"; + return result; + } + + for (i = 0; i < config->thread_counts.cpu_threads; i++) { + result = uds_allocate(LZ4_MEM_COMPRESS, char, "LZ4 context", + &vdo->compression_context[i]); + if (result != VDO_SUCCESS) { + *reason = "cannot allocate LZ4 context"; + return result; + } + } + + result = register_vdo(vdo); + if (result != VDO_SUCCESS) { + *reason = "Cannot add VDO to device registry"; + return result; + } + + vdo_set_admin_state_code(&vdo->admin.state, VDO_ADMIN_STATE_INITIALIZED); + return result; +} + +/** + * vdo_make() - Allocate and initialize a vdo. + * @instance: Device instantiation counter. + * @config: The device configuration. + * @reason: The reason for any failure during this call. + * @vdo_ptr: A pointer to hold the created vdo. + * + * Return: VDO_SUCCESS or an error. + */ +int vdo_make(unsigned int instance, struct device_config *config, char **reason, + struct vdo **vdo_ptr) +{ + int result; + struct vdo *vdo; + + /* VDO-3769 - Set a generic reason so we don't ever return garbage. */ + *reason = "Unspecified error"; + + result = uds_allocate(1, struct vdo, __func__, &vdo); + if (result != UDS_SUCCESS) { + *reason = "Cannot allocate VDO"; + return result; + } + + result = initialize_vdo(vdo, config, instance, reason); + if (result != VDO_SUCCESS) { + vdo_destroy(vdo); + return result; + } + + /* From here on, the caller will clean up if there is an error. 
 */
+	*vdo_ptr = vdo;
+
+	snprintf(vdo->thread_name_prefix, sizeof(vdo->thread_name_prefix),
+		 "%s%u", MODULE_NAME, instance);
+	BUG_ON(vdo->thread_name_prefix[0] == '\0');
+	result = uds_allocate(vdo->thread_config.thread_count,
+			      struct vdo_thread, __func__, &vdo->threads);
+	if (result != VDO_SUCCESS) {
+		*reason = "Cannot allocate thread structures";
+		return result;
+	}
+
+	result = vdo_make_thread(vdo, vdo->thread_config.admin_thread,
+				 &default_queue_type, 1, NULL);
+	if (result != VDO_SUCCESS) {
+		*reason = "Cannot make admin thread";
+		return result;
+	}
+
+	result = vdo_make_flusher(vdo);
+	if (result != VDO_SUCCESS) {
+		*reason = "Cannot make flusher zones";
+		return result;
+	}
+
+	result = vdo_make_packer(vdo, DEFAULT_PACKER_BINS, &vdo->packer);
+	if (result != VDO_SUCCESS) {
+		*reason = "Cannot make packer zones";
+		return result;
+	}
+
+	BUG_ON(vdo->device_config->logical_block_size <= 0);
+	BUG_ON(vdo->device_config->owned_device == NULL);
+	result = make_data_vio_pool(vdo, MAXIMUM_VDO_USER_VIOS,
+				    MAXIMUM_VDO_USER_VIOS * 3 / 4,
+				    &vdo->data_vio_pool);
+	if (result != VDO_SUCCESS) {
+		*reason = "Cannot allocate data_vio pool";
+		return result;
+	}
+
+	result = vdo_make_io_submitter(config->thread_counts.bio_threads,
+				       config->thread_counts.bio_rotation_interval,
+				       get_data_vio_pool_request_limit(vdo->data_vio_pool),
+				       vdo, &vdo->io_submitter);
+	if (result != VDO_SUCCESS) {
+		*reason = "bio submission initialization failed";
+		return result;
+	}
+
+	if (vdo_uses_bio_ack_queue(vdo)) {
+		result = vdo_make_thread(vdo, vdo->thread_config.bio_ack_thread,
+					 &bio_ack_q_type,
+					 config->thread_counts.bio_ack_threads, NULL);
+		if (result != VDO_SUCCESS) {
+			*reason = "bio ack queue initialization failed";
+			return result;
+		}
+	}
+
+	result = vdo_make_thread(vdo, vdo->thread_config.cpu_thread, &cpu_q_type,
+				 config->thread_counts.cpu_threads,
+				 (void **) vdo->compression_context);
+	if (result != VDO_SUCCESS) {
+		*reason = "CPU queue initialization failed";
+		return result;
+	}
+
+	return VDO_SUCCESS;
+}
+
+static void finish_vdo(struct vdo *vdo)
+{
+	int i;
+
+	if (vdo->threads == NULL)
+		return;
+
+	vdo_cleanup_io_submitter(vdo->io_submitter);
+	vdo_finish_dedupe_index(vdo->hash_zones);
+
+	for (i = 0; i < vdo->thread_config.thread_count; i++)
+		vdo_finish_work_queue(vdo->threads[i].queue);
+}
+
+/**
+ * free_listeners() - Free the list of read-only listeners associated with a thread.
+ * @thread: The thread holding the list to free.
+ */
+static void free_listeners(struct vdo_thread *thread)
+{
+	struct read_only_listener *listener, *next;
+
+	for (listener = uds_forget(thread->listeners); listener != NULL; listener = next) {
+		next = uds_forget(listener->next);
+		uds_free(listener);
+	}
+}
+
+static void uninitialize_super_block(struct vdo_super_block *super_block)
+{
+	free_vio_components(&super_block->vio);
+	uds_free(super_block->buffer);
+}
+
+/**
+ * unregister_vdo() - Remove a vdo from the device registry.
+ * @vdo: The vdo to remove.
+ */
+static void unregister_vdo(struct vdo *vdo)
+{
+	write_lock(&registry.lock);
+	if (filter_vdos_locked(vdo_is_equal, vdo) == vdo)
+		list_del_init(&vdo->registration);
+
+	write_unlock(&registry.lock);
+}
+
+/**
+ * vdo_destroy() - Destroy a vdo instance.
+ * @vdo: The vdo to destroy (may be NULL).
+ */
+void vdo_destroy(struct vdo *vdo)
+{
+	unsigned int i;
+
+	if (vdo == NULL)
+		return;
+
+	/* A running VDO should never be destroyed without suspending first.
*/ + BUG_ON(vdo_get_admin_state(vdo)->normal); + + vdo->allocations_allowed = true; + + /* Stop services that need to gather VDO statistics from the worker threads. */ + if (vdo->sysfs_added) { + init_completion(&vdo->stats_shutdown); + kobject_put(&vdo->stats_directory); + wait_for_completion(&vdo->stats_shutdown); + } + + finish_vdo(vdo); + unregister_vdo(vdo); + free_data_vio_pool(vdo->data_vio_pool); + vdo_free_io_submitter(uds_forget(vdo->io_submitter)); + vdo_free_flusher(uds_forget(vdo->flusher)); + vdo_free_packer(uds_forget(vdo->packer)); + vdo_free_recovery_journal(uds_forget(vdo->recovery_journal)); + vdo_free_slab_depot(uds_forget(vdo->depot)); + vdo_uninitialize_layout(&vdo->layout); + vdo_uninitialize_layout(&vdo->next_layout); + if (vdo->partition_copier) + dm_kcopyd_client_destroy(uds_forget(vdo->partition_copier)); + uninitialize_super_block(&vdo->super_block); + vdo_free_block_map(uds_forget(vdo->block_map)); + vdo_free_hash_zones(uds_forget(vdo->hash_zones)); + vdo_free_physical_zones(uds_forget(vdo->physical_zones)); + vdo_free_logical_zones(uds_forget(vdo->logical_zones)); + + if (vdo->threads != NULL) { + for (i = 0; i < vdo->thread_config.thread_count; i++) { + free_listeners(&vdo->threads[i]); + vdo_free_work_queue(uds_forget(vdo->threads[i].queue)); + } + uds_free(uds_forget(vdo->threads)); + } + + uninitialize_thread_config(&vdo->thread_config); + + if (vdo->compression_context != NULL) { + for (i = 0; i < vdo->device_config->thread_counts.cpu_threads; i++) + uds_free(uds_forget(vdo->compression_context[i])); + + uds_free(uds_forget(vdo->compression_context)); + } + + /* + * The call to kobject_put on the kobj sysfs node will decrement its reference count; when + * the count goes to zero the VDO object will be freed as a side effect. + */ + if (!vdo->sysfs_added) + uds_free(vdo); + else + kobject_put(&vdo->vdo_directory); +} + +static int initialize_super_block(struct vdo *vdo, struct vdo_super_block *super_block) +{ + int result; + + result = uds_allocate(VDO_BLOCK_SIZE, char, "encoded super block", + (char **) &vdo->super_block.buffer); + if (result != VDO_SUCCESS) + return result; + + return allocate_vio_components(vdo, VIO_TYPE_SUPER_BLOCK, + VIO_PRIORITY_METADATA, NULL, 1, + (char *) super_block->buffer, + &vdo->super_block.vio); +} + +/** + * finish_reading_super_block() - Continue after loading the super block. + * @completion: The super block vio. + * + * This callback is registered in vdo_load_super_block(). + */ +static void finish_reading_super_block(struct vdo_completion *completion) +{ + struct vdo_super_block *super_block = + container_of(as_vio(completion), struct vdo_super_block, vio); + + vdo_continue_completion(uds_forget(completion->parent), + vdo_decode_super_block(super_block->buffer)); +} + +/** + * handle_super_block_read_error() - Handle an error reading the super block. + * @completion: The super block vio. + * + * This error handler is registered in vdo_load_super_block(). + */ +static void handle_super_block_read_error(struct vdo_completion *completion) +{ + vio_record_metadata_io_error(as_vio(completion)); + finish_reading_super_block(completion); +} + +static void read_super_block_endio(struct bio *bio) +{ + struct vio *vio = bio->bi_private; + struct vdo_completion *parent = vio->completion.parent; + + continue_vio_after_io(vio, finish_reading_super_block, + parent->callback_thread_id); +} + +/** + * vdo_load_super_block() - Allocate a super block and read its contents from storage. 
+ * @vdo: The vdo containing the super block on disk. + * @parent: The completion to notify after loading the super block. + */ +void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent) +{ + int result; + + result = initialize_super_block(vdo, &vdo->super_block); + if (result != VDO_SUCCESS) { + vdo_continue_completion(parent, result); + return; + } + + vdo->super_block.vio.completion.parent = parent; + submit_metadata_vio(&vdo->super_block.vio, + vdo_get_data_region_start(vdo->geometry), + read_super_block_endio, + handle_super_block_read_error, + REQ_OP_READ); +} + +/** + * pool_stats_release() - Signal that sysfs stats have been shut down. + * @directory: The vdo stats directory. + */ +static void pool_stats_release(struct kobject *directory) +{ + struct vdo *vdo = container_of(directory, struct vdo, stats_directory); + + complete(&vdo->stats_shutdown); +} + +ATTRIBUTE_GROUPS(vdo_pool_stats); +static const struct kobj_type stats_directory_type = { + .release = pool_stats_release, + .sysfs_ops = &vdo_pool_stats_sysfs_ops, + .default_groups = vdo_pool_stats_groups, +}; + +/** + * vdo_add_sysfs_stats_dir() - Add the stats directory to the vdo sysfs directory. + * @vdo: The vdo. + * + * Return: VDO_SUCCESS or an error. + */ +int vdo_add_sysfs_stats_dir(struct vdo *vdo) +{ + int result; + + kobject_init(&vdo->stats_directory, &stats_directory_type); + result = kobject_add(&vdo->stats_directory, &vdo->vdo_directory, "statistics"); + if (result != 0) + return VDO_CANT_ADD_SYSFS_NODE; + + return VDO_SUCCESS; +} + +/** + * vdo_get_backing_device() - Get the block device object underlying a vdo. + * @vdo: The vdo. + * + * Return: The vdo's current block device. + */ +struct block_device *vdo_get_backing_device(const struct vdo *vdo) +{ + return vdo->device_config->owned_device->bdev; +} + +/** + * vdo_get_device_name() - Get the device name associated with the vdo target. + * @target: The target device interface. + * + * Return: The block device name. + */ +const char *vdo_get_device_name(const struct dm_target *target) +{ + return dm_device_name(dm_table_get_md(target->table)); +} + +/** + * vdo_synchronous_flush() - Issue a flush request and wait for it to complete. + * @vdo: The vdo. + * + * Return: VDO_SUCCESS or an error. + */ +int vdo_synchronous_flush(struct vdo *vdo) +{ + int result; + struct bio bio; + + bio_init(&bio, vdo_get_backing_device(vdo), 0, 0, REQ_OP_WRITE | REQ_PREFLUSH); + submit_bio_wait(&bio); + result = blk_status_to_errno(bio.bi_status); + + atomic64_inc(&vdo->stats.flush_out); + if (result != 0) { + uds_log_error_strerror(result, "synchronous flush failed"); + result = -EIO; + } + + bio_uninit(&bio); + return result; +} + +/** + * vdo_get_state() - Get the current state of the vdo. + * @vdo: The vdo. + + * Context: This method may be called from any thread. + * + * Return: The current state of the vdo. + */ +enum vdo_state vdo_get_state(const struct vdo *vdo) +{ + enum vdo_state state = atomic_read(&vdo->state); + + /* pairs with barriers where state field is changed */ + smp_rmb(); + return state; +} + +/** + * vdo_set_state() - Set the current state of the vdo. + * @vdo: The vdo whose state is to be set. + * @state: The new state of the vdo. + * + * Context: This method may be called from any thread. + */ +void vdo_set_state(struct vdo *vdo, enum vdo_state state) +{ + /* pairs with barrier in vdo_get_state */ + smp_wmb(); + atomic_set(&vdo->state, state); +} + +/** + * vdo_get_admin_state() - Get the admin state of the vdo. + * @vdo: The vdo. 
+ * + * Return: The code for the vdo's current admin state. + */ +const struct admin_state_code *vdo_get_admin_state(const struct vdo *vdo) +{ + return vdo_get_admin_state_code(&vdo->admin.state); +} + +/** + * record_vdo() - Record the state of the VDO for encoding in the super block. + */ +static void record_vdo(struct vdo *vdo) +{ + /* This is for backwards compatibility. */ + vdo->states.unused = vdo->geometry.unused; + vdo->states.vdo.state = vdo_get_state(vdo); + vdo->states.block_map = vdo_record_block_map(vdo->block_map); + vdo->states.recovery_journal = vdo_record_recovery_journal(vdo->recovery_journal); + vdo->states.slab_depot = vdo_record_slab_depot(vdo->depot); + vdo->states.layout = vdo->layout; +} + +/** + * continue_super_block_parent() - Continue the parent of a super block save operation. + * @completion: The super block vio. + * + * This callback is registered in vdo_save_components(). + */ +static void continue_super_block_parent(struct vdo_completion *completion) +{ + vdo_continue_completion(uds_forget(completion->parent), completion->result); +} + +/** + * handle_save_error() - Log a super block save error. + * @completion: The super block vio. + * + * This error handler is registered in vdo_save_components(). + */ +static void handle_save_error(struct vdo_completion *completion) +{ + struct vdo_super_block *super_block = + container_of(as_vio(completion), struct vdo_super_block, vio); + + vio_record_metadata_io_error(&super_block->vio); + uds_log_error_strerror(completion->result, "super block save failed"); + /* + * Mark the super block as unwritable so that we won't attempt to write it again. This + * avoids the case where a growth attempt fails writing the super block with the new size, + * but the subsequent attempt to write out the read-only state succeeds. In this case, + * writes which happened just before the suspend would not be visible if the VDO is + * restarted without rebuilding, but, after a read-only rebuild, the effects of those + * writes would reappear. + */ + super_block->unwritable = true; + completion->callback(completion); +} + +static void super_block_write_endio(struct bio *bio) +{ + struct vio *vio = bio->bi_private; + struct vdo_completion *parent = vio->completion.parent; + + continue_vio_after_io(vio, continue_super_block_parent, + parent->callback_thread_id); +} + +/** + * vdo_save_components() - Encode the vdo and save the super block asynchronously. + * @vdo: The vdo whose state is being saved. + * @parent: The completion to notify when the save is complete. + */ +void vdo_save_components(struct vdo *vdo, struct vdo_completion *parent) +{ + struct vdo_super_block *super_block = &vdo->super_block; + + if (super_block->unwritable) { + vdo_continue_completion(parent, VDO_READ_ONLY); + return; + } + + if (super_block->vio.completion.parent != NULL) { + vdo_continue_completion(parent, VDO_COMPONENT_BUSY); + return; + } + + record_vdo(vdo); + + vdo_encode_super_block(super_block->buffer, &vdo->states); + super_block->vio.completion.parent = parent; + super_block->vio.completion.callback_thread_id = parent->callback_thread_id; + submit_metadata_vio(&super_block->vio, + vdo_get_data_region_start(vdo->geometry), + super_block_write_endio, handle_save_error, + REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA); +} + +/** + * vdo_register_read_only_listener() - Register a listener to be notified when the VDO goes + * read-only. + * @vdo: The vdo to register with. + * @listener: The object to notify. 
+ * @notification: The function to call to send the notification. + * @thread_id: The id of the thread on which to send the notification. + * + * Return: VDO_SUCCESS or an error. + */ +int vdo_register_read_only_listener(struct vdo *vdo, void *listener, + vdo_read_only_notification_fn notification, + thread_id_t thread_id) +{ + struct vdo_thread *thread = &vdo->threads[thread_id]; + struct read_only_listener *read_only_listener; + int result; + + result = ASSERT(thread_id != vdo->thread_config.dedupe_thread, + "read only listener not registered on dedupe thread"); + if (result != VDO_SUCCESS) + return result; + + result = uds_allocate(1, struct read_only_listener, __func__, + &read_only_listener); + if (result != VDO_SUCCESS) + return result; + + *read_only_listener = (struct read_only_listener) { + .listener = listener, + .notify = notification, + .next = thread->listeners, + }; + + thread->listeners = read_only_listener; + return VDO_SUCCESS; +} + +/** + * notify_vdo_of_read_only_mode() - Notify a vdo that it is going read-only. + * @listener: The vdo. + * @parent: The completion to notify in order to acknowledge the notification. + * + * This will save the read-only state to the super block. + * + * Implements vdo_read_only_notification_fn. + */ +static void notify_vdo_of_read_only_mode(void *listener, struct vdo_completion *parent) +{ + struct vdo *vdo = listener; + + if (vdo_in_read_only_mode(vdo)) + vdo_finish_completion(parent); + + vdo_set_state(vdo, VDO_READ_ONLY_MODE); + vdo_save_components(vdo, parent); +} + +/** + * vdo_enable_read_only_entry() - Enable a vdo to enter read-only mode on errors. + * @vdo: The vdo to enable. + * + * Return: VDO_SUCCESS or an error. + */ +int vdo_enable_read_only_entry(struct vdo *vdo) +{ + thread_id_t id; + bool is_read_only = vdo_in_read_only_mode(vdo); + struct read_only_notifier *notifier = &vdo->read_only_notifier; + + if (is_read_only) { + notifier->read_only_error = VDO_READ_ONLY; + notifier->state = NOTIFIED; + } else { + notifier->state = MAY_NOT_NOTIFY; + } + + spin_lock_init(¬ifier->lock); + vdo_initialize_completion(¬ifier->completion, vdo, + VDO_READ_ONLY_MODE_COMPLETION); + + for (id = 0; id < vdo->thread_config.thread_count; id++) + vdo->threads[id].is_read_only = is_read_only; + + return vdo_register_read_only_listener(vdo, vdo, notify_vdo_of_read_only_mode, + vdo->thread_config.admin_thread); +} + +/** + * vdo_wait_until_not_entering_read_only_mode() - Wait until no read-only notifications are in + * progress and prevent any subsequent + * notifications. + * @parent: The completion to notify when no threads are entering read-only mode. + * + * Notifications may be re-enabled by calling vdo_allow_read_only_mode_entry(). + */ +void vdo_wait_until_not_entering_read_only_mode(struct vdo_completion *parent) +{ + struct vdo *vdo = parent->vdo; + struct read_only_notifier *notifier = &vdo->read_only_notifier; + + vdo_assert_on_admin_thread(vdo, __func__); + + if (notifier->waiter != NULL) { + vdo_continue_completion(parent, VDO_COMPONENT_BUSY); + return; + } + + spin_lock(¬ifier->lock); + if (notifier->state == NOTIFYING) + notifier->waiter = parent; + else if (notifier->state == MAY_NOTIFY) + notifier->state = MAY_NOT_NOTIFY; + spin_unlock(¬ifier->lock); + + if (notifier->waiter == NULL) { + /* + * A notification was not in progress, and now they are + * disallowed. + */ + vdo_launch_completion(parent); + return; + } +} + +/** + * as_notifier() - Convert a generic vdo_completion to a read_only_notifier. 
+ * @completion: The completion to convert. + * + * Return: The completion as a read_only_notifier. + */ +static inline struct read_only_notifier *as_notifier(struct vdo_completion *completion) +{ + vdo_assert_completion_type(completion, VDO_READ_ONLY_MODE_COMPLETION); + return container_of(completion, struct read_only_notifier, completion); +} + +/** + * finish_entering_read_only_mode() - Complete the process of entering read only mode. + * @completion: The read-only mode completion. + */ +static void finish_entering_read_only_mode(struct vdo_completion *completion) +{ + struct read_only_notifier *notifier = as_notifier(completion); + + vdo_assert_on_admin_thread(completion->vdo, __func__); + + spin_lock(¬ifier->lock); + notifier->state = NOTIFIED; + spin_unlock(¬ifier->lock); + + if (notifier->waiter != NULL) + vdo_continue_completion(uds_forget(notifier->waiter), + completion->result); +} + +/** + * make_thread_read_only() - Inform each thread that the VDO is in read-only mode. + * @completion: The read-only mode completion. + */ +static void make_thread_read_only(struct vdo_completion *completion) +{ + struct vdo *vdo = completion->vdo; + thread_id_t thread_id = completion->callback_thread_id; + struct read_only_notifier *notifier = as_notifier(completion); + struct read_only_listener *listener = completion->parent; + + if (listener == NULL) { + /* This is the first call on this thread */ + struct vdo_thread *thread = &vdo->threads[thread_id]; + + thread->is_read_only = true; + listener = thread->listeners; + if (thread_id == 0) + uds_log_error_strerror(READ_ONCE(notifier->read_only_error), + "Unrecoverable error, entering read-only mode"); + } else { + /* We've just finished notifying a listener */ + listener = listener->next; + } + + if (listener != NULL) { + /* We have a listener to notify */ + vdo_prepare_completion(completion, make_thread_read_only, + make_thread_read_only, thread_id, + listener); + listener->notify(listener->listener, completion); + return; + } + + /* We're done with this thread */ + if (++thread_id == vdo->thread_config.dedupe_thread) { + /* + * We don't want to notify the dedupe thread since it may be + * blocked rebuilding the index. + */ + thread_id++; + } + + if (thread_id >= vdo->thread_config.thread_count) { + /* There are no more threads */ + vdo_prepare_completion(completion, finish_entering_read_only_mode, + finish_entering_read_only_mode, + vdo->thread_config.admin_thread, NULL); + } else { + vdo_prepare_completion(completion, make_thread_read_only, + make_thread_read_only, thread_id, NULL); + } + + vdo_launch_completion(completion); +} + +/** + * vdo_allow_read_only_mode_entry() - Allow the notifier to put the VDO into read-only mode, + * reversing the effects of + * vdo_wait_until_not_entering_read_only_mode(). + * @parent: The object to notify once the operation is complete. + * + * If some thread tried to put the vdo into read-only mode while notifications were disallowed, it + * will be done when this method is called. If that happens, the parent will not be notified until + * the vdo has actually entered read-only mode and attempted to save the super block. + * + * Context: This method may only be called from the admin thread. 
+ */ +void vdo_allow_read_only_mode_entry(struct vdo_completion *parent) +{ + struct vdo *vdo = parent->vdo; + struct read_only_notifier *notifier = &vdo->read_only_notifier; + + vdo_assert_on_admin_thread(vdo, __func__); + + if (notifier->waiter != NULL) { + vdo_continue_completion(parent, VDO_COMPONENT_BUSY); + return; + } + + spin_lock(¬ifier->lock); + if (notifier->state == MAY_NOT_NOTIFY) { + if (notifier->read_only_error == VDO_SUCCESS) { + notifier->state = MAY_NOTIFY; + } else { + notifier->state = NOTIFYING; + notifier->waiter = parent; + } + } + spin_unlock(¬ifier->lock); + + if (notifier->waiter == NULL) { + /* We're done */ + vdo_launch_completion(parent); + return; + } + + /* Do the pending notification. */ + make_thread_read_only(¬ifier->completion); +} + +/** + * vdo_enter_read_only_mode() - Put a VDO into read-only mode and save the read-only state in the + * super block. + * @vdo: The vdo. + * @error_code: The error which caused the VDO to enter read-only mode. + * + * This method is a no-op if the VDO is already read-only. + */ +void vdo_enter_read_only_mode(struct vdo *vdo, int error_code) +{ + bool notify = false; + thread_id_t thread_id = vdo_get_callback_thread_id(); + struct read_only_notifier *notifier = &vdo->read_only_notifier; + struct vdo_thread *thread; + + if (thread_id != VDO_INVALID_THREAD_ID) { + thread = &vdo->threads[thread_id]; + if (thread->is_read_only) { + /* This thread has already gone read-only. */ + return; + } + + /* Record for this thread that the VDO is read-only. */ + thread->is_read_only = true; + } + + spin_lock(¬ifier->lock); + if (notifier->read_only_error == VDO_SUCCESS) { + WRITE_ONCE(notifier->read_only_error, error_code); + if (notifier->state == MAY_NOTIFY) { + notifier->state = NOTIFYING; + notify = true; + } + } + spin_unlock(¬ifier->lock); + + if (!notify) { + /* The notifier is already aware of a read-only error */ + return; + } + + /* Initiate a notification starting on the lowest numbered thread. */ + vdo_launch_completion_callback(¬ifier->completion, make_thread_read_only, 0); +} + +/** + * vdo_is_read_only() - Check whether the VDO is read-only. + * @vdo: The vdo. + * + * Return: true if the vdo is read-only. + * + * This method may be called from any thread, as opposed to examining the VDO's state field which + * is only safe to check from the admin thread. + */ +bool vdo_is_read_only(struct vdo *vdo) +{ + return vdo->threads[vdo_get_callback_thread_id()].is_read_only; +} + +/** + * vdo_in_read_only_mode() - Check whether a vdo is in read-only mode. + * @vdo: The vdo to query. + * + * Return: true if the vdo is in read-only mode. + */ +bool vdo_in_read_only_mode(const struct vdo *vdo) +{ + return (vdo_get_state(vdo) == VDO_READ_ONLY_MODE); +} + +/** + * vdo_in_recovery_mode() - Check whether the vdo is in recovery mode. + * @vdo: The vdo to query. + * + * Return: true if the vdo is in recovery mode. + */ +bool vdo_in_recovery_mode(const struct vdo *vdo) +{ + return (vdo_get_state(vdo) == VDO_RECOVERING); +} + +/** + * vdo_enter_recovery_mode() - Put the vdo into recovery mode. + * @vdo: The vdo. + */ +void vdo_enter_recovery_mode(struct vdo *vdo) +{ + vdo_assert_on_admin_thread(vdo, __func__); + + if (vdo_in_read_only_mode(vdo)) + return; + + uds_log_info("Entering recovery mode"); + vdo_set_state(vdo, VDO_RECOVERING); +} + +/** + * complete_synchronous_action() - Signal the waiting thread that a synchronous action is complete. + * @completion: The sync completion. 
+ */ +static void complete_synchronous_action(struct vdo_completion *completion) +{ + vdo_assert_completion_type(completion, VDO_SYNC_COMPLETION); + complete(&(container_of(completion, struct sync_completion, + vdo_completion)->completion)); +} + +/** + * perform_synchronous_action() - Launch an action on a VDO thread and wait for it to complete. + * @vdo: The vdo. + * @action: The callback to launch. + * @thread_id: The thread on which to run the action. + * @parent: The parent of the sync completion (may be NULL). + */ +static int perform_synchronous_action(struct vdo *vdo, vdo_action_fn action, + thread_id_t thread_id, void *parent) +{ + struct sync_completion sync; + + vdo_initialize_completion(&sync.vdo_completion, vdo, VDO_SYNC_COMPLETION); + init_completion(&sync.completion); + sync.vdo_completion.parent = parent; + vdo_launch_completion_callback(&sync.vdo_completion, action, thread_id); + wait_for_completion(&sync.completion); + return sync.vdo_completion.result; +} + +/** + * set_compression_callback() - Callback to turn compression on or off. + * @completion: The completion. + */ +static void set_compression_callback(struct vdo_completion *completion) +{ + struct vdo *vdo = completion->vdo; + bool *enable = completion->parent; + bool was_enabled = vdo_get_compressing(vdo); + + if (*enable != was_enabled) { + WRITE_ONCE(vdo->compressing, *enable); + if (was_enabled) { + /* Signal the packer to flush since compression has been disabled. */ + vdo_flush_packer(vdo->packer); + } + } + + uds_log_info("compression is %s", (*enable ? "enabled" : "disabled")); + *enable = was_enabled; + complete_synchronous_action(completion); +} + +/** + * vdo_set_compressing() - Turn compression on or off. + * @vdo: The vdo. + * @enable: Whether to enable or disable compression. + * + * Return: Whether compression was previously on or off. + */ +bool vdo_set_compressing(struct vdo *vdo, bool enable) +{ + perform_synchronous_action(vdo, set_compression_callback, + vdo->thread_config.packer_thread, + &enable); + return enable; +} + +/** + * vdo_get_compressing() - Get whether compression is enabled in a vdo. + * @vdo: The vdo. + * + * Return: State of compression. + */ +bool vdo_get_compressing(struct vdo *vdo) +{ + return READ_ONCE(vdo->compressing); +} + +static size_t get_block_map_cache_size(const struct vdo *vdo) +{ + return ((size_t) vdo->device_config->cache_size) * VDO_BLOCK_SIZE; +} + +static struct error_statistics __must_check get_vdo_error_statistics(const struct vdo *vdo) +{ + /* + * The error counts can be incremented from arbitrary threads and so must be incremented + * atomically, but they are just statistics with no semantics that could rely on memory + * order, so unfenced reads are sufficient. 
+ */ + const struct atomic_statistics *atoms = &vdo->stats; + + return (struct error_statistics) { + .invalid_advice_pbn_count = atomic64_read(&atoms->invalid_advice_pbn_count), + .no_space_error_count = atomic64_read(&atoms->no_space_error_count), + .read_only_error_count = atomic64_read(&atoms->read_only_error_count), + }; +} + +static void copy_bio_stat(struct bio_stats *b, const struct atomic_bio_stats *a) +{ + b->read = atomic64_read(&a->read); + b->write = atomic64_read(&a->write); + b->discard = atomic64_read(&a->discard); + b->flush = atomic64_read(&a->flush); + b->empty_flush = atomic64_read(&a->empty_flush); + b->fua = atomic64_read(&a->fua); +} + +static struct bio_stats subtract_bio_stats(struct bio_stats minuend, + struct bio_stats subtrahend) +{ + return (struct bio_stats) { + .read = minuend.read - subtrahend.read, + .write = minuend.write - subtrahend.write, + .discard = minuend.discard - subtrahend.discard, + .flush = minuend.flush - subtrahend.flush, + .empty_flush = minuend.empty_flush - subtrahend.empty_flush, + .fua = minuend.fua - subtrahend.fua, + }; +} + +/** + * vdo_get_physical_blocks_allocated() - Get the number of physical blocks in use by user data. + * @vdo: The vdo. + * + * Return: The number of blocks allocated for user data. + */ +static block_count_t __must_check vdo_get_physical_blocks_allocated(const struct vdo *vdo) +{ + return (vdo_get_slab_depot_allocated_blocks(vdo->depot) - + vdo_get_journal_block_map_data_blocks_used(vdo->recovery_journal)); +} + +/** + * vdo_get_physical_blocks_overhead() - Get the number of physical blocks used by vdo metadata. + * @vdo: The vdo. + * + * Return: The number of overhead blocks. + */ +static block_count_t __must_check vdo_get_physical_blocks_overhead(const struct vdo *vdo) +{ + /* + * config.physical_blocks is mutated during resize and is in a packed structure, + * but resize runs on admin thread. + * TODO: Verify that this is always safe. + */ + return (vdo->states.vdo.config.physical_blocks - + vdo_get_slab_depot_data_blocks(vdo->depot) + + vdo_get_journal_block_map_data_blocks_used(vdo->recovery_journal)); +} + +static const char *vdo_describe_state(enum vdo_state state) +{ + /* These strings should all fit in the 15 chars of VDOStatistics.mode. */ + switch (state) { + case VDO_RECOVERING: + return "recovering"; + + case VDO_READ_ONLY_MODE: + return "read-only"; + + default: + return "normal"; + } +} + +/** + * get_vdo_statistics() - Populate a vdo_statistics structure on the admin thread. + * @vdo: The vdo. + * @stats: The statistics structure to populate. + */ +static void get_vdo_statistics(const struct vdo *vdo, struct vdo_statistics *stats) +{ + struct recovery_journal *journal = vdo->recovery_journal; + enum vdo_state state = vdo_get_state(vdo); + + vdo_assert_on_admin_thread(vdo, __func__); + + /* start with a clean slate */ + memset(stats, 0, sizeof(struct vdo_statistics)); + + /* + * These are immutable properties of the vdo object, so it is safe to query them from any + * thread. + */ + stats->version = STATISTICS_VERSION; + stats->logical_blocks = vdo->states.vdo.config.logical_blocks; + /* + * config.physical_blocks is mutated during resize and is in a packed structure, but resize + * runs on the admin thread. 
+ * TODO: verify that this is always safe + */ + stats->physical_blocks = vdo->states.vdo.config.physical_blocks; + stats->block_size = VDO_BLOCK_SIZE; + stats->complete_recoveries = vdo->states.vdo.complete_recoveries; + stats->read_only_recoveries = vdo->states.vdo.read_only_recoveries; + stats->block_map_cache_size = get_block_map_cache_size(vdo); + + /* The callees are responsible for thread-safety. */ + stats->data_blocks_used = vdo_get_physical_blocks_allocated(vdo); + stats->overhead_blocks_used = vdo_get_physical_blocks_overhead(vdo); + stats->logical_blocks_used = vdo_get_recovery_journal_logical_blocks_used(journal); + vdo_get_slab_depot_statistics(vdo->depot, stats); + stats->journal = vdo_get_recovery_journal_statistics(journal); + stats->packer = vdo_get_packer_statistics(vdo->packer); + stats->block_map = vdo_get_block_map_statistics(vdo->block_map); + vdo_get_dedupe_statistics(vdo->hash_zones, stats); + stats->errors = get_vdo_error_statistics(vdo); + stats->in_recovery_mode = (state == VDO_RECOVERING); + snprintf(stats->mode, sizeof(stats->mode), "%s", vdo_describe_state(state)); + + stats->instance = vdo->instance; + stats->current_vios_in_progress = get_data_vio_pool_active_requests(vdo->data_vio_pool); + stats->max_vios = get_data_vio_pool_maximum_requests(vdo->data_vio_pool); + + stats->flush_out = atomic64_read(&vdo->stats.flush_out); + stats->logical_block_size = vdo->device_config->logical_block_size; + copy_bio_stat(&stats->bios_in, &vdo->stats.bios_in); + copy_bio_stat(&stats->bios_in_partial, &vdo->stats.bios_in_partial); + copy_bio_stat(&stats->bios_out, &vdo->stats.bios_out); + copy_bio_stat(&stats->bios_meta, &vdo->stats.bios_meta); + copy_bio_stat(&stats->bios_journal, &vdo->stats.bios_journal); + copy_bio_stat(&stats->bios_page_cache, &vdo->stats.bios_page_cache); + copy_bio_stat(&stats->bios_out_completed, &vdo->stats.bios_out_completed); + copy_bio_stat(&stats->bios_meta_completed, &vdo->stats.bios_meta_completed); + copy_bio_stat(&stats->bios_journal_completed, + &vdo->stats.bios_journal_completed); + copy_bio_stat(&stats->bios_page_cache_completed, + &vdo->stats.bios_page_cache_completed); + copy_bio_stat(&stats->bios_acknowledged, &vdo->stats.bios_acknowledged); + copy_bio_stat(&stats->bios_acknowledged_partial, &vdo->stats.bios_acknowledged_partial); + stats->bios_in_progress = + subtract_bio_stats(stats->bios_in, stats->bios_acknowledged); + uds_get_memory_stats(&stats->memory_usage.bytes_used, + &stats->memory_usage.peak_bytes_used); +} + +/** + * vdo_fetch_statistics_callback() - Action to populate a vdo_statistics + * structure on the admin thread. + * @completion: The completion. + * + * This callback is registered in vdo_fetch_statistics(). + */ +static void vdo_fetch_statistics_callback(struct vdo_completion *completion) +{ + get_vdo_statistics(completion->vdo, completion->parent); + complete_synchronous_action(completion); +} + +/** + * vdo_fetch_statistics() - Fetch statistics on the correct thread. + * @vdo: The vdo. + * @stats: The vdo statistics are returned here. + */ +void vdo_fetch_statistics(struct vdo *vdo, struct vdo_statistics *stats) +{ + perform_synchronous_action(vdo, vdo_fetch_statistics_callback, + vdo->thread_config.admin_thread, stats); +} + +/** + * vdo_get_callback_thread_id() - Get the id of the callback thread on which a completion is + * currently running. + * + * Return: The current thread ID, or -1 if no such thread. 
+ */ +thread_id_t vdo_get_callback_thread_id(void) +{ + struct vdo_work_queue *queue = vdo_get_current_work_queue(); + struct vdo_thread *thread; + thread_id_t thread_id; + + if (queue == NULL) + return VDO_INVALID_THREAD_ID; + + thread = vdo_get_work_queue_owner(queue); + thread_id = thread->thread_id; + + if (PARANOID_THREAD_CONSISTENCY_CHECKS) { + BUG_ON(thread_id >= thread->vdo->thread_config.thread_count); + BUG_ON(thread != &thread->vdo->threads[thread_id]); + } + + return thread_id; +} + +/** + * vdo_dump_status() - Dump status information about a vdo to the log for debugging. + * @vdo: The vdo to dump. + */ +void vdo_dump_status(const struct vdo *vdo) +{ + zone_count_t zone; + + vdo_dump_flusher(vdo->flusher); + vdo_dump_recovery_journal_statistics(vdo->recovery_journal); + vdo_dump_packer(vdo->packer); + vdo_dump_slab_depot(vdo->depot); + + for (zone = 0; zone < vdo->thread_config.logical_zone_count; zone++) + vdo_dump_logical_zone(&vdo->logical_zones->zones[zone]); + + for (zone = 0; zone < vdo->thread_config.physical_zone_count; zone++) + vdo_dump_physical_zone(&vdo->physical_zones->zones[zone]); + + vdo_dump_hash_zones(vdo->hash_zones); +} + +/** + * vdo_assert_on_admin_thread() - Assert that we are running on the admin thread. + * @vdo: The vdo. + * @name: The name of the function which should be running on the admin thread (for logging). + */ +void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name) +{ + ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread), + "%s called on admin thread", name); +} + +/** + * vdo_assert_on_logical_zone_thread() - Assert that this function was called on the specified + * logical zone thread. + * @vdo: The vdo. + * @logical_zone: The number of the logical zone. + * @name: The name of the calling function. + */ +void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone, + const char *name) +{ + ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == + vdo->thread_config.logical_threads[logical_zone]), + "%s called on logical thread", name); +} + +/** + * vdo_assert_on_physical_zone_thread() - Assert that this function was called on the specified + * physical zone thread. + * @vdo: The vdo. + * @physical_zone: The number of the physical zone. + * @name: The name of the calling function. + */ +void vdo_assert_on_physical_zone_thread(const struct vdo *vdo, + zone_count_t physical_zone, const char *name) +{ + ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == + vdo->thread_config.physical_threads[physical_zone]), + "%s called on physical thread", name); +} + +/** + * vdo_get_physical_zone() - Get the physical zone responsible for a given physical block number. + * @vdo: The vdo containing the physical zones. + * @pbn: The PBN of the data block. + * @zone_ptr: A pointer to return the physical zone. + * + * Gets the physical zone responsible for a given physical block number of a data block in this vdo + * instance, or of the zero block (for which a NULL zone is returned). For any other block number + * that is not in the range of valid data block numbers in any slab, an error will be returned. + * This function is safe to call on invalid block numbers; it will not put the vdo into read-only + * mode. + * + * Return: VDO_SUCCESS or VDO_OUT_OF_RANGE if the block number is invalid or an error code for any + * other failure. 
+ */ +int vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn, + struct physical_zone **zone_ptr) +{ + struct vdo_slab *slab; + int result; + + if (pbn == VDO_ZERO_BLOCK) { + *zone_ptr = NULL; + return VDO_SUCCESS; + } + + /* + * Used because it does a more restrictive bounds check than vdo_get_slab(), and done first + * because it won't trigger read-only mode on an invalid PBN. + */ + if (!vdo_is_physical_data_block(vdo->depot, pbn)) + return VDO_OUT_OF_RANGE; + + /* With the PBN already checked, we should always succeed in finding a slab. */ + slab = vdo_get_slab(vdo->depot, pbn); + result = ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs"); + if (result != VDO_SUCCESS) + return result; + + *zone_ptr = &vdo->physical_zones->zones[slab->allocator->zone_number]; + return VDO_SUCCESS; +} -- cgit v1.2.3 From f7f46761ccd9b46392ff14e22b8c2ed9f5ecc06d Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Fri, 25 Aug 2023 14:36:46 -0400 Subject: dm vdo io-submitter: rename to vdo_submit_metadata_vio Rename submit_metadata_vio() to vdo_submit_metadata_vio(). Reviewed-by: Susan LeGendre-McGhee Signed-off-by: Mike Snitzer Signed-off-by: Matthew Sakai --- drivers/md/dm-vdo/block-map.c | 38 +++++++++++++++++----------------- drivers/md/dm-vdo/io-submitter.c | 10 ++++----- drivers/md/dm-vdo/io-submitter.h | 20 +++++++++--------- drivers/md/dm-vdo/recovery-journal.c | 4 ++-- drivers/md/dm-vdo/repair.c | 7 +++---- drivers/md/dm-vdo/slab-depot.c | 40 ++++++++++++++++++------------------ drivers/md/dm-vdo/vdo.c | 18 ++++++++-------- 7 files changed, 68 insertions(+), 69 deletions(-) (limited to 'drivers/md/dm-vdo/vdo.c') diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c index e703cb9eafc6..0df1517294e1 100644 --- a/drivers/md/dm-vdo/block-map.c +++ b/drivers/md/dm-vdo/block-map.c @@ -786,8 +786,8 @@ static int __must_check launch_page_load(struct page_info *info, cache->outstanding_reads++; ADD_ONCE(cache->stats.pages_loaded, 1); callback = (cache->rebuilding ? 
handle_rebuild_read_error : handle_load_error); - submit_metadata_vio(info->vio, pbn, load_cache_page_endio, - callback, REQ_OP_READ | REQ_PRIO); + vdo_submit_metadata_vio(info->vio, pbn, load_cache_page_endio, + callback, REQ_OP_READ | REQ_PRIO); return VDO_SUCCESS; } @@ -1055,10 +1055,10 @@ static void page_is_written_out(struct vdo_completion *completion) if (!page->header.initialized) { page->header.initialized = true; - submit_metadata_vio(info->vio, info->pbn, - write_cache_page_endio, - handle_page_write_error, - (REQ_OP_WRITE | REQ_PRIO | REQ_PREFLUSH)); + vdo_submit_metadata_vio(info->vio, info->pbn, + write_cache_page_endio, + handle_page_write_error, + REQ_OP_WRITE | REQ_PRIO | REQ_PREFLUSH); return; } @@ -1123,8 +1123,8 @@ static void write_pages(struct vdo_completion *flush_completion) continue; } ADD_ONCE(info->cache->stats.pages_saved, 1); - submit_metadata_vio(info->vio, info->pbn, write_cache_page_endio, - handle_page_write_error, REQ_OP_WRITE | REQ_PRIO); + vdo_submit_metadata_vio(info->vio, info->pbn, write_cache_page_endio, + handle_page_write_error, REQ_OP_WRITE | REQ_PRIO); } if (has_unflushed_pages) { @@ -1632,9 +1632,9 @@ static void write_initialized_page(struct vdo_completion *completion) if (zone->flusher == tree_page) operation |= REQ_PREFLUSH; - submit_metadata_vio(vio, vdo_get_block_map_page_pbn(page), - write_page_endio, handle_write_error, - operation); + vdo_submit_metadata_vio(vio, vdo_get_block_map_page_pbn(page), + write_page_endio, handle_write_error, + operation); } static void write_page_endio(struct bio *bio) @@ -1689,9 +1689,9 @@ static void write_page(struct tree_page *tree_page, struct pooled_vio *vio) } page->header.initialized = true; - submit_metadata_vio(&vio->vio, vdo_get_block_map_page_pbn(page), - write_page_endio, handle_write_error, - REQ_OP_WRITE | REQ_PRIO); + vdo_submit_metadata_vio(&vio->vio, vdo_get_block_map_page_pbn(page), + write_page_endio, handle_write_error, + REQ_OP_WRITE | REQ_PRIO); } /* Release a lock on a page which was being loaded or allocated. */ @@ -1879,8 +1879,8 @@ static void load_page(struct waiter *waiter, void *context) physical_block_number_t pbn = lock->tree_slots[lock->height - 1].block_map_slot.pbn; pooled->vio.completion.parent = data_vio; - submit_metadata_vio(&pooled->vio, pbn, load_page_endio, - handle_io_error, REQ_OP_READ | REQ_PRIO); + vdo_submit_metadata_vio(&pooled->vio, pbn, load_page_endio, + handle_io_error, REQ_OP_READ | REQ_PRIO); } /* @@ -2613,9 +2613,9 @@ static void traverse(struct cursor *cursor) next_level->page_index = entry_index; next_level->slot = 0; level->slot++; - submit_metadata_vio(&cursor->vio->vio, location.pbn, - traversal_endio, continue_traversal, - REQ_OP_READ | REQ_PRIO); + vdo_submit_metadata_vio(&cursor->vio->vio, location.pbn, + traversal_endio, continue_traversal, + REQ_OP_READ | REQ_PRIO); return; } } diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c index 6952ee572a7b..8ca9825357e6 100644 --- a/drivers/md/dm-vdo/io-submitter.c +++ b/drivers/md/dm-vdo/io-submitter.c @@ -320,7 +320,7 @@ void submit_data_vio_io(struct data_vio *data_vio) } /** - * vdo_submit_metadata_io() - Submit I/O for a metadata vio. + * __submit_metadata_vio() - Submit I/O for a metadata vio. 
* @vio: the vio for which to issue I/O * @physical: the physical block number to read or write * @callback: the bio endio function which will be called after the I/O completes @@ -336,12 +336,12 @@ void submit_data_vio_io(struct data_vio *data_vio) * no error can occur on the bio queue. Currently this is true for all callers, but additional care * will be needed if this ever changes. */ -void vdo_submit_metadata_io(struct vio *vio, physical_block_number_t physical, - bio_end_io_t callback, vdo_action_fn error_handler, - unsigned int operation, char *data) +void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical, + bio_end_io_t callback, vdo_action_fn error_handler, + unsigned int operation, char *data) { - struct vdo_completion *completion = &vio->completion; int result; + struct vdo_completion *completion = &vio->completion; const struct admin_state_code *code = vdo_get_admin_state(completion->vdo); diff --git a/drivers/md/dm-vdo/io-submitter.h b/drivers/md/dm-vdo/io-submitter.h index 96befb3739e9..9e97cfc91bee 100644 --- a/drivers/md/dm-vdo/io-submitter.h +++ b/drivers/md/dm-vdo/io-submitter.h @@ -24,24 +24,24 @@ void process_vio_io(struct vdo_completion *completion); void submit_data_vio_io(struct data_vio *data_vio); -void vdo_submit_metadata_io(struct vio *vio, physical_block_number_t physical, - bio_end_io_t callback, vdo_action_fn error_handler, - unsigned int operation, char *data); +void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical, + bio_end_io_t callback, vdo_action_fn error_handler, + unsigned int operation, char *data); -static inline void submit_metadata_vio(struct vio *vio, physical_block_number_t physical, - bio_end_io_t callback, vdo_action_fn error_handler, - unsigned int operation) +static inline void vdo_submit_metadata_vio(struct vio *vio, physical_block_number_t physical, + bio_end_io_t callback, vdo_action_fn error_handler, + unsigned int operation) { - vdo_submit_metadata_io(vio, physical, callback, error_handler, - operation, vio->data); + __submit_metadata_vio(vio, physical, callback, error_handler, + operation, vio->data); } static inline void submit_flush_vio(struct vio *vio, bio_end_io_t callback, vdo_action_fn error_handler) { /* FIXME: Can we just use REQ_OP_FLUSH? */ - vdo_submit_metadata_io(vio, 0, callback, error_handler, - REQ_OP_WRITE | REQ_PREFLUSH, NULL); + __submit_metadata_vio(vio, 0, callback, error_handler, + REQ_OP_WRITE | REQ_PREFLUSH, NULL); } #endif /* VDO_IO_SUBMITTER_H */ diff --git a/drivers/md/dm-vdo/recovery-journal.c b/drivers/md/dm-vdo/recovery-journal.c index 463f35ae1430..55411eff5bd7 100644 --- a/drivers/md/dm-vdo/recovery-journal.c +++ b/drivers/md/dm-vdo/recovery-journal.c @@ -1394,8 +1394,8 @@ static void write_block(struct waiter *waiter, void *context __always_unused) * the data being referenced is stable. The FUA is necessary to ensure that the journal * block itself is stable before allowing overwrites of the lbn's previous data. 
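	 *
	 * Assuming WRITE_FLAGS bundles exactly those two requirements (its
	 * definition lies outside this hunk), it is presumably something like
	 * (REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA): the preflush makes the
	 * referenced data stable first, and the FUA makes the journal block
	 * itself durable before the lbn's previous data can be overwritten.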
*/ - submit_metadata_vio(&block->vio, journal->origin + block->block_number, - complete_write_endio, handle_write_error, WRITE_FLAGS); + vdo_submit_metadata_vio(&block->vio, journal->origin + block->block_number, + complete_write_endio, handle_write_error, WRITE_FLAGS); } diff --git a/drivers/md/dm-vdo/repair.c b/drivers/md/dm-vdo/repair.c index 2cf99a7ce958..a75278eb8aa4 100644 --- a/drivers/md/dm-vdo/repair.c +++ b/drivers/md/dm-vdo/repair.c @@ -1748,10 +1748,9 @@ void vdo_repair(struct vdo_completion *parent) remaining -= blocks; } - for (vio_count = 0; - vio_count < repair->vio_count; + for (vio_count = 0; vio_count < repair->vio_count; vio_count++, pbn += MAX_BLOCKS_PER_VIO) { - submit_metadata_vio(&repair->vios[vio_count], pbn, read_journal_endio, - handle_journal_load_error, REQ_OP_READ); + vdo_submit_metadata_vio(&repair->vios[vio_count], pbn, read_journal_endio, + handle_journal_load_error, REQ_OP_READ); } } diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c index 1ad97d2bf80b..5fc4a1cdfafc 100644 --- a/drivers/md/dm-vdo/slab-depot.c +++ b/drivers/md/dm-vdo/slab-depot.c @@ -338,8 +338,8 @@ static void launch_write(struct slab_summary_block *block) pbn = (depot->summary_origin + (VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE * allocator->zone_number) + block->index); - submit_metadata_vio(&block->vio, pbn, write_slab_summary_endio, - handle_write_error, REQ_OP_WRITE | REQ_PREFLUSH); + vdo_submit_metadata_vio(&block->vio, pbn, write_slab_summary_endio, + handle_write_error, REQ_OP_WRITE | REQ_PREFLUSH); } /** @@ -771,8 +771,8 @@ static void write_slab_journal_block(struct waiter *waiter, void *context) * This block won't be read in recovery until the slab summary is updated to refer to it. * The slab summary update does a flush which is sufficient to protect us from VDO-2331. */ - submit_metadata_vio(uds_forget(vio), block_number, write_slab_journal_endio, - complete_write, REQ_OP_WRITE); + vdo_submit_metadata_vio(uds_forget(vio), block_number, write_slab_journal_endio, + complete_write, REQ_OP_WRITE); /* Since the write is submitted, the tail block structure can be reused. 
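	 * (An aside on the call above, hedged: uds_forget() presumably yields
	 * the vio pointer while clearing the local variable, so this path
	 * cannot touch the vio again once the write is in flight.)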
*/ journal->tail++; @@ -1205,8 +1205,8 @@ static void write_reference_block(struct waiter *waiter, void *context) block->slab->allocator->ref_counts_statistics.blocks_written + 1); completion->callback_thread_id = ((struct block_allocator *) pooled->context)->thread_id; - submit_metadata_vio(&pooled->vio, pbn, write_reference_block_endio, - handle_io_error, REQ_OP_WRITE | REQ_PREFLUSH); + vdo_submit_metadata_vio(&pooled->vio, pbn, write_reference_block_endio, + handle_io_error, REQ_OP_WRITE | REQ_PREFLUSH); } static void reclaim_journal_space(struct slab_journal *journal) @@ -2268,9 +2268,9 @@ static void load_reference_block(struct waiter *waiter, void *context) size_t block_offset = (block - block->slab->reference_blocks); vio->completion.parent = block; - submit_metadata_vio(vio, block->slab->ref_counts_origin + block_offset, - load_reference_block_endio, handle_io_error, - REQ_OP_READ); + vdo_submit_metadata_vio(vio, block->slab->ref_counts_origin + block_offset, + load_reference_block_endio, handle_io_error, + REQ_OP_READ); } /** @@ -2475,9 +2475,9 @@ static void read_slab_journal_tail(struct waiter *waiter, void *context) vio->completion.parent = journal; vio->completion.callback_thread_id = slab->allocator->thread_id; - submit_metadata_vio(vio, slab->journal_origin + tail_block, - read_slab_journal_tail_endio, handle_load_error, - REQ_OP_READ); + vdo_submit_metadata_vio(vio, slab->journal_origin + tail_block, + read_slab_journal_tail_endio, handle_load_error, + REQ_OP_READ); } /** @@ -2915,9 +2915,9 @@ static void start_scrubbing(struct vdo_completion *completion) return; } - submit_metadata_vio(&scrubber->vio, slab->journal_origin, - read_slab_journal_endio, handle_scrubber_error, - REQ_OP_READ); + vdo_submit_metadata_vio(&scrubber->vio, slab->journal_origin, + read_slab_journal_endio, handle_scrubber_error, + REQ_OP_READ); } /** @@ -4513,9 +4513,9 @@ static void finish_loading_summary(struct vdo_completion *completion) combine_summaries(depot); /* Write the combined summary back out. */ - submit_metadata_vio(as_vio(completion), depot->summary_origin, - write_summary_endio, handle_combining_error, - REQ_OP_WRITE); + vdo_submit_metadata_vio(as_vio(completion), depot->summary_origin, + write_summary_endio, handle_combining_error, + REQ_OP_WRITE); } static void load_summary_endio(struct bio *bio) @@ -4555,8 +4555,8 @@ static void load_slab_summary(void *context, struct vdo_completion *parent) return; } - submit_metadata_vio(vio, depot->summary_origin, load_summary_endio, - handle_combining_error, REQ_OP_READ); + vdo_submit_metadata_vio(vio, depot->summary_origin, load_summary_endio, + handle_combining_error, REQ_OP_READ); } /* Implements vdo_zone_action_fn. 
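 * (Hedged aside: vdo_zone_action_fn is presumably the per-zone callback
 * typedef used by the action manager, taking a context pointer, a zone
 * number, and a parent completion; its definition lies outside this hunk.)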
*/ diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c index b494c11e1d64..1ead769f0150 100644 --- a/drivers/md/dm-vdo/vdo.c +++ b/drivers/md/dm-vdo/vdo.c @@ -813,11 +813,11 @@ void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent) } vdo->super_block.vio.completion.parent = parent; - submit_metadata_vio(&vdo->super_block.vio, - vdo_get_data_region_start(vdo->geometry), - read_super_block_endio, - handle_super_block_read_error, - REQ_OP_READ); + vdo_submit_metadata_vio(&vdo->super_block.vio, + vdo_get_data_region_start(vdo->geometry), + read_super_block_endio, + handle_super_block_read_error, + REQ_OP_READ); } /** @@ -1028,10 +1028,10 @@ void vdo_save_components(struct vdo *vdo, struct vdo_completion *parent) vdo_encode_super_block(super_block->buffer, &vdo->states); super_block->vio.completion.parent = parent; super_block->vio.completion.callback_thread_id = parent->callback_thread_id; - submit_metadata_vio(&super_block->vio, - vdo_get_data_region_start(vdo->geometry), - super_block_write_endio, handle_save_error, - REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA); + vdo_submit_metadata_vio(&super_block->vio, + vdo_get_data_region_start(vdo->geometry), + super_block_write_endio, handle_save_error, + REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA); } /** -- cgit v1.2.3 From ff9199464886500bd0c3df4a5ebae4760607aeda Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 5 Feb 2024 21:04:17 -0500 Subject: dm vdo: fix sparse 'warning: Using plain integer as NULL pointer' Signed-off-by: Mike Snitzer Signed-off-by: Susan LeGendre-McGhee Signed-off-by: Matthew Sakai --- drivers/md/dm-vdo/index.c | 8 ++++---- drivers/md/dm-vdo/vdo.c | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/md/dm-vdo/vdo.c') diff --git a/drivers/md/dm-vdo/index.c b/drivers/md/dm-vdo/index.c index e343a973881d..1596f6ba43a5 100644 --- a/drivers/md/dm-vdo/index.c +++ b/drivers/md/dm-vdo/index.c @@ -732,18 +732,18 @@ static void close_chapters(void *arg) static void stop_chapter_writer(struct chapter_writer *writer) { - struct thread *writer_thread = 0; + struct thread *writer_thread = NULL; uds_lock_mutex(&writer->mutex); - if (writer->thread != 0) { + if (writer->thread != NULL) { writer_thread = writer->thread; - writer->thread = 0; + writer->thread = NULL; writer->stop = true; uds_broadcast_cond(&writer->cond); } uds_unlock_mutex(&writer->mutex); - if (writer_thread != 0) + if (writer_thread != NULL) uds_join_threads(writer_thread); } diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c index 1ead769f0150..e0eddd4007b8 100644 --- a/drivers/md/dm-vdo/vdo.c +++ b/drivers/md/dm-vdo/vdo.c @@ -889,7 +889,8 @@ int vdo_synchronous_flush(struct vdo *vdo) int result; struct bio bio; - bio_init(&bio, vdo_get_backing_device(vdo), 0, 0, REQ_OP_WRITE | REQ_PREFLUSH); + bio_init(&bio, vdo_get_backing_device(vdo), NULL, 0, + REQ_OP_WRITE | REQ_PREFLUSH); submit_bio_wait(&bio); result = blk_status_to_errno(bio.bi_status); -- cgit v1.2.3 From dcd1332bb5ff996bc92a63948ba32bceae40507c Mon Sep 17 00:00:00 2001 From: Susan LeGendre-McGhee Date: Thu, 15 Feb 2024 11:35:15 -0500 Subject: dm vdo: remove internal ticket references Signed-off-by: Susan LeGendre-McGhee Signed-off-by: Matthew Sakai Signed-off-by: Mike Snitzer --- drivers/md/dm-vdo/block-map.c | 8 ++++---- drivers/md/dm-vdo/data-vio.c | 9 +++++---- drivers/md/dm-vdo/dm-vdo-target.c | 12 +++++------- drivers/md/dm-vdo/memory-alloc.c | 8 ++++---- drivers/md/dm-vdo/packer.c | 16 +++++++--------- drivers/md/dm-vdo/packer.h | 2 +- 
drivers/md/dm-vdo/repair.c | 4 ++-- drivers/md/dm-vdo/slab-depot.c | 16 +++++++++++----- drivers/md/dm-vdo/sparse-cache.c | 2 +- drivers/md/dm-vdo/vdo.c | 2 +- drivers/md/dm-vdo/vio.c | 1 - 11 files changed, 41 insertions(+), 39 deletions(-) (limited to 'drivers/md/dm-vdo/vdo.c') diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c index e3fadb5f2c2d..5be400743c03 100644 --- a/drivers/md/dm-vdo/block-map.c +++ b/drivers/md/dm-vdo/block-map.c @@ -542,7 +542,7 @@ static unsigned int distribute_page_over_waitq(struct page_info *info, /* * Increment the busy count once for each pending completion so that this page does not - * stop being busy until all completions have been processed (VDO-83). + * stop being busy until all completions have been processed. */ info->busy += num_pages; @@ -1097,9 +1097,9 @@ static void write_pages(struct vdo_completion *flush_completion) struct vdo_page_cache *cache = ((struct page_info *) flush_completion->parent)->cache; /* - * We need to cache these two values on the stack since in the error case below, it is - * possible for the last page info to cause the page cache to get freed. Hence once we - * launch the last page, it may be unsafe to dereference the cache [VDO-4724]. + * We need to cache these two values on the stack since it is possible for the last + * page info to cause the page cache to get freed. Hence once we launch the last page, + * it may be unsafe to dereference the cache. */ bool has_unflushed_pages = (cache->pages_to_flush > 0); page_count_t pages_in_flush = cache->pages_in_flush; diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c index d77adeb5006e..f6c32dc9a822 100644 --- a/drivers/md/dm-vdo/data-vio.c +++ b/drivers/md/dm-vdo/data-vio.c @@ -453,10 +453,11 @@ static void attempt_logical_block_lock(struct vdo_completion *completion) /* * If the new request is a pure read request (not read-modify-write) and the lock_holder is - * writing and has received an allocation (VDO-2683), service the read request immediately - * by copying data from the lock_holder to avoid having to flush the write out of the - * packer just to prevent the read from waiting indefinitely. If the lock_holder does not - * yet have an allocation, prevent it from blocking in the packer and wait on it. + * writing and has received an allocation, service the read request immediately by copying + * data from the lock_holder to avoid having to flush the write out of the packer just to + * prevent the read from waiting indefinitely. If the lock_holder does not yet have an + * allocation, prevent it from blocking in the packer and wait on it. This is necessary in + * order to prevent returning data that may not have actually been written. */ if (!data_vio->write && READ_ONCE(lock_holder->allocation_succeeded)) { copy_to_bio(data_vio->user_bio, lock_holder->vio.data + data_vio->offset); diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c index 7afd1dfec649..0114fa4d48a2 100644 --- a/drivers/md/dm-vdo/dm-vdo-target.c +++ b/drivers/md/dm-vdo/dm-vdo-target.c @@ -945,13 +945,11 @@ static void vdo_io_hints(struct dm_target *ti, struct queue_limits *limits) * Sets the maximum discard size that will be passed into VDO. This value comes from a * table line value passed in during dmsetup create. * - * The value 1024 is the largest usable value on HD systems. A 2048 sector discard on a - * busy HD system takes 31 seconds. 
We should use a value no higher than 1024, which takes - * 15 to 16 seconds on a busy HD system. - * - * But using large values results in 120 second blocked task warnings in /var/log/kern.log. - * In order to avoid these warnings, we choose to use the smallest reasonable value. See - * VDO-3062 and VDO-3087. + * The value 1024 is the largest usable value on HD systems. A 2048 sector discard on a + * busy HD system takes 31 seconds. We should use a value no higher than 1024, which takes + * 15 to 16 seconds on a busy HD system. However, using large values results in 120 second + * blocked task warnings in kernel logs. In order to avoid these warnings, we choose to + * use the smallest reasonable value. * * The value is displayed in sysfs, and also used by dm-thin to determine whether to pass * down discards. The block layer splits large discards on this boundary when this is set. diff --git a/drivers/md/dm-vdo/memory-alloc.c b/drivers/md/dm-vdo/memory-alloc.c index 3b2bda9248cb..5cd387f9294e 100644 --- a/drivers/md/dm-vdo/memory-alloc.c +++ b/drivers/md/dm-vdo/memory-alloc.c @@ -235,8 +235,8 @@ int uds_allocate_memory(size_t size, size_t align, const char *what, void *ptr) if (p == NULL) { /* * It is possible for kmalloc to fail to allocate memory because there is - * no page available (see VDO-3688). A short sleep may allow the page - * reclaimer to free a page. + * no page available. A short sleep may allow the page reclaimer to + * free a page. */ fsleep(1000); p = kmalloc(size, gfp_flags); @@ -251,8 +251,8 @@ int uds_allocate_memory(size_t size, size_t align, const char *what, void *ptr) UDS_SUCCESS) { /* * It is possible for __vmalloc to fail to allocate memory because there - * are no pages available (see VDO-3661). A short sleep may allow the page - * reclaimer to free enough pages for a small allocation. + * are no pages available. A short sleep may allow the page reclaimer + * to free enough pages for a small allocation. * * For larger allocations, the page_alloc code is racing against the page * reclaimer. If the page reclaimer can stay ahead of page_alloc, the diff --git a/drivers/md/dm-vdo/packer.c b/drivers/md/dm-vdo/packer.c index e391cac6c92d..b0ffb21ec436 100644 --- a/drivers/md/dm-vdo/packer.c +++ b/drivers/md/dm-vdo/packer.c @@ -595,15 +595,13 @@ void vdo_attempt_packing(struct data_vio *data_vio) } /* - * The check of may_vio_block_in_packer() here will set the data_vio's compression state to - * VIO_PACKING if the data_vio is allowed to be compressed (if it has already been - * canceled, we'll fall out here). Once the data_vio is in the VIO_PACKING state, it must - * be guaranteed to be put in a bin before any more requests can be processed by the packer - * thread. Otherwise, a canceling data_vio could attempt to remove the canceled data_vio - * from the packer and fail to rendezvous with it (VDO-2809). We must also make sure that - * we will actually bin the data_vio and not give up on it as being larger than the space - * used in the fullest bin. Hence we must call select_bin() before calling - * may_vio_block_in_packer() (VDO-2826). + * The advance_data_vio_compression_stage() check here verifies that the data_vio is + * allowed to be compressed (if it has already been canceled, we'll fall out here). Once + * the data_vio is in the DATA_VIO_PACKING state, it must be guaranteed to be put in a bin + * before any more requests can be processed by the packer thread. 
Otherwise, a canceling + * data_vio could attempt to remove the canceled data_vio from the packer and fail to + * rendezvous with it. Thus, we must call select_bin() first to ensure that we will + * actually add the data_vio to a bin before advancing to the DATA_VIO_PACKING stage. */ bin = select_bin(packer, data_vio); if ((bin == NULL) || diff --git a/drivers/md/dm-vdo/packer.h b/drivers/md/dm-vdo/packer.h index 2dcc40bd4417..0f3be44710b5 100644 --- a/drivers/md/dm-vdo/packer.h +++ b/drivers/md/dm-vdo/packer.h @@ -58,7 +58,7 @@ struct compressed_block { * * There is one special bin which is used to hold data_vios which have been canceled and removed * from their bin by the packer. These data_vios need to wait for the canceller to rendezvous with - * them (VDO-2809) and so they sit in this special bin. + * them and so they sit in this special bin. */ struct packer_bin { /* List links for packer.packer_bins */ diff --git a/drivers/md/dm-vdo/repair.c b/drivers/md/dm-vdo/repair.c index a75278eb8aa4..847aca9fbe47 100644 --- a/drivers/md/dm-vdo/repair.c +++ b/drivers/md/dm-vdo/repair.c @@ -1504,8 +1504,8 @@ static int extract_new_mappings(struct repair_completion *repair) static noinline int compute_usages(struct repair_completion *repair) { /* - * VDO-5182: function is declared noinline to avoid what is likely a spurious valgrind - * error about this structure being uninitialized. + * This function is declared noinline to avoid a spurious valgrind error regarding the + * following structure being uninitialized. */ struct recovery_point recovery_point = { .sequence_number = repair->tail, diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c index 42126bd60242..5fa7e0838b32 100644 --- a/drivers/md/dm-vdo/slab-depot.c +++ b/drivers/md/dm-vdo/slab-depot.c @@ -334,7 +334,11 @@ static void launch_write(struct slab_summary_block *block) /* * Flush before writing to ensure that the slab journal tail blocks and reference updates - * covered by this summary update are stable (VDO-2332). + * covered by this summary update are stable. Otherwise, a subsequent recovery could + * encounter a slab summary update that refers to a slab journal tail block that has not + * actually been written. In such cases, the slab journal referenced will be treated as + * empty, causing any data within the slab which predates the existing recovery journal + * entries to be lost. */ pbn = (depot->summary_origin + (VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE * allocator->zone_number) + @@ -499,7 +503,7 @@ static void reap_slab_journal(struct slab_journal *journal) * journal block writes can be issued while previous slab summary updates have not yet been * made. Even though those slab journal block writes will be ignored if the slab summary * update is not persisted, they may still overwrite the to-be-reaped slab journal block - * resulting in a loss of reference count updates (VDO-2912). + * resulting in a loss of reference count updates. */ journal->flush_waiter.callback = flush_for_reaping; acquire_vio_from_pool(journal->slab->allocator->vio_pool, @@ -770,7 +774,8 @@ static void write_slab_journal_block(struct vdo_waiter *waiter, void *context) /* * This block won't be read in recovery until the slab summary is updated to refer to it. - * The slab summary update does a flush which is sufficient to protect us from VDO-2331. + * The slab summary update does a flush which is sufficient to protect us from corruption + * due to out of order slab journal, reference block, or block map writes. 
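+	 * Concretely: without that flush, a crash could leave the summary
+	 * pointing at a tail block that never reached the media; recovery
+	 * would then treat the slab journal as empty and lose the reference
+	 * updates it should have replayed.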
*/ vdo_submit_metadata_vio(uds_forget(vio), block_number, write_slab_journal_endio, complete_write, REQ_OP_WRITE); @@ -1201,7 +1206,8 @@ static void write_reference_block(struct vdo_waiter *waiter, void *context) /* * Flush before writing to ensure that the recovery journal and slab journal entries which - * cover this reference update are stable (VDO-2331). + * cover this reference update are stable. This prevents data corruption that can be caused + * by out of order writes. */ WRITE_ONCE(block->slab->allocator->ref_counts_statistics.blocks_written, block->slab->allocator->ref_counts_statistics.blocks_written + 1); @@ -1775,7 +1781,7 @@ static void add_entries(struct slab_journal *journal) (journal->slab->status == VDO_SLAB_REBUILDING)) { /* * Don't add entries while rebuilding or while a partial write is - * outstanding (VDO-2399). + * outstanding, as it could result in reference count corruption. */ break; } diff --git a/drivers/md/dm-vdo/sparse-cache.c b/drivers/md/dm-vdo/sparse-cache.c index 216c8d6256a9..b43a626a42de 100644 --- a/drivers/md/dm-vdo/sparse-cache.c +++ b/drivers/md/dm-vdo/sparse-cache.c @@ -191,7 +191,7 @@ static inline void __down(struct semaphore *semaphore) * happens, sleep briefly to avoid keeping the CPU locked up in * this loop. We could just call cond_resched, but then we'd * still keep consuming CPU time slices and swamp other threads - * trying to do computational work. [VDO-4980] + * trying to do computational work. */ fsleep(1000); } diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c index e0eddd4007b8..a40f059d39b3 100644 --- a/drivers/md/dm-vdo/vdo.c +++ b/drivers/md/dm-vdo/vdo.c @@ -544,7 +544,7 @@ int vdo_make(unsigned int instance, struct device_config *config, char **reason, int result; struct vdo *vdo; - /* VDO-3769 - Set a generic reason so we don't ever return garbage. */ + /* Initialize with a generic failure reason to prevent returning garbage. */ *reason = "Unspecified error"; result = uds_allocate(1, struct vdo, __func__, &vdo); diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c index eb6838ddabbb..4832ea46551f 100644 --- a/drivers/md/dm-vdo/vio.c +++ b/drivers/md/dm-vdo/vio.c @@ -123,7 +123,6 @@ int create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type, struct vio *vio; int result; - /* If struct vio grows past 256 bytes, we'll lose benefits of VDOSTORY-176. */ BUILD_BUG_ON(sizeof(struct vio) > 256); /* -- cgit v1.2.3 From eebd4e163024944ed53b6ad6d147e49df24dba32 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Wed, 21 Feb 2024 17:17:31 +0800 Subject: dm vdo: fix various function names referenced in comment blocks No functional modification involved. 
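
For example, the comment above assert_vdo_drain_operation() had called it
vdo_drain_operation(), and the comment above dump_hash_zone() had called it
vdo_dump_hash_zone(); each comment now names the function as defined.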
Reported-by: Abaci Robot Signed-off-by: Jiapeng Chong Signed-off-by: Matthew Sakai Signed-off-by: Mike Snitzer --- drivers/md/dm-vdo/admin-state.c | 2 +- drivers/md/dm-vdo/dedupe.c | 2 +- drivers/md/dm-vdo/encodings.c | 2 +- drivers/md/dm-vdo/flush.c | 2 +- drivers/md/dm-vdo/logical-zone.c | 4 ++-- drivers/md/dm-vdo/physical-zone.c | 2 +- drivers/md/dm-vdo/slab-depot.c | 6 +++--- drivers/md/dm-vdo/vdo.c | 4 ++-- 8 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/md/dm-vdo/vdo.c') diff --git a/drivers/md/dm-vdo/admin-state.c b/drivers/md/dm-vdo/admin-state.c index 94533a802edb..1423f4cebb8a 100644 --- a/drivers/md/dm-vdo/admin-state.c +++ b/drivers/md/dm-vdo/admin-state.c @@ -300,7 +300,7 @@ static bool check_code(bool valid, const struct admin_state_code *code, const ch } /** - * vdo_drain_operation() - Check that an operation is a drain. + * assert_vdo_drain_operation() - Check that an operation is a drain. * @waiter The completion to finish with an error if the operation is not a drain. * * Return: true if the specified operation is a drain. diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c index 9468d7fad443..c8018c5799bd 100644 --- a/drivers/md/dm-vdo/dedupe.c +++ b/drivers/md/dm-vdo/dedupe.c @@ -2839,7 +2839,7 @@ static const char *index_state_to_string(struct hash_zones *zones, } /** - * vdo_dump_hash_zone() - Dump information about a hash zone to the log for debugging. + * dump_hash_zone() - Dump information about a hash zone to the log for debugging. * @zone: The zone to dump. */ static void dump_hash_zone(const struct hash_zone *zone) diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c index 9e45411fe816..5012042b26af 100644 --- a/drivers/md/dm-vdo/encodings.c +++ b/drivers/md/dm-vdo/encodings.c @@ -1172,7 +1172,7 @@ static struct vdo_component unpack_vdo_component_41_0(struct packed_vdo_componen } /** - * vdo_decode_component() - Decode the component data for the vdo itself out of the super block. + * decode_vdo_component() - Decode the component data for the vdo itself out of the super block. * * Return: VDO_SUCCESS or an error. */ diff --git a/drivers/md/dm-vdo/flush.c b/drivers/md/dm-vdo/flush.c index 391b6203efc6..1bc13470a608 100644 --- a/drivers/md/dm-vdo/flush.c +++ b/drivers/md/dm-vdo/flush.c @@ -88,7 +88,7 @@ static inline struct vdo_flush *completion_as_vdo_flush(struct vdo_completion *c } /** - * waiter_as_flush() - Convert a vdo_flush's generic wait queue entry back to the vdo_flush. + * vdo_waiter_as_flush() - Convert a vdo_flush's generic wait queue entry back to the vdo_flush. * @waiter: The wait queue entry to convert. * * Return: The wait queue entry as a vdo_flush. diff --git a/drivers/md/dm-vdo/logical-zone.c b/drivers/md/dm-vdo/logical-zone.c index cfbf1701ca84..e11f9c859349 100644 --- a/drivers/md/dm-vdo/logical-zone.c +++ b/drivers/md/dm-vdo/logical-zone.c @@ -295,8 +295,8 @@ static void notify_flusher(struct vdo_completion *completion) } /** - * void attempt_generation_complete_notification() - Notify the flusher if some generation no - * longer has active VIOs. + * attempt_generation_complete_notification() - Notify the flusher if some generation no + * longer has active VIOs. * @completion: The zone completion. 
*/ static void attempt_generation_complete_notification(struct vdo_completion *completion) diff --git a/drivers/md/dm-vdo/physical-zone.c b/drivers/md/dm-vdo/physical-zone.c index 3bcf6f1ba77f..a9380966b56f 100644 --- a/drivers/md/dm-vdo/physical-zone.c +++ b/drivers/md/dm-vdo/physical-zone.c @@ -258,7 +258,7 @@ static int make_pbn_lock_pool(size_t capacity, struct pbn_lock_pool **pool_ptr) } /** - * vdo_free_pbn_lock_pool() - Free a PBN lock pool. + * free_pbn_lock_pool() - Free a PBN lock pool. * @pool: The lock pool to free. * * This also frees all the PBN locks it allocated, so the caller must ensure that all locks have diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c index e10c3db8e665..8c6376e79a23 100644 --- a/drivers/md/dm-vdo/slab-depot.c +++ b/drivers/md/dm-vdo/slab-depot.c @@ -2035,8 +2035,8 @@ static inline slab_block_number find_zero_byte_in_word(const u8 *word_ptr, } /** - * vdo_find_free_block() - Find the first block with a reference count of zero in the specified - * range of reference counter indexes. + * find_free_block() - Find the first block with a reference count of zero in the specified + * range of reference counter indexes. * @slab: The slab counters to scan. * @index_ptr: A pointer to hold the array index of the free block. * @@ -5032,7 +5032,7 @@ get_ref_counts_statistics(const struct slab_depot *depot) } /** - * get_depot_slab_journal_statistics() - Get the aggregated slab journal statistics for the depot. + * get_slab_journal_statistics() - Get the aggregated slab journal statistics for the depot. * @depot: The slab depot. * * Return: The aggregated statistics for all slab journals in the depot. diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c index a40f059d39b3..c161dffeb91a 100644 --- a/drivers/md/dm-vdo/vdo.c +++ b/drivers/md/dm-vdo/vdo.c @@ -267,8 +267,8 @@ static int __must_check initialize_thread_config(struct thread_count_config coun } /** - * vdo_read_geometry_block() - Synchronously read the geometry block from a vdo's underlying block - * device. + * read_geometry_block() - Synchronously read the geometry block from a vdo's underlying block + * device. * @vdo: The vdo whose geometry is to be read. * * Return: VDO_SUCCESS or an error code. -- cgit v1.2.3 From 04530b487bf35962ac3fbf490a6ca07d7a5d8869 Mon Sep 17 00:00:00 2001 From: Matthew Sakai Date: Thu, 22 Feb 2024 20:48:16 -0500 Subject: dm vdo: remove outdated pointer_map reference Signed-off-by: Matthew Sakai Signed-off-by: Mike Snitzer --- drivers/md/dm-vdo/vdo.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/md/dm-vdo/vdo.c') diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c index c161dffeb91a..d16404588881 100644 --- a/drivers/md/dm-vdo/vdo.c +++ b/drivers/md/dm-vdo/vdo.c @@ -67,10 +67,7 @@ struct sync_completion { struct completion completion; }; -/* - * We don't expect this set to ever get really large, so a linked list is adequate. We can use a - * pointer_map if we need to later. - */ +/* A linked list is adequate for the small number of entries we expect. */ struct device_registry { struct list_head links; /* TODO: Convert to rcu per kernel recommendation. 
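	 * (A hedged sketch of that conversion: readers would walk the list
	 * under rcu_read_lock() using list_for_each_entry_rcu(), additions
	 * would use list_add_rcu() under a write-side lock, and removals
	 * would pair list_del_rcu() with synchronize_rcu() before any free.)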
*/ -- cgit v1.2.3 From 6008d526b06bab2cbea194948aec0cd04461dcc6 Mon Sep 17 00:00:00 2001 From: Bruce Johnston Date: Mon, 26 Feb 2024 17:04:43 -0500 Subject: dm-vdo: change unnamed enums to defines Signed-off-by: Bruce Johnston Signed-off-by: Matthew Sakai Signed-off-by: Mike Snitzer --- drivers/md/dm-vdo/block-map.c | 6 ++--- drivers/md/dm-vdo/data-vio.c | 8 +++--- drivers/md/dm-vdo/dedupe.c | 20 ++++++--------- drivers/md/dm-vdo/dm-vdo-target.c | 28 +++++++++------------ drivers/md/dm-vdo/dump.c | 11 +++----- drivers/md/dm-vdo/encodings.c | 4 +-- drivers/md/dm-vdo/errors.c | 4 +-- drivers/md/dm-vdo/indexer/config.c | 10 +++----- drivers/md/dm-vdo/indexer/delta-index.c | 40 ++++++++---------------------- drivers/md/dm-vdo/indexer/index-layout.c | 20 ++++++--------- drivers/md/dm-vdo/indexer/index-page-map.c | 4 +-- drivers/md/dm-vdo/indexer/io-factory.c | 2 +- drivers/md/dm-vdo/indexer/open-chapter.c | 8 +++--- drivers/md/dm-vdo/indexer/radix-sort.c | 6 ++--- drivers/md/dm-vdo/indexer/sparse-cache.c | 6 ++--- drivers/md/dm-vdo/indexer/volume-index.c | 6 +++-- drivers/md/dm-vdo/indexer/volume.c | 12 ++++----- drivers/md/dm-vdo/int-map.c | 12 ++++----- drivers/md/dm-vdo/logical-zone.c | 4 +-- drivers/md/dm-vdo/packer.c | 4 +-- drivers/md/dm-vdo/physical-zone.c | 6 ++--- drivers/md/dm-vdo/priority-table.c | 4 +-- drivers/md/dm-vdo/recovery-journal.c | 16 ++++++------ drivers/md/dm-vdo/thread-utils.c | 8 +++--- drivers/md/dm-vdo/vdo.c | 2 +- 25 files changed, 91 insertions(+), 160 deletions(-) (limited to 'drivers/md/dm-vdo/vdo.c') diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c index 5be400743c03..b7274e94b269 100644 --- a/drivers/md/dm-vdo/block-map.c +++ b/drivers/md/dm-vdo/block-map.c @@ -114,10 +114,8 @@ const struct block_map_entry UNMAPPED_BLOCK_MAP_ENTRY = { .pbn_low_word = __cpu_to_le32(VDO_ZERO_BLOCK & UINT_MAX), }; -enum { - LOG_INTERVAL = 4000, - DISPLAY_INTERVAL = 100000, -}; +#define LOG_INTERVAL 4000 +#define DISPLAY_INTERVAL 100000 /* * For adjusting VDO page cache statistic fields which are only mutated on the logical zone thread. diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c index f6c32dc9a822..1630993e536f 100644 --- a/drivers/md/dm-vdo/data-vio.c +++ b/drivers/md/dm-vdo/data-vio.c @@ -114,9 +114,7 @@ static blk_opf_t PASSTHROUGH_FLAGS = (REQ_PRIO | REQ_META | REQ_SYNC | REQ_RAHEA * them are awakened. */ -enum { - DATA_VIO_RELEASE_BATCH_SIZE = 128, -}; +#define DATA_VIO_RELEASE_BATCH_SIZE 128 static const unsigned int VDO_SECTORS_PER_BLOCK_MASK = VDO_SECTORS_PER_BLOCK - 1; static const u32 COMPRESSION_STATUS_MASK = 0xff; @@ -1044,8 +1042,8 @@ void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios) * In order that syslog can empty its buffer, sleep after 35 elements for 4ms (till the * second clock tick). These numbers were picked based on experiments with lab machines. 
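	 * (Arithmetic check: one 4 ms sleep per 35 elements caps output near
	 * 250 * 35 = 8750 lines per second, slow enough for syslog to drain.)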
*/ - enum { ELEMENTS_PER_BATCH = 35 }; - enum { SLEEP_FOR_SYSLOG = 4000 }; + static const int ELEMENTS_PER_BATCH = 35; + static const int SLEEP_FOR_SYSLOG = 4000; if (pool == NULL) return; diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c index c8018c5799bd..b453a204239a 100644 --- a/drivers/md/dm-vdo/dedupe.c +++ b/drivers/md/dm-vdo/dedupe.c @@ -154,11 +154,9 @@ struct uds_attribute { const char *(*show_string)(struct hash_zones *hash_zones); }; -enum timer_state { - DEDUPE_QUERY_TIMER_IDLE, - DEDUPE_QUERY_TIMER_RUNNING, - DEDUPE_QUERY_TIMER_FIRED, -}; +#define DEDUPE_QUERY_TIMER_IDLE 0 +#define DEDUPE_QUERY_TIMER_RUNNING 1 +#define DEDUPE_QUERY_TIMER_FIRED 2 enum dedupe_context_state { DEDUPE_CONTEXT_IDLE, @@ -185,11 +183,9 @@ static const char *SUSPENDED = "suspended"; static const char *UNKNOWN = "unknown"; /* Version 2 uses the kernel space UDS index and is limited to 16 bytes */ -enum { - UDS_ADVICE_VERSION = 2, - /* version byte + state byte + 64-bit little-endian PBN */ - UDS_ADVICE_SIZE = 1 + 1 + sizeof(u64), -}; +#define UDS_ADVICE_VERSION 2 +/* version byte + state byte + 64-bit little-endian PBN */ +#define UDS_ADVICE_SIZE (1 + 1 + sizeof(u64)) enum hash_lock_state { /* State for locks that are not in use or are being initialized. */ @@ -279,9 +275,7 @@ struct hash_lock { struct vdo_wait_queue waiters; }; -enum { - LOCK_POOL_CAPACITY = MAXIMUM_VDO_USER_VIOS, -}; +#define LOCK_POOL_CAPACITY MAXIMUM_VDO_USER_VIOS struct hash_zones { struct action_manager *manager; diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c index 429a285cbc4a..175ee56a89e1 100644 --- a/drivers/md/dm-vdo/dm-vdo-target.c +++ b/drivers/md/dm-vdo/dm-vdo-target.c @@ -42,7 +42,7 @@ #define CURRENT_VERSION "8.3.0.65" -enum { +enum admin_phases { GROW_LOGICAL_PHASE_START, GROW_LOGICAL_PHASE_GROW_BLOCK_MAP, GROW_LOGICAL_PHASE_END, @@ -142,10 +142,8 @@ static const char * const ADMIN_PHASE_NAMES[] = { "SUSPEND_PHASE_END", }; -enum { - /* If we bump this, update the arrays below */ - TABLE_VERSION = 4, -}; +/* If we bump this, update the arrays below */ +#define TABLE_VERSION 4 /* arrays for handling different table versions */ static const u8 REQUIRED_ARGC[] = { 10, 12, 9, 7, 6 }; @@ -159,17 +157,15 @@ static const u8 POOL_NAME_ARG_INDEX[] = { 8, 10, 8 }; * need to scan 16 words, so it's not likely to be a big deal compared to other resource usage. */ -enum { - /* - * This minimum size for the bit array creates a numbering space of 0-999, which allows - * successive starts of the same volume to have different instance numbers in any - * reasonably-sized test. Changing instances on restart allows vdoMonReport to detect that - * the ephemeral stats have reset to zero. - */ - BIT_COUNT_MINIMUM = 1000, - /** Grow the bit array by this many bits when needed */ - BIT_COUNT_INCREMENT = 100, -}; +/* + * This minimum size for the bit array creates a numbering space of 0-999, which allows + * successive starts of the same volume to have different instance numbers in any + * reasonably-sized test. Changing instances on restart allows vdoMonReport to detect that + * the ephemeral stats have reset to zero. 
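+ *
+ * (Worked example: with BIT_COUNT_MINIMUM of 1000 and BIT_COUNT_INCREMENT
+ * of 100, instances 0-999 need no growth of the array at all, and it then
+ * grows by 100 bits at a time only once more than 1000 devices coexist.)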
+ */ +#define BIT_COUNT_MINIMUM 1000 +/* Grow the bit array by this many bits when needed */ +#define BIT_COUNT_INCREMENT 100 struct instance_tracker { unsigned int bit_count; diff --git a/drivers/md/dm-vdo/dump.c b/drivers/md/dm-vdo/dump.c index 91bc8ed36aa7..2a0890b54186 100644 --- a/drivers/md/dm-vdo/dump.c +++ b/drivers/md/dm-vdo/dump.c @@ -41,10 +41,10 @@ enum dump_option_flags { FLAG_SKIP_DEFAULT = (1 << SKIP_DEFAULT) }; -enum { - FLAGS_ALL_POOLS = (FLAG_SHOW_VIO_POOL), - DEFAULT_DUMP_FLAGS = (FLAG_SHOW_QUEUES | FLAG_SHOW_VDO_STATUS) -}; +#define FLAGS_ALL_POOLS (FLAG_SHOW_VIO_POOL) +#define DEFAULT_DUMP_FLAGS (FLAG_SHOW_QUEUES | FLAG_SHOW_VDO_STATUS) +/* Another static buffer... log10(256) = 2.408+, round up: */ +#define DIGITS_PER_U64 (1 + sizeof(u64) * 2409 / 1000) static inline bool is_arg_string(const char *arg, const char *this_option) { @@ -222,9 +222,6 @@ void dump_data_vio(void *data) * one does run, the log output will be garbled anyway. */ static char vio_completion_dump_buffer[100 + MAX_VDO_WORK_QUEUE_NAME_LEN]; - /* Another static buffer... log10(256) = 2.408+, round up: */ - enum { DIGITS_PER_U64 = 1 + sizeof(u64) * 2409 / 1000 }; - static char vio_block_number_dump_buffer[sizeof("P L D") + 3 * DIGITS_PER_U64]; static char vio_flush_generation_buffer[sizeof(" FG") + DIGITS_PER_U64]; static char flags_dump_buffer[8]; diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c index 5012042b26af..4a0a6afea670 100644 --- a/drivers/md/dm-vdo/encodings.c +++ b/drivers/md/dm-vdo/encodings.c @@ -55,9 +55,7 @@ static const struct header GEOMETRY_BLOCK_HEADER_4_0 = { const u8 VDO_GEOMETRY_MAGIC_NUMBER[VDO_GEOMETRY_MAGIC_NUMBER_SIZE + 1] = "dmvdo001"; -enum { - PAGE_HEADER_4_1_SIZE = 8 + 8 + 8 + 1 + 1 + 1 + 1, -}; +#define PAGE_HEADER_4_1_SIZE (8 + 8 + 8 + 1 + 1 + 1 + 1) static const struct version_number BLOCK_MAP_4_1 = { .major_version = 4, diff --git a/drivers/md/dm-vdo/errors.c b/drivers/md/dm-vdo/errors.c index e8599599a377..dc1f0533bd7a 100644 --- a/drivers/md/dm-vdo/errors.c +++ b/drivers/md/dm-vdo/errors.c @@ -79,9 +79,7 @@ struct error_block { const struct error_info *infos; }; -enum { - MAX_ERROR_BLOCKS = 6, -}; +#define MAX_ERROR_BLOCKS 6 static struct { int allocated; diff --git a/drivers/md/dm-vdo/indexer/config.c b/drivers/md/dm-vdo/indexer/config.c index 0bf315e7b5d1..b572350a3d5f 100644 --- a/drivers/md/dm-vdo/indexer/config.c +++ b/drivers/md/dm-vdo/indexer/config.c @@ -15,12 +15,10 @@ static const u8 INDEX_CONFIG_MAGIC[] = "ALBIC"; static const u8 INDEX_CONFIG_VERSION_6_02[] = "06.02"; static const u8 INDEX_CONFIG_VERSION_8_02[] = "08.02"; -enum { - DEFAULT_VOLUME_READ_THREADS = 2, - MAX_VOLUME_READ_THREADS = 16, - INDEX_CONFIG_MAGIC_LENGTH = sizeof(INDEX_CONFIG_MAGIC) - 1, - INDEX_CONFIG_VERSION_LENGTH = sizeof(INDEX_CONFIG_VERSION_6_02) - 1, -}; +#define DEFAULT_VOLUME_READ_THREADS 2 +#define MAX_VOLUME_READ_THREADS 16 +#define INDEX_CONFIG_MAGIC_LENGTH (sizeof(INDEX_CONFIG_MAGIC) - 1) +#define INDEX_CONFIG_VERSION_LENGTH ((int)(sizeof(INDEX_CONFIG_VERSION_6_02) - 1)) static bool is_version(const u8 *version, u8 *buffer) { diff --git a/drivers/md/dm-vdo/indexer/delta-index.c b/drivers/md/dm-vdo/indexer/delta-index.c index 6a85f93bbcb1..8eece0ba6d93 100644 --- a/drivers/md/dm-vdo/indexer/delta-index.c +++ b/drivers/md/dm-vdo/indexer/delta-index.c @@ -70,17 +70,13 @@ * This is the largest field size supported by get_field() and set_field(). Any field that is * larger is not guaranteed to fit in a single byte-aligned u32. 
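 *
 * (Worked out: a field of b bits starting at bit offset o in its first
 * byte spans (o + b + 7) / 8 bytes; with o at most 7, any b up to
 * (sizeof(u32) - 1) * BITS_PER_BYTE + 1 = 25 fits in the 4 bytes a single
 * byte-aligned u32 access covers, while 26 bits can require 5 bytes.)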
 */
-enum {
-	MAX_FIELD_BITS = (sizeof(u32) - 1) * BITS_PER_BYTE + 1,
-};
+#define MAX_FIELD_BITS ((sizeof(u32) - 1) * BITS_PER_BYTE + 1)
 
 /*
  * This is the largest field size supported by get_big_field() and set_big_field(). Any field that
  * is larger is not guaranteed to fit in a single byte-aligned u64.
  */
-enum {
-	MAX_BIG_FIELD_BITS = (sizeof(u64) - 1) * BITS_PER_BYTE + 1,
-};
+#define MAX_BIG_FIELD_BITS ((sizeof(u64) - 1) * BITS_PER_BYTE + 1)
 
 /*
  * This is the number of guard bytes needed at the end of the memory byte array when using the bit
@@ -88,45 +84,33 @@ enum {
  * bytes beyond the end of the desired field. The definition is written to make it clear how this
  * value is derived.
  */
-enum {
-	POST_FIELD_GUARD_BYTES = sizeof(u64) - 1,
-};
+#define POST_FIELD_GUARD_BYTES (sizeof(u64) - 1)
 
 /* The number of guard bits that are needed in the tail guard list */
-enum {
-	GUARD_BITS = POST_FIELD_GUARD_BYTES * BITS_PER_BYTE
-};
+#define GUARD_BITS (POST_FIELD_GUARD_BYTES * BITS_PER_BYTE)
 
 /*
  * The maximum size of a single delta list in bytes. We count guard bytes in this value because a
  * buffer of this size can be used with move_bits().
  */
-enum {
-	DELTA_LIST_MAX_BYTE_COUNT =
-		((U16_MAX + BITS_PER_BYTE) / BITS_PER_BYTE + POST_FIELD_GUARD_BYTES)
-};
+#define DELTA_LIST_MAX_BYTE_COUNT \
+	((U16_MAX + BITS_PER_BYTE) / BITS_PER_BYTE + POST_FIELD_GUARD_BYTES)
 
 /* The number of extra bytes and bits needed to store a collision entry */
-enum {
-	COLLISION_BYTES = UDS_RECORD_NAME_SIZE,
-	COLLISION_BITS = COLLISION_BYTES * BITS_PER_BYTE
-};
+#define COLLISION_BYTES UDS_RECORD_NAME_SIZE
+#define COLLISION_BITS (COLLISION_BYTES * BITS_PER_BYTE)
 
 /*
  * Immutable delta lists are packed into pages containing a header that encodes the delta list
  * information into 19 bits per list (64KB bit offset).
  */
-
-enum { IMMUTABLE_HEADER_SIZE = 19 };
+#define IMMUTABLE_HEADER_SIZE 19
 
 /*
  * Constants and structures for the saved delta index. "DI" is for delta_index, and -##### is a
  * number to increment when the format of the data changes.
  */
-
-enum {
-	MAGIC_SIZE = 8,
-};
+#define MAGIC_SIZE 8
 
 static const char DELTA_INDEX_MAGIC[] = "DI-00002";
 
@@ -216,9 +200,7 @@ static void rebalance_delta_zone(const struct delta_zone *delta_zone, u32 first,
 static inline size_t get_zone_memory_size(unsigned int zone_count, size_t memory_size)
 {
 	/* Round up so that each zone is a multiple of 64K in size. */
-	enum {
-		ALLOC_BOUNDARY = 64 * 1024,
-	};
+	size_t ALLOC_BOUNDARY = 64 * 1024;
 
 	return (memory_size / zone_count + ALLOC_BOUNDARY - 1) & -ALLOC_BOUNDARY;
 }
diff --git a/drivers/md/dm-vdo/indexer/index-layout.c b/drivers/md/dm-vdo/indexer/index-layout.c
index af533aa270a8..a0227a75814b 100644
--- a/drivers/md/dm-vdo/indexer/index-layout.c
+++ b/drivers/md/dm-vdo/indexer/index-layout.c
@@ -54,11 +54,9 @@
  * Each save also has a unique nonce.
  */
 
-enum {
-	MAGIC_SIZE = 32,
-	NONCE_INFO_SIZE = 32,
-	MAX_SAVES = 2,
-};
+#define MAGIC_SIZE 32
+#define NONCE_INFO_SIZE 32
+#define MAX_SAVES 2
 
 enum region_kind {
 	RL_KIND_EMPTY = 0,
@@ -82,9 +80,7 @@ enum region_type {
 	RH_TYPE_UNSAVED = 4,
 };
 
-enum {
-	RL_SOLE_INSTANCE = 65535,
-};
+#define RL_SOLE_INSTANCE 65535
 
 /*
  * Super block version 2 is the first released version.
@@ -98,11 +94,9 @@ enum {
  * order to make room to prepend LVM metadata to a volume originally created without lvm. This
 * allows the index to retain most of its deduplication records.
*/ -enum { - SUPER_VERSION_MINIMUM = 3, - SUPER_VERSION_CURRENT = 3, - SUPER_VERSION_MAXIMUM = 7, -}; +#define SUPER_VERSION_MINIMUM 3 +#define SUPER_VERSION_CURRENT 3 +#define SUPER_VERSION_MAXIMUM 7 static const u8 LAYOUT_MAGIC[MAGIC_SIZE] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*"; static const u64 REGION_MAGIC = 0x416c6252676e3031; /* 'AlbRgn01' */ diff --git a/drivers/md/dm-vdo/indexer/index-page-map.c b/drivers/md/dm-vdo/indexer/index-page-map.c index 90d97c33a9c3..37037ac8eee9 100644 --- a/drivers/md/dm-vdo/indexer/index-page-map.c +++ b/drivers/md/dm-vdo/indexer/index-page-map.c @@ -25,9 +25,7 @@ static const u8 PAGE_MAP_MAGIC[] = "ALBIPM02"; -enum { - PAGE_MAP_MAGIC_LENGTH = sizeof(PAGE_MAP_MAGIC) - 1, -}; +#define PAGE_MAP_MAGIC_LENGTH (sizeof(PAGE_MAP_MAGIC) - 1) static inline u32 get_entry_count(const struct index_geometry *geometry) { diff --git a/drivers/md/dm-vdo/indexer/io-factory.c b/drivers/md/dm-vdo/indexer/io-factory.c index 02242df94e37..fecd436986ae 100644 --- a/drivers/md/dm-vdo/indexer/io-factory.c +++ b/drivers/md/dm-vdo/indexer/io-factory.c @@ -37,7 +37,7 @@ struct buffered_reader { u8 *end; }; -enum { MAX_READ_AHEAD_BLOCKS = 4 }; +#define MAX_READ_AHEAD_BLOCKS 4 /* * The buffered writer allows efficient I/O by buffering writes and committing page-sized segments diff --git a/drivers/md/dm-vdo/indexer/open-chapter.c b/drivers/md/dm-vdo/indexer/open-chapter.c index da16afaec07f..cd2d35e39c20 100644 --- a/drivers/md/dm-vdo/indexer/open-chapter.c +++ b/drivers/md/dm-vdo/indexer/open-chapter.c @@ -46,11 +46,9 @@ static const u8 OPEN_CHAPTER_MAGIC[] = "ALBOC"; static const u8 OPEN_CHAPTER_VERSION[] = "02.00"; -enum { - OPEN_CHAPTER_MAGIC_LENGTH = sizeof(OPEN_CHAPTER_MAGIC) - 1, - OPEN_CHAPTER_VERSION_LENGTH = sizeof(OPEN_CHAPTER_VERSION) - 1, - LOAD_RATIO = 2, -}; +#define OPEN_CHAPTER_MAGIC_LENGTH (sizeof(OPEN_CHAPTER_MAGIC) - 1) +#define OPEN_CHAPTER_VERSION_LENGTH (sizeof(OPEN_CHAPTER_VERSION) - 1) +#define LOAD_RATIO 2 static inline size_t records_size(const struct open_chapter_zone *open_chapter) { diff --git a/drivers/md/dm-vdo/indexer/radix-sort.c b/drivers/md/dm-vdo/indexer/radix-sort.c index 1f17c708a652..b86d55f0827e 100644 --- a/drivers/md/dm-vdo/indexer/radix-sort.c +++ b/drivers/md/dm-vdo/indexer/radix-sort.c @@ -17,10 +17,8 @@ * keys to be sorted. */ -enum { - /* Piles smaller than this are handled with a simple insertion sort. */ - INSERTION_SORT_THRESHOLD = 12, -}; +/* Piles smaller than this are handled with a simple insertion sort. */ +#define INSERTION_SORT_THRESHOLD 12 /* Sort keys are pointers to immutable fixed-length arrays of bytes. */ typedef const u8 *sort_key_t; diff --git a/drivers/md/dm-vdo/indexer/sparse-cache.c b/drivers/md/dm-vdo/indexer/sparse-cache.c index f2141de6ed00..9e8672cba3fa 100644 --- a/drivers/md/dm-vdo/indexer/sparse-cache.c +++ b/drivers/md/dm-vdo/indexer/sparse-cache.c @@ -77,10 +77,8 @@ * considered to be a member of the cache for uds_sparse_cache_contains(). 
*/ -enum { - SKIP_SEARCH_THRESHOLD = 20000, - ZONE_ZERO = 0, -}; +#define SKIP_SEARCH_THRESHOLD 20000 +#define ZONE_ZERO 0 /* * These counters are essentially fields of the struct cached_chapter_index, but are segregated diff --git a/drivers/md/dm-vdo/indexer/volume-index.c b/drivers/md/dm-vdo/indexer/volume-index.c index 8cbd9280c4bd..a88e515ceef6 100644 --- a/drivers/md/dm-vdo/indexer/volume-index.c +++ b/drivers/md/dm-vdo/indexer/volume-index.c @@ -94,7 +94,8 @@ struct chapter_range { u32 chapter_count; }; -enum { MAGIC_SIZE = 8 }; +#define MAGIC_SIZE 8 + static const char MAGIC_START_5[] = "MI5-0005"; struct sub_index_data { @@ -193,10 +194,11 @@ unsigned int uds_get_volume_index_zone(const struct volume_index *volume_index, return get_volume_sub_index_zone(get_volume_sub_index(volume_index, name), name); } +#define DELTA_LIST_SIZE 256 + static int compute_volume_sub_index_parameters(const struct uds_configuration *config, struct sub_index_parameters *params) { - enum { DELTA_LIST_SIZE = 256 }; u64 entries_in_volume_index, address_span; u32 chapters_in_volume_index, invalid_chapters; u32 rounded_chapters; diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c index eca83b6cab35..002a4264a163 100644 --- a/drivers/md/dm-vdo/indexer/volume.c +++ b/drivers/md/dm-vdo/indexer/volume.c @@ -60,13 +60,11 @@ * in-memory volume index. */ -enum { - /* The maximum allowable number of contiguous bad chapters */ - MAX_BAD_CHAPTERS = 100, - VOLUME_CACHE_MAX_ENTRIES = (U16_MAX >> 1), - VOLUME_CACHE_QUEUED_FLAG = (1 << 15), - VOLUME_CACHE_MAX_QUEUED_READS = 4096, -}; +/* The maximum allowable number of contiguous bad chapters */ +#define MAX_BAD_CHAPTERS 100 +#define VOLUME_CACHE_MAX_ENTRIES (U16_MAX >> 1) +#define VOLUME_CACHE_QUEUED_FLAG (1 << 15) +#define VOLUME_CACHE_MAX_QUEUED_READS 4096 static const u64 BAD_CHAPTER = U64_MAX; diff --git a/drivers/md/dm-vdo/int-map.c b/drivers/md/dm-vdo/int-map.c index 99ccbb1339c6..e0953e013f15 100644 --- a/drivers/md/dm-vdo/int-map.c +++ b/drivers/md/dm-vdo/int-map.c @@ -56,13 +56,11 @@ #include "numeric.h" #include "permassert.h" -enum { - DEFAULT_CAPACITY = 16, /* the number of neighborhoods in a new table */ - NEIGHBORHOOD = 255, /* the number of buckets in each neighborhood */ - MAX_PROBES = 1024, /* limit on the number of probes for a free bucket */ - NULL_HOP_OFFSET = 0, /* the hop offset value terminating the hop list */ - DEFAULT_LOAD = 75 /* a compromise between memory use and performance */ -}; +#define DEFAULT_CAPACITY 16 /* the number of neighborhoods in a new table */ +#define NEIGHBORHOOD 255 /* the number of buckets in each neighborhood */ +#define MAX_PROBES 1024 /* limit on the number of probes for a free bucket */ +#define NULL_HOP_OFFSET 0 /* the hop offset value terminating the hop list */ +#define DEFAULT_LOAD 75 /* a compromise between memory use and performance */ /** * struct bucket - hash bucket diff --git a/drivers/md/dm-vdo/logical-zone.c b/drivers/md/dm-vdo/logical-zone.c index e11f9c859349..c5b3b1c111e3 100644 --- a/drivers/md/dm-vdo/logical-zone.c +++ b/drivers/md/dm-vdo/logical-zone.c @@ -21,9 +21,7 @@ #include "physical-zone.h" #include "vdo.h" -enum { - ALLOCATIONS_PER_ZONE = 128, -}; +#define ALLOCATIONS_PER_ZONE 128 /** * as_logical_zone() - Convert a generic vdo_completion to a logical_zone. 
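 * (Presumably a container_of() wrapper over the zone's embedded completion,
 * like the other as_*() conversion helpers; the body lies outside this hunk.)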
diff --git a/drivers/md/dm-vdo/packer.c b/drivers/md/dm-vdo/packer.c index b0ffb21ec436..3283c8d56c59 100644 --- a/drivers/md/dm-vdo/packer.c +++ b/drivers/md/dm-vdo/packer.c @@ -30,9 +30,7 @@ static const struct version_number COMPRESSED_BLOCK_1_0 = { .minor_version = 0, }; -enum { - COMPRESSED_BLOCK_1_0_SIZE = 4 + 4 + (2 * VDO_MAX_COMPRESSION_SLOTS), -}; +#define COMPRESSED_BLOCK_1_0_SIZE (4 + 4 + (2 * VDO_MAX_COMPRESSION_SLOTS)) /** * vdo_get_compressed_block_fragment() - Get a reference to a compressed fragment from a compressed diff --git a/drivers/md/dm-vdo/physical-zone.c b/drivers/md/dm-vdo/physical-zone.c index a9380966b56f..62d142b28282 100644 --- a/drivers/md/dm-vdo/physical-zone.c +++ b/drivers/md/dm-vdo/physical-zone.c @@ -23,10 +23,8 @@ #include "status-codes.h" #include "vdo.h" -enum { - /* Each user data_vio needs a PBN read lock and write lock. */ - LOCK_POOL_CAPACITY = 2 * MAXIMUM_VDO_USER_VIOS, -}; +/* Each user data_vio needs a PBN read lock and write lock. */ +#define LOCK_POOL_CAPACITY (2 * MAXIMUM_VDO_USER_VIOS) struct pbn_lock_implementation { enum pbn_lock_type type; diff --git a/drivers/md/dm-vdo/priority-table.c b/drivers/md/dm-vdo/priority-table.c index 9408219b5700..a59e9d40ca90 100644 --- a/drivers/md/dm-vdo/priority-table.c +++ b/drivers/md/dm-vdo/priority-table.c @@ -14,9 +14,7 @@ #include "status-codes.h" /* We use a single 64-bit search vector, so the maximum priority is 63 */ -enum { - MAX_PRIORITY = 63 -}; +#define MAX_PRIORITY 63 /* * All the entries with the same priority are queued in a circular list in a bucket for that diff --git a/drivers/md/dm-vdo/recovery-journal.c b/drivers/md/dm-vdo/recovery-journal.c index 1e15bfe42cfc..c1d355346bcf 100644 --- a/drivers/md/dm-vdo/recovery-journal.c +++ b/drivers/md/dm-vdo/recovery-journal.c @@ -26,15 +26,13 @@ static const u64 RECOVERY_COUNT_MASK = 0xff; -enum { - /* - * The number of reserved blocks must be large enough to prevent a new recovery journal - * block write from overwriting a block which appears to still be a valid head block of the - * journal. Currently, that means reserving enough space for all 2048 data_vios. - */ - RECOVERY_JOURNAL_RESERVED_BLOCKS = - (MAXIMUM_VDO_USER_VIOS / RECOVERY_JOURNAL_ENTRIES_PER_BLOCK) + 2, -}; +/* + * The number of reserved blocks must be large enough to prevent a new recovery journal + * block write from overwriting a block which appears to still be a valid head block of the + * journal. Currently, that means reserving enough space for all 2048 data_vios. + */ +#define RECOVERY_JOURNAL_RESERVED_BLOCKS \ + ((MAXIMUM_VDO_USER_VIOS / RECOVERY_JOURNAL_ENTRIES_PER_BLOCK) + 2) /** * DOC: Lock Counters. diff --git a/drivers/md/dm-vdo/thread-utils.c b/drivers/md/dm-vdo/thread-utils.c index aeca14bba852..244abc6ad848 100644 --- a/drivers/md/dm-vdo/thread-utils.c +++ b/drivers/md/dm-vdo/thread-utils.c @@ -27,11 +27,9 @@ struct thread { struct completion thread_done; }; -enum { - ONCE_NOT_DONE = 0, - ONCE_IN_PROGRESS = 1, - ONCE_COMPLETE = 2, -}; +#define ONCE_NOT_DONE 0 +#define ONCE_IN_PROGRESS 1 +#define ONCE_COMPLETE 2 /* Run a function once only, and record that fact in the atomic value. 
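 *
 * (Hedged usage sketch, with a caller-owned atomic initialized to the
 * ONCE_NOT_DONE value defined above; initialize_registry stands in for
 * any void function:
 *
 *     static atomic_t registry_once = ATOMIC_INIT(ONCE_NOT_DONE);
 *     vdo_perform_once(&registry_once, initialize_registry);
 *
 * The three states above let racing callers agree that the function body
 * runs exactly once.)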
*/ void vdo_perform_once(atomic_t *once, void (*function)(void)) diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c index d16404588881..6baf319d79c6 100644 --- a/drivers/md/dm-vdo/vdo.c +++ b/drivers/md/dm-vdo/vdo.c @@ -60,7 +60,7 @@ #include "status-codes.h" #include "vio.h" -enum { PARANOID_THREAD_CONSISTENCY_CHECKS = 0 }; +#define PARANOID_THREAD_CONSISTENCY_CHECKS 0 struct sync_completion { struct vdo_completion vdo_completion; -- cgit v1.2.3 From 0eea6b6e78daa45ca13e9b186da042f9b6139b50 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Tue, 13 Feb 2024 10:55:50 -0600 Subject: dm vdo memory-alloc: change from uds_ to vdo_ namespace Signed-off-by: Mike Snitzer Signed-off-by: Matthew Sakai --- drivers/md/dm-vdo/action-manager.c | 2 +- drivers/md/dm-vdo/admin-state.c | 2 +- drivers/md/dm-vdo/block-map.c | 60 ++++++++-------- drivers/md/dm-vdo/data-vio.c | 24 +++---- drivers/md/dm-vdo/dedupe.c | 34 ++++----- drivers/md/dm-vdo/dm-vdo-target.c | 56 +++++++-------- drivers/md/dm-vdo/dump.c | 2 +- drivers/md/dm-vdo/encodings.c | 4 +- drivers/md/dm-vdo/flush.c | 12 ++-- drivers/md/dm-vdo/funnel-queue.c | 4 +- drivers/md/dm-vdo/funnel-workqueue.c | 30 ++++---- drivers/md/dm-vdo/indexer/chapter-index.c | 6 +- drivers/md/dm-vdo/indexer/config.c | 4 +- drivers/md/dm-vdo/indexer/delta-index.c | 20 +++--- drivers/md/dm-vdo/indexer/funnel-requestqueue.c | 4 +- drivers/md/dm-vdo/indexer/geometry.c | 4 +- drivers/md/dm-vdo/indexer/index-layout.c | 58 ++++++++-------- drivers/md/dm-vdo/indexer/index-page-map.c | 20 +++--- drivers/md/dm-vdo/indexer/index-session.c | 6 +- drivers/md/dm-vdo/indexer/index.c | 26 +++---- drivers/md/dm-vdo/indexer/io-factory.c | 14 ++-- drivers/md/dm-vdo/indexer/open-chapter.c | 8 +-- drivers/md/dm-vdo/indexer/radix-sort.c | 4 +- drivers/md/dm-vdo/indexer/sparse-cache.c | 22 +++--- drivers/md/dm-vdo/indexer/volume-index.c | 16 ++--- drivers/md/dm-vdo/indexer/volume.c | 36 +++++----- drivers/md/dm-vdo/int-map.c | 14 ++-- drivers/md/dm-vdo/io-submitter.c | 10 +-- drivers/md/dm-vdo/logical-zone.c | 8 +-- drivers/md/dm-vdo/memory-alloc.c | 38 +++++----- drivers/md/dm-vdo/memory-alloc.h | 52 +++++++------- drivers/md/dm-vdo/message-stats.c | 4 +- drivers/md/dm-vdo/packer.c | 14 ++-- drivers/md/dm-vdo/physical-zone.c | 16 ++--- drivers/md/dm-vdo/pool-sysfs.c | 2 +- drivers/md/dm-vdo/priority-table.c | 4 +- drivers/md/dm-vdo/recovery-journal.c | 36 +++++----- drivers/md/dm-vdo/repair.c | 24 +++---- drivers/md/dm-vdo/slab-depot.c | 80 ++++++++++----------- drivers/md/dm-vdo/slab-depot.h | 2 +- drivers/md/dm-vdo/thread-utils.c | 10 +-- drivers/md/dm-vdo/uds-sysfs.c | 4 +- drivers/md/dm-vdo/vdo.c | 92 ++++++++++++------------- drivers/md/dm-vdo/vio.c | 20 +++--- 44 files changed, 453 insertions(+), 455 deletions(-) (limited to 'drivers/md/dm-vdo/vdo.c') diff --git a/drivers/md/dm-vdo/action-manager.c b/drivers/md/dm-vdo/action-manager.c index 973901fc3174..709be4c17d27 100644 --- a/drivers/md/dm-vdo/action-manager.c +++ b/drivers/md/dm-vdo/action-manager.c @@ -107,7 +107,7 @@ int vdo_make_action_manager(zone_count_t zones, struct action_manager **manager_ptr) { struct action_manager *manager; - int result = uds_allocate(1, struct action_manager, __func__, &manager); + int result = vdo_allocate(1, struct action_manager, __func__, &manager); if (result != VDO_SUCCESS) return result; diff --git a/drivers/md/dm-vdo/admin-state.c b/drivers/md/dm-vdo/admin-state.c index 1423f4cebb8a..d695af42d140 100644 --- a/drivers/md/dm-vdo/admin-state.c +++ b/drivers/md/dm-vdo/admin-state.c @@ 
-206,7 +206,7 @@ bool vdo_finish_operation(struct admin_state *state, int result) if (!state->starting) { vdo_set_admin_state_code(state, state->next_state); if (state->waiter != NULL) - vdo_launch_completion(uds_forget(state->waiter)); + vdo_launch_completion(vdo_forget(state->waiter)); } return true; diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c index b7274e94b269..b09974ad41d2 100644 --- a/drivers/md/dm-vdo/block-map.c +++ b/drivers/md/dm-vdo/block-map.c @@ -221,12 +221,12 @@ static int __must_check allocate_cache_components(struct vdo_page_cache *cache) u64 size = cache->page_count * (u64) VDO_BLOCK_SIZE; int result; - result = uds_allocate(cache->page_count, struct page_info, "page infos", + result = vdo_allocate(cache->page_count, struct page_info, "page infos", &cache->infos); if (result != UDS_SUCCESS) return result; - result = uds_allocate_memory(size, VDO_BLOCK_SIZE, "cache pages", &cache->pages); + result = vdo_allocate_memory(size, VDO_BLOCK_SIZE, "cache pages", &cache->pages); if (result != UDS_SUCCESS) return result; @@ -1341,7 +1341,7 @@ int vdo_invalidate_page_cache(struct vdo_page_cache *cache) } /* Reset the page map by re-allocating it. */ - vdo_int_map_free(uds_forget(cache->page_map)); + vdo_int_map_free(vdo_forget(cache->page_map)); return vdo_int_map_create(cache->page_count, &cache->page_map); } @@ -2346,17 +2346,17 @@ static int make_segment(struct forest *old_forest, block_count_t new_pages, forest->segments = index + 1; - result = uds_allocate(forest->segments, struct boundary, + result = vdo_allocate(forest->segments, struct boundary, "forest boundary array", &forest->boundaries); if (result != VDO_SUCCESS) return result; - result = uds_allocate(forest->segments, struct tree_page *, + result = vdo_allocate(forest->segments, struct tree_page *, "forest page pointers", &forest->pages); if (result != VDO_SUCCESS) return result; - result = uds_allocate(new_pages, struct tree_page, + result = vdo_allocate(new_pages, struct tree_page, "new forest pages", &forest->pages[index]); if (result != VDO_SUCCESS) return result; @@ -2382,7 +2382,7 @@ static int make_segment(struct forest *old_forest, block_count_t new_pages, struct block_map_tree *tree = &(forest->trees[root]); height_t height; - int result = uds_allocate(forest->segments, + int result = vdo_allocate(forest->segments, struct block_map_tree_segment, "tree root segments", &tree->segments); if (result != VDO_SUCCESS) @@ -2424,15 +2424,15 @@ static void deforest(struct forest *forest, size_t first_page_segment) size_t segment; for (segment = first_page_segment; segment < forest->segments; segment++) - uds_free(forest->pages[segment]); - uds_free(forest->pages); + vdo_free(forest->pages[segment]); + vdo_free(forest->pages); } for (root = 0; root < forest->map->root_count; root++) - uds_free(forest->trees[root].segments); + vdo_free(forest->trees[root].segments); - uds_free(forest->boundaries); - uds_free(forest); + vdo_free(forest->boundaries); + vdo_free(forest); } /** @@ -2459,7 +2459,7 @@ static int make_forest(struct block_map *map, block_count_t entries) return VDO_SUCCESS; } - result = uds_allocate_extended(struct forest, map->root_count, + result = vdo_allocate_extended(struct forest, map->root_count, struct block_map_tree, __func__, &forest); if (result != VDO_SUCCESS) @@ -2485,7 +2485,7 @@ static void replace_forest(struct block_map *map) if (map->next_forest != NULL) { if (map->forest != NULL) deforest(map->forest, map->forest->segments); - map->forest = 
uds_forget(map->next_forest); + map->forest = vdo_forget(map->next_forest); } map->entry_count = map->next_entry_count; @@ -2501,11 +2501,11 @@ static void finish_cursor(struct cursor *cursor) struct cursors *cursors = cursor->parent; struct vdo_completion *completion = cursors->completion; - return_vio_to_pool(cursors->pool, uds_forget(cursor->vio)); + return_vio_to_pool(cursors->pool, vdo_forget(cursor->vio)); if (--cursors->active_roots > 0) return; - uds_free(cursors); + vdo_free(cursors); vdo_finish_completion(completion); } @@ -2681,7 +2681,7 @@ void vdo_traverse_forest(struct block_map *map, vdo_entry_callback_fn callback, struct cursors *cursors; int result; - result = uds_allocate_extended(struct cursors, map->root_count, + result = vdo_allocate_extended(struct cursors, map->root_count, struct cursor, __func__, &cursors); if (result != VDO_SUCCESS) { vdo_fail_completion(completion, result); @@ -2729,7 +2729,7 @@ static int __must_check initialize_block_map_zone(struct block_map *map, zone->thread_id = vdo->thread_config.logical_threads[zone_number]; zone->block_map = map; - result = uds_allocate_extended(struct dirty_lists, maximum_age, + result = vdo_allocate_extended(struct dirty_lists, maximum_age, dirty_era_t, __func__, &zone->dirty_lists); if (result != VDO_SUCCESS) @@ -2822,19 +2822,19 @@ static void uninitialize_block_map_zone(struct block_map_zone *zone) { struct vdo_page_cache *cache = &zone->page_cache; - uds_free(uds_forget(zone->dirty_lists)); - free_vio_pool(uds_forget(zone->vio_pool)); - vdo_int_map_free(uds_forget(zone->loading_pages)); + vdo_free(vdo_forget(zone->dirty_lists)); + free_vio_pool(vdo_forget(zone->vio_pool)); + vdo_int_map_free(vdo_forget(zone->loading_pages)); if (cache->infos != NULL) { struct page_info *info; for (info = cache->infos; info < cache->infos + cache->page_count; info++) - free_vio(uds_forget(info->vio)); + free_vio(vdo_forget(info->vio)); } - vdo_int_map_free(uds_forget(cache->page_map)); - uds_free(uds_forget(cache->infos)); - uds_free(uds_forget(cache->pages)); + vdo_int_map_free(vdo_forget(cache->page_map)); + vdo_free(vdo_forget(cache->infos)); + vdo_free(vdo_forget(cache->pages)); } void vdo_free_block_map(struct block_map *map) @@ -2849,9 +2849,9 @@ void vdo_free_block_map(struct block_map *map) vdo_abandon_block_map_growth(map); if (map->forest != NULL) - deforest(uds_forget(map->forest), 0); - uds_free(uds_forget(map->action_manager)); - uds_free(map); + deforest(vdo_forget(map->forest), 0); + vdo_free(vdo_forget(map->action_manager)); + vdo_free(map); } /* @journal may be NULL. 
*/ @@ -2871,7 +2871,7 @@ int vdo_decode_block_map(struct block_map_state_2_0 state, block_count_t logical if (result != UDS_SUCCESS) return result; - result = uds_allocate_extended(struct block_map, + result = vdo_allocate_extended(struct block_map, vdo->thread_config.logical_zone_count, struct block_map_zone, __func__, &map); if (result != UDS_SUCCESS) @@ -3053,7 +3053,7 @@ void vdo_grow_block_map(struct block_map *map, struct vdo_completion *parent) void vdo_abandon_block_map_growth(struct block_map *map) { - struct forest *forest = uds_forget(map->next_forest); + struct forest *forest = vdo_forget(map->next_forest); if (forest != NULL) deforest(forest, forest->segments - 1); diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c index 1630993e536f..dcdd767e40e5 100644 --- a/drivers/md/dm-vdo/data-vio.c +++ b/drivers/md/dm-vdo/data-vio.c @@ -789,20 +789,20 @@ static int initialize_data_vio(struct data_vio *data_vio, struct vdo *vdo) int result; BUILD_BUG_ON(VDO_BLOCK_SIZE > PAGE_SIZE); - result = uds_allocate_memory(VDO_BLOCK_SIZE, 0, "data_vio data", + result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "data_vio data", &data_vio->vio.data); if (result != VDO_SUCCESS) return uds_log_error_strerror(result, "data_vio data allocation failure"); - result = uds_allocate_memory(VDO_BLOCK_SIZE, 0, "compressed block", + result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "compressed block", &data_vio->compression.block); if (result != VDO_SUCCESS) { return uds_log_error_strerror(result, "data_vio compressed block allocation failure"); } - result = uds_allocate_memory(VDO_BLOCK_SIZE, 0, "vio scratch", + result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "vio scratch", &data_vio->scratch_block); if (result != VDO_SUCCESS) return uds_log_error_strerror(result, @@ -825,10 +825,10 @@ static void destroy_data_vio(struct data_vio *data_vio) if (data_vio == NULL) return; - vdo_free_bio(uds_forget(data_vio->vio.bio)); - uds_free(uds_forget(data_vio->vio.data)); - uds_free(uds_forget(data_vio->compression.block)); - uds_free(uds_forget(data_vio->scratch_block)); + vdo_free_bio(vdo_forget(data_vio->vio.bio)); + vdo_free(vdo_forget(data_vio->vio.data)); + vdo_free(vdo_forget(data_vio->compression.block)); + vdo_free(vdo_forget(data_vio->scratch_block)); } /** @@ -845,7 +845,7 @@ int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size, struct data_vio_pool *pool; data_vio_count_t i; - result = uds_allocate_extended(struct data_vio_pool, pool_size, struct data_vio, + result = vdo_allocate_extended(struct data_vio_pool, pool_size, struct data_vio, __func__, &pool); if (result != UDS_SUCCESS) return result; @@ -867,7 +867,7 @@ int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size, result = uds_make_funnel_queue(&pool->queue); if (result != UDS_SUCCESS) { - free_data_vio_pool(uds_forget(pool)); + free_data_vio_pool(vdo_forget(pool)); return result; } @@ -924,8 +924,8 @@ void free_data_vio_pool(struct data_vio_pool *pool) destroy_data_vio(data_vio); } - uds_free_funnel_queue(uds_forget(pool->queue)); - uds_free(pool); + uds_free_funnel_queue(vdo_forget(pool->queue)); + vdo_free(pool); } static bool acquire_permit(struct limiter *limiter) @@ -1431,7 +1431,7 @@ void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset) allocation->pbn = VDO_ZERO_BLOCK; vdo_release_physical_zone_pbn_lock(allocation->zone, locked_pbn, - uds_forget(allocation->lock)); + vdo_forget(allocation->lock)); } /** diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c index 
b453a204239a..7cdbe825116f 100644 --- a/drivers/md/dm-vdo/dedupe.c +++ b/drivers/md/dm-vdo/dedupe.c @@ -700,7 +700,7 @@ static void unlock_duplicate_pbn(struct vdo_completion *completion) "must have a duplicate lock to release"); vdo_release_physical_zone_pbn_lock(agent->duplicate.zone, agent->duplicate.pbn, - uds_forget(lock->duplicate_lock)); + vdo_forget(lock->duplicate_lock)); if (lock->state == VDO_HASH_LOCK_BYPASSING) { complete_data_vio(completion); return; @@ -896,7 +896,7 @@ static int __must_check acquire_lock(struct hash_zone *zone, result = vdo_int_map_put(zone->hash_lock_map, hash_lock_key(new_lock), new_lock, (replace_lock != NULL), (void **) &lock); if (result != VDO_SUCCESS) { - return_hash_lock_to_pool(zone, uds_forget(new_lock)); + return_hash_lock_to_pool(zone, vdo_forget(new_lock)); return result; } @@ -915,7 +915,7 @@ static int __must_check acquire_lock(struct hash_zone *zone, lock->registered = true; } else { /* There's already a lock for the hash, so we don't need the borrowed lock. */ - return_hash_lock_to_pool(zone, uds_forget(new_lock)); + return_hash_lock_to_pool(zone, vdo_forget(new_lock)); } *lock_ptr = lock; @@ -1980,7 +1980,7 @@ static void transfer_allocation_lock(struct data_vio *data_vio) * Since the lock is being transferred, the holder count doesn't change (and isn't even * safe to examine on this thread). */ - hash_lock->duplicate_lock = uds_forget(allocation->lock); + hash_lock->duplicate_lock = vdo_forget(allocation->lock); } /** @@ -2025,7 +2025,7 @@ void vdo_share_compressed_write_lock(struct data_vio *data_vio, static void dedupe_kobj_release(struct kobject *directory) { - uds_free(container_of(directory, struct hash_zones, dedupe_directory)); + vdo_free(container_of(directory, struct hash_zones, dedupe_directory)); } static ssize_t dedupe_status_show(struct kobject *directory, struct attribute *attr, @@ -2083,12 +2083,12 @@ static void start_uds_queue(void *ptr) */ struct vdo_thread *thread = vdo_get_work_queue_owner(vdo_get_current_work_queue()); - uds_register_allocating_thread(&thread->allocating_thread, NULL); + vdo_register_allocating_thread(&thread->allocating_thread, NULL); } static void finish_uds_queue(void *ptr __always_unused) { - uds_unregister_allocating_thread(); + vdo_unregister_allocating_thread(); } static void close_index(struct hash_zones *zones) @@ -2259,7 +2259,7 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones) result = vdo_make_thread(vdo, vdo->thread_config.dedupe_thread, &uds_queue_type, 1, NULL); if (result != VDO_SUCCESS) { - uds_destroy_index_session(uds_forget(zones->index_session)); + uds_destroy_index_session(vdo_forget(zones->index_session)); uds_log_error("UDS index queue initialization failed (%d)", result); return result; } @@ -2417,7 +2417,7 @@ static int __must_check initialize_zone(struct vdo *vdo, struct hash_zones *zone vdo_set_completion_callback(&zone->completion, timeout_index_operations_callback, zone->thread_id); INIT_LIST_HEAD(&zone->lock_pool); - result = uds_allocate(LOCK_POOL_CAPACITY, struct hash_lock, "hash_lock array", + result = vdo_allocate(LOCK_POOL_CAPACITY, struct hash_lock, "hash_lock array", &zone->lock_array); if (result != VDO_SUCCESS) return result; @@ -2471,14 +2471,14 @@ int vdo_make_hash_zones(struct vdo *vdo, struct hash_zones **zones_ptr) if (zone_count == 0) return VDO_SUCCESS; - result = uds_allocate_extended(struct hash_zones, zone_count, struct hash_zone, + result = vdo_allocate_extended(struct hash_zones, zone_count, struct hash_zone, __func__, 
&zones); if (result != VDO_SUCCESS) return result; result = initialize_index(vdo, zones); if (result != VDO_SUCCESS) { - uds_free(zones); + vdo_free(zones); return result; } @@ -2510,7 +2510,7 @@ void vdo_finish_dedupe_index(struct hash_zones *zones) if (zones == NULL) return; - uds_destroy_index_session(uds_forget(zones->index_session)); + uds_destroy_index_session(vdo_forget(zones->index_session)); } /** @@ -2524,14 +2524,14 @@ void vdo_free_hash_zones(struct hash_zones *zones) if (zones == NULL) return; - uds_free(uds_forget(zones->manager)); + vdo_free(vdo_forget(zones->manager)); for (i = 0; i < zones->zone_count; i++) { struct hash_zone *zone = &zones->zones[i]; - uds_free_funnel_queue(uds_forget(zone->timed_out_complete)); - vdo_int_map_free(uds_forget(zone->hash_lock_map)); - uds_free(uds_forget(zone->lock_array)); + uds_free_funnel_queue(vdo_forget(zone->timed_out_complete)); + vdo_int_map_free(vdo_forget(zone->hash_lock_map)); + vdo_free(vdo_forget(zone->lock_array)); } if (zones->index_session != NULL) @@ -2539,7 +2539,7 @@ void vdo_free_hash_zones(struct hash_zones *zones) ratelimit_state_exit(&zones->ratelimiter); if (vdo_get_admin_state_code(&zones->state) == VDO_ADMIN_STATE_NEW) - uds_free(zones); + vdo_free(zones); else kobject_put(&zones->dedupe_directory); } diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c index 175ee56a89e1..86c30fbd75ca 100644 --- a/drivers/md/dm-vdo/dm-vdo-target.c +++ b/drivers/md/dm-vdo/dm-vdo-target.c @@ -189,12 +189,12 @@ static void free_device_config(struct device_config *config) if (config->owned_device != NULL) dm_put_device(config->owning_target, config->owned_device); - uds_free(config->parent_device_name); - uds_free(config->original_string); + vdo_free(config->parent_device_name); + vdo_free(config->original_string); /* Reduce the chance a use-after-free (as in BZ 1669960) happens to work. */ memset(config, 0, sizeof(*config)); - uds_free(config); + vdo_free(config); } /** @@ -249,15 +249,15 @@ static void free_string_array(char **string_array) unsigned int offset; for (offset = 0; string_array[offset] != NULL; offset++) - uds_free(string_array[offset]); - uds_free(string_array); + vdo_free(string_array[offset]); + vdo_free(string_array); } /* * Split the input string into substrings, separated at occurrences of the indicated character, * returning a null-terminated list of string pointers. * - * The string pointers and the pointer array itself should both be freed with uds_free() when no + * The string pointers and the pointer array itself should both be freed with vdo_free() when no * longer needed. This can be done with vdo_free_string_array (below) if the pointers in the array * are not changed. Since the array and copied strings are allocated by this function, it may only * be used in contexts where allocation is permitted. 
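 */

To make the ownership contract above concrete, here is a minimal caller
sketch. It is not part of the patch; it assumes the static split_string()
and free_string_array() helpers shown in the surrounding hunks, and the
example_split_and_free() name and sample input are invented for
illustration.

static int example_split_and_free(void)
{
	char **substrings;
	unsigned int i;
	int result;

	/*
	 * split_string() hands back a NULL-terminated pointer array; the
	 * array and every substring in it come from vdo_allocate(), so
	 * the caller owns all of them.
	 */
	result = split_string("logical:physical:hash", ':', &substrings);
	if (result != VDO_SUCCESS)
		return result;

	for (i = 0; substrings[i] != NULL; i++)
		uds_log_info("substring %u: '%s'", i, substrings[i]);

	/* A single call releases the substrings and the array itself. */
	free_string_array(substrings);
	return VDO_SUCCESS;
}
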
@@ -278,7 +278,7 @@ static int split_string(const char *string, char separator, char ***substring_ar substring_count++; } - result = uds_allocate(substring_count + 1, char *, "string-splitting array", + result = vdo_allocate(substring_count + 1, char *, "string-splitting array", &substrings); if (result != UDS_SUCCESS) return result; @@ -287,7 +287,7 @@ static int split_string(const char *string, char separator, char ***substring_ar if (*s == separator) { ptrdiff_t length = s - string; - result = uds_allocate(length + 1, char, "split string", + result = vdo_allocate(length + 1, char, "split string", &substrings[current_substring]); if (result != UDS_SUCCESS) { free_string_array(substrings); @@ -308,7 +308,7 @@ static int split_string(const char *string, char separator, char ***substring_ar BUG_ON(current_substring != (substring_count - 1)); length = strlen(string); - result = uds_allocate(length + 1, char, "split string", + result = vdo_allocate(length + 1, char, "split string", &substrings[current_substring]); if (result != UDS_SUCCESS) { free_string_array(substrings); @@ -337,7 +337,7 @@ static int join_strings(char **substring_array, size_t array_length, char separa for (i = 0; (i < array_length) && (substring_array[i] != NULL); i++) string_length += strlen(substring_array[i]) + 1; - result = uds_allocate(string_length, char, __func__, &output); + result = vdo_allocate(string_length, char, __func__, &output); if (result != VDO_SUCCESS) return result; @@ -731,7 +731,7 @@ static int parse_device_config(int argc, char **argv, struct dm_target *ti, return VDO_BAD_CONFIGURATION; } - result = uds_allocate(1, struct device_config, "device_config", &config); + result = vdo_allocate(1, struct device_config, "device_config", &config); if (result != VDO_SUCCESS) { handle_parse_error(config, error_ptr, "Could not allocate config structure"); @@ -777,7 +777,7 @@ static int parse_device_config(int argc, char **argv, struct dm_target *ti, if (config->version >= 1) dm_shift_arg(&arg_set); - result = uds_duplicate_string(dm_shift_arg(&arg_set), "parent device name", + result = vdo_duplicate_string(dm_shift_arg(&arg_set), "parent device name", &config->parent_device_name); if (result != VDO_SUCCESS) { handle_parse_error(config, error_ptr, @@ -1100,7 +1100,7 @@ static int vdo_message(struct dm_target *ti, unsigned int argc, char **argv, } vdo = get_vdo_for_target(ti); - uds_register_allocating_thread(&allocating_thread, NULL); + vdo_register_allocating_thread(&allocating_thread, NULL); vdo_register_thread_device_id(&instance_thread, &vdo->instance); /* @@ -1115,7 +1115,7 @@ static int vdo_message(struct dm_target *ti, unsigned int argc, char **argv, } vdo_unregister_thread_device_id(); - uds_unregister_allocating_thread(); + vdo_unregister_allocating_thread(); return result; } @@ -1536,7 +1536,7 @@ static int grow_bit_array(void) unsigned long *new_words; int result; - result = uds_reallocate_memory(instances.words, + result = vdo_reallocate_memory(instances.words, get_bit_array_size(instances.bit_count), get_bit_array_size(new_count), "instance number bit array", &new_words); @@ -1702,7 +1702,7 @@ static int grow_layout(struct vdo *vdo, block_count_t old_size, block_count_t ne VDO_SLAB_SUMMARY_PARTITION), &vdo->next_layout); if (result != VDO_SUCCESS) { - dm_kcopyd_client_destroy(uds_forget(vdo->partition_copier)); + dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier)); return result; } @@ -1715,7 +1715,7 @@ static int grow_layout(struct vdo *vdo, block_count_t old_size, block_count_t ne if 
(min_new_size > new_size) { /* Copying the journal and summary would destroy some old metadata. */ vdo_uninitialize_layout(&vdo->next_layout); - dm_kcopyd_client_destroy(uds_forget(vdo->partition_copier)); + dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier)); return VDO_INCREMENT_TOO_SMALL; } @@ -1901,7 +1901,7 @@ static int vdo_ctr(struct dm_target *ti, unsigned int argc, char **argv) const char *device_name; struct vdo *vdo; - uds_register_allocating_thread(&allocating_thread, NULL); + vdo_register_allocating_thread(&allocating_thread, NULL); device_name = vdo_get_device_name(ti); vdo = vdo_find_matching(vdo_is_named, device_name); if (vdo == NULL) { @@ -1912,14 +1912,14 @@ static int vdo_ctr(struct dm_target *ti, unsigned int argc, char **argv) vdo_unregister_thread_device_id(); } - uds_unregister_allocating_thread(); + vdo_unregister_allocating_thread(); return result; } static void vdo_dtr(struct dm_target *ti) { struct device_config *config = ti->private; - struct vdo *vdo = uds_forget(config->vdo); + struct vdo *vdo = vdo_forget(config->vdo); list_del_init(&config->config_list); if (list_empty(&vdo->device_config_list)) { @@ -1930,17 +1930,17 @@ static void vdo_dtr(struct dm_target *ti) struct registered_thread allocating_thread, instance_thread; vdo_register_thread_device_id(&instance_thread, &instance); - uds_register_allocating_thread(&allocating_thread, NULL); + vdo_register_allocating_thread(&allocating_thread, NULL); device_name = vdo_get_device_name(ti); uds_log_info("stopping device '%s'", device_name); if (vdo->dump_on_shutdown) vdo_dump_all(vdo, "device shutdown"); - vdo_destroy(uds_forget(vdo)); + vdo_destroy(vdo_forget(vdo)); uds_log_info("device '%s' stopped", device_name); vdo_unregister_thread_device_id(); - uds_unregister_allocating_thread(); + vdo_unregister_allocating_thread(); release_instance(instance); } else if (config == vdo->device_config) { /* @@ -2323,7 +2323,7 @@ static void handle_load_error(struct vdo_completion *completion) (vdo->admin.phase == LOAD_PHASE_MAKE_DIRTY)) { uds_log_error_strerror(completion->result, "aborting load"); vdo->admin.phase = LOAD_PHASE_DRAIN_JOURNAL; - load_callback(uds_forget(completion)); + load_callback(vdo_forget(completion)); return; } @@ -2633,7 +2633,7 @@ static void grow_physical_callback(struct vdo_completion *completion) case GROW_PHYSICAL_PHASE_UPDATE_COMPONENTS: vdo_uninitialize_layout(&vdo->layout); vdo->layout = vdo->next_layout; - uds_forget(vdo->next_layout.head); + vdo_forget(vdo->next_layout.head); vdo->states.vdo.config.physical_blocks = vdo->layout.size; vdo_update_slab_depot_size(vdo->depot); vdo_save_components(vdo, completion); @@ -2893,7 +2893,7 @@ static void vdo_module_destroy(void) ASSERT_LOG_ONLY(instances.count == 0, "should have no instance numbers still in use, but have %u", instances.count); - uds_free(instances.words); + vdo_free(instances.words); memset(&instances, 0, sizeof(struct instance_tracker)); uds_log_info("unloaded version %s", CURRENT_VERSION); @@ -2904,7 +2904,7 @@ static int __init vdo_init(void) int result = 0; /* Memory tracking must be initialized first for accurate accounting. */ - uds_memory_init(); + vdo_memory_init(); uds_init_sysfs(); vdo_initialize_thread_device_registry(); @@ -2935,7 +2935,7 @@ static void __exit vdo_exit(void) vdo_module_destroy(); uds_put_sysfs(); /* Memory tracking cleanup must be done last. 
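 * (This mirrors the init-order comment in vdo_init() above: vdo_memory_init()
 * runs first and vdo_memory_exit() runs last, so that every intervening
 * vdo_allocate()/vdo_free() pair is tracked -- an editorial inference from
 * the surrounding hunks, not text from the original patch.)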
*/ - uds_memory_exit(); + vdo_memory_exit(); } module_init(vdo_init); diff --git a/drivers/md/dm-vdo/dump.c b/drivers/md/dm-vdo/dump.c index 2a0890b54186..52ee9a72781c 100644 --- a/drivers/md/dm-vdo/dump.c +++ b/drivers/md/dm-vdo/dump.c @@ -79,7 +79,7 @@ static void do_dump(struct vdo *vdo, unsigned int dump_options_requested, if ((dump_options_requested & FLAG_SHOW_VDO_STATUS) != 0) vdo_dump_status(vdo); - uds_report_memory_usage(); + vdo_report_memory_usage(); uds_log_info("end of %s dump", UDS_LOGGING_MODULE_NAME); } diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c index 4a0a6afea670..56d94339d2af 100644 --- a/drivers/md/dm-vdo/encodings.c +++ b/drivers/md/dm-vdo/encodings.c @@ -799,7 +799,7 @@ static int allocate_partition(struct layout *layout, u8 id, struct partition *partition; int result; - result = uds_allocate(1, struct partition, __func__, &partition); + result = vdo_allocate(1, struct partition, __func__, &partition); if (result != UDS_SUCCESS) return result; @@ -928,7 +928,7 @@ void vdo_uninitialize_layout(struct layout *layout) struct partition *part = layout->head; layout->head = part->next; - uds_free(part); + vdo_free(part); } memset(layout, 0, sizeof(struct layout)); diff --git a/drivers/md/dm-vdo/flush.c b/drivers/md/dm-vdo/flush.c index 1bc13470a608..8d8d9cf4a24c 100644 --- a/drivers/md/dm-vdo/flush.c +++ b/drivers/md/dm-vdo/flush.c @@ -103,9 +103,9 @@ static void *allocate_flush(gfp_t gfp_mask, void *pool_data) struct vdo_flush *flush = NULL; if ((gfp_mask & GFP_NOWAIT) == GFP_NOWAIT) { - flush = uds_allocate_memory_nowait(sizeof(struct vdo_flush), __func__); + flush = vdo_allocate_memory_nowait(sizeof(struct vdo_flush), __func__); } else { - int result = uds_allocate(1, struct vdo_flush, __func__, &flush); + int result = vdo_allocate(1, struct vdo_flush, __func__, &flush); if (result != VDO_SUCCESS) uds_log_error_strerror(result, "failed to allocate spare flush"); @@ -123,7 +123,7 @@ static void *allocate_flush(gfp_t gfp_mask, void *pool_data) static void free_flush(void *element, void *pool_data __always_unused) { - uds_free(element); + vdo_free(element); } /** @@ -134,7 +134,7 @@ static void free_flush(void *element, void *pool_data __always_unused) */ int vdo_make_flusher(struct vdo *vdo) { - int result = uds_allocate(1, struct flusher, __func__, &vdo->flusher); + int result = vdo_allocate(1, struct flusher, __func__, &vdo->flusher); if (result != VDO_SUCCESS) return result; @@ -162,8 +162,8 @@ void vdo_free_flusher(struct flusher *flusher) return; if (flusher->flush_pool != NULL) - mempool_destroy(uds_forget(flusher->flush_pool)); - uds_free(flusher); + mempool_destroy(vdo_forget(flusher->flush_pool)); + vdo_free(flusher); } /** diff --git a/drivers/md/dm-vdo/funnel-queue.c b/drivers/md/dm-vdo/funnel-queue.c index d5d96bb38b94..67f7b52ecc86 100644 --- a/drivers/md/dm-vdo/funnel-queue.c +++ b/drivers/md/dm-vdo/funnel-queue.c @@ -14,7 +14,7 @@ int uds_make_funnel_queue(struct funnel_queue **queue_ptr) int result; struct funnel_queue *queue; - result = uds_allocate(1, struct funnel_queue, "funnel queue", &queue); + result = vdo_allocate(1, struct funnel_queue, "funnel queue", &queue); if (result != UDS_SUCCESS) return result; @@ -32,7 +32,7 @@ int uds_make_funnel_queue(struct funnel_queue **queue_ptr) void uds_free_funnel_queue(struct funnel_queue *queue) { - uds_free(queue); + vdo_free(queue); } static struct funnel_queue_entry *get_oldest(struct funnel_queue *queue) diff --git a/drivers/md/dm-vdo/funnel-workqueue.c 
b/drivers/md/dm-vdo/funnel-workqueue.c index 8f0ada13e549..ebf8dce67086 100644 --- a/drivers/md/dm-vdo/funnel-workqueue.c +++ b/drivers/md/dm-vdo/funnel-workqueue.c @@ -276,8 +276,8 @@ static void free_simple_work_queue(struct simple_work_queue *queue) for (i = 0; i <= VDO_WORK_Q_MAX_PRIORITY; i++) uds_free_funnel_queue(queue->priority_lists[i]); - uds_free(queue->common.name); - uds_free(queue); + vdo_free(queue->common.name); + vdo_free(queue); } static void free_round_robin_work_queue(struct round_robin_work_queue *queue) @@ -290,9 +290,9 @@ static void free_round_robin_work_queue(struct round_robin_work_queue *queue) for (i = 0; i < count; i++) free_simple_work_queue(queue_table[i]); - uds_free(queue_table); - uds_free(queue->common.name); - uds_free(queue); + vdo_free(queue_table); + vdo_free(queue->common.name); + vdo_free(queue); } void vdo_free_work_queue(struct vdo_work_queue *queue) @@ -323,7 +323,7 @@ static int make_simple_work_queue(const char *thread_name_prefix, const char *na "queue priority count %u within limit %u", type->max_priority, VDO_WORK_Q_MAX_PRIORITY); - result = uds_allocate(1, struct simple_work_queue, "simple work queue", &queue); + result = vdo_allocate(1, struct simple_work_queue, "simple work queue", &queue); if (result != UDS_SUCCESS) return result; @@ -333,9 +333,9 @@ static int make_simple_work_queue(const char *thread_name_prefix, const char *na queue->common.owner = owner; init_waitqueue_head(&queue->waiting_worker_threads); - result = uds_duplicate_string(name, "queue name", &queue->common.name); + result = vdo_duplicate_string(name, "queue name", &queue->common.name); if (result != VDO_SUCCESS) { - uds_free(queue); + vdo_free(queue); return -ENOMEM; } @@ -399,15 +399,15 @@ int vdo_make_work_queue(const char *thread_name_prefix, const char *name, return result; } - result = uds_allocate(1, struct round_robin_work_queue, "round-robin work queue", + result = vdo_allocate(1, struct round_robin_work_queue, "round-robin work queue", &queue); if (result != UDS_SUCCESS) return result; - result = uds_allocate(thread_count, struct simple_work_queue *, + result = vdo_allocate(thread_count, struct simple_work_queue *, "subordinate work queues", &queue->service_queues); if (result != UDS_SUCCESS) { - uds_free(queue); + vdo_free(queue); return result; } @@ -415,10 +415,10 @@ int vdo_make_work_queue(const char *thread_name_prefix, const char *name, queue->common.round_robin_mode = true; queue->common.owner = owner; - result = uds_duplicate_string(name, "queue name", &queue->common.name); + result = vdo_duplicate_string(name, "queue name", &queue->common.name); if (result != VDO_SUCCESS) { - uds_free(queue->service_queues); - uds_free(queue); + vdo_free(queue->service_queues); + vdo_free(queue); return -ENOMEM; } @@ -433,7 +433,7 @@ int vdo_make_work_queue(const char *thread_name_prefix, const char *name, if (result != VDO_SUCCESS) { queue->num_service_queues = i; /* Destroy previously created subordinates. 
*/ - vdo_free_work_queue(uds_forget(*queue_ptr)); + vdo_free_work_queue(vdo_forget(*queue_ptr)); return result; } } diff --git a/drivers/md/dm-vdo/indexer/chapter-index.c b/drivers/md/dm-vdo/indexer/chapter-index.c index 6487825ada90..9477150362ae 100644 --- a/drivers/md/dm-vdo/indexer/chapter-index.c +++ b/drivers/md/dm-vdo/indexer/chapter-index.c @@ -20,7 +20,7 @@ int uds_make_open_chapter_index(struct open_chapter_index **chapter_index, size_t memory_size; struct open_chapter_index *index; - result = uds_allocate(1, struct open_chapter_index, "open chapter index", &index); + result = vdo_allocate(1, struct open_chapter_index, "open chapter index", &index); if (result != UDS_SUCCESS) return result; @@ -37,7 +37,7 @@ int uds_make_open_chapter_index(struct open_chapter_index **chapter_index, geometry->chapter_payload_bits, memory_size, 'm'); if (result != UDS_SUCCESS) { - uds_free(index); + vdo_free(index); return result; } @@ -52,7 +52,7 @@ void uds_free_open_chapter_index(struct open_chapter_index *chapter_index) return; uds_uninitialize_delta_index(&chapter_index->delta_index); - uds_free(chapter_index); + vdo_free(chapter_index); } /* Re-initialize an open chapter index for a new chapter. */ diff --git a/drivers/md/dm-vdo/indexer/config.c b/drivers/md/dm-vdo/indexer/config.c index b572350a3d5f..cd20ee8b9a02 100644 --- a/drivers/md/dm-vdo/indexer/config.c +++ b/drivers/md/dm-vdo/indexer/config.c @@ -325,7 +325,7 @@ int uds_make_configuration(const struct uds_parameters *params, if (result != UDS_SUCCESS) return result; - result = uds_allocate(1, struct uds_configuration, __func__, &config); + result = vdo_allocate(1, struct uds_configuration, __func__, &config); if (result != UDS_SUCCESS) return result; @@ -356,7 +356,7 @@ void uds_free_configuration(struct uds_configuration *config) { if (config != NULL) { uds_free_index_geometry(config->geometry); - uds_free(config); + vdo_free(config); } } diff --git a/drivers/md/dm-vdo/indexer/delta-index.c b/drivers/md/dm-vdo/indexer/delta-index.c index 8eece0ba6d93..11f7b85b6710 100644 --- a/drivers/md/dm-vdo/indexer/delta-index.c +++ b/drivers/md/dm-vdo/indexer/delta-index.c @@ -296,12 +296,12 @@ void uds_uninitialize_delta_index(struct delta_index *delta_index) return; for (z = 0; z < delta_index->zone_count; z++) { - uds_free(uds_forget(delta_index->delta_zones[z].new_offsets)); - uds_free(uds_forget(delta_index->delta_zones[z].delta_lists)); - uds_free(uds_forget(delta_index->delta_zones[z].memory)); + vdo_free(vdo_forget(delta_index->delta_zones[z].new_offsets)); + vdo_free(vdo_forget(delta_index->delta_zones[z].delta_lists)); + vdo_free(vdo_forget(delta_index->delta_zones[z].memory)); } - uds_free(delta_index->delta_zones); + vdo_free(delta_index->delta_zones); memset(delta_index, 0, sizeof(struct delta_index)); } @@ -311,17 +311,17 @@ static int initialize_delta_zone(struct delta_zone *delta_zone, size_t size, { int result; - result = uds_allocate(size, u8, "delta list", &delta_zone->memory); + result = vdo_allocate(size, u8, "delta list", &delta_zone->memory); if (result != UDS_SUCCESS) return result; - result = uds_allocate(list_count + 2, u64, "delta list temp", + result = vdo_allocate(list_count + 2, u64, "delta list temp", &delta_zone->new_offsets); if (result != UDS_SUCCESS) return result; /* Allocate the delta lists. 
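 * (The list_count + 2 sizing here, and for new_offsets above, leaves room
 * for guard lists at both ends; only lists 1..list_count carry real
 * entries -- an editorial inference, not text from the original patch.)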
*/ - result = uds_allocate(list_count + 2, struct delta_list, "delta lists", + result = vdo_allocate(list_count + 2, struct delta_list, "delta lists", &delta_zone->delta_lists); if (result != UDS_SUCCESS) return result; @@ -352,7 +352,7 @@ int uds_initialize_delta_index(struct delta_index *delta_index, unsigned int zon unsigned int z; size_t zone_memory; - result = uds_allocate(zone_count, struct delta_zone, "Delta Index Zones", + result = vdo_allocate(zone_count, struct delta_zone, "Delta Index Zones", &delta_index->delta_zones); if (result != UDS_SUCCESS) return result; @@ -1047,7 +1047,7 @@ int uds_finish_restoring_delta_index(struct delta_index *delta_index, unsigned int z; u8 *data; - result = uds_allocate(DELTA_LIST_MAX_BYTE_COUNT, u8, __func__, &data); + result = vdo_allocate(DELTA_LIST_MAX_BYTE_COUNT, u8, __func__, &data); if (result != UDS_SUCCESS) return result; @@ -1062,7 +1062,7 @@ int uds_finish_restoring_delta_index(struct delta_index *delta_index, } } - uds_free(data); + vdo_free(data); return saved_result; } diff --git a/drivers/md/dm-vdo/indexer/funnel-requestqueue.c b/drivers/md/dm-vdo/indexer/funnel-requestqueue.c index d2b49e39550c..95a402ec31c9 100644 --- a/drivers/md/dm-vdo/indexer/funnel-requestqueue.c +++ b/drivers/md/dm-vdo/indexer/funnel-requestqueue.c @@ -198,7 +198,7 @@ int uds_make_request_queue(const char *queue_name, int result; struct uds_request_queue *queue; - result = uds_allocate(1, struct uds_request_queue, __func__, &queue); + result = vdo_allocate(1, struct uds_request_queue, __func__, &queue); if (result != UDS_SUCCESS) return result; @@ -275,5 +275,5 @@ void uds_request_queue_finish(struct uds_request_queue *queue) uds_free_funnel_queue(queue->main_queue); uds_free_funnel_queue(queue->retry_queue); - uds_free(queue); + vdo_free(queue); } diff --git a/drivers/md/dm-vdo/indexer/geometry.c b/drivers/md/dm-vdo/indexer/geometry.c index 38c18283cdde..c735e6cb4425 100644 --- a/drivers/md/dm-vdo/indexer/geometry.c +++ b/drivers/md/dm-vdo/indexer/geometry.c @@ -61,7 +61,7 @@ int uds_make_index_geometry(size_t bytes_per_page, u32 record_pages_per_chapter, int result; struct index_geometry *geometry; - result = uds_allocate(1, struct index_geometry, "geometry", &geometry); + result = vdo_allocate(1, struct index_geometry, "geometry", &geometry); if (result != UDS_SUCCESS) return result; @@ -121,7 +121,7 @@ int uds_copy_index_geometry(struct index_geometry *source, void uds_free_index_geometry(struct index_geometry *geometry) { - uds_free(geometry); + vdo_free(geometry); } u32 __must_check uds_map_to_physical_chapter(const struct index_geometry *geometry, diff --git a/drivers/md/dm-vdo/indexer/index-layout.c b/drivers/md/dm-vdo/indexer/index-layout.c index a0227a75814b..c1bcff03cc55 100644 --- a/drivers/md/dm-vdo/indexer/index-layout.c +++ b/drivers/md/dm-vdo/indexer/index-layout.c @@ -484,7 +484,7 @@ static int __must_check make_index_save_region_table(struct index_save_layout *i type = RH_TYPE_UNSAVED; } - result = uds_allocate_extended(struct region_table, region_count, + result = vdo_allocate_extended(struct region_table, region_count, struct layout_region, "layout region table for ISL", &table); if (result != UDS_SUCCESS) @@ -545,7 +545,7 @@ static int __must_check write_index_save_header(struct index_save_layout *isl, u8 *buffer; size_t offset = 0; - result = uds_allocate(table->encoded_size, u8, "index save data", &buffer); + result = vdo_allocate(table->encoded_size, u8, "index save data", &buffer); if (result != UDS_SUCCESS) return result; @@ -564,7 
+564,7 @@ static int __must_check write_index_save_header(struct index_save_layout *isl, } result = uds_write_to_buffered_writer(writer, buffer, offset); - uds_free(buffer); + vdo_free(buffer); if (result != UDS_SUCCESS) return result; @@ -584,12 +584,12 @@ static int write_index_save_layout(struct index_layout *layout, result = open_region_writer(layout, &isl->header, &writer); if (result != UDS_SUCCESS) { - uds_free(table); + vdo_free(table); return result; } result = write_index_save_header(isl, table, writer); - uds_free(table); + vdo_free(table); uds_free_buffered_writer(writer); return result; @@ -667,7 +667,7 @@ static int __must_check make_layout_region_table(struct index_layout *layout, struct region_table *table; struct layout_region *lr; - result = uds_allocate_extended(struct region_table, region_count, + result = vdo_allocate_extended(struct region_table, region_count, struct layout_region, "layout region table", &table); if (result != UDS_SUCCESS) @@ -715,7 +715,7 @@ static int __must_check write_layout_header(struct index_layout *layout, u8 *buffer; size_t offset = 0; - result = uds_allocate(table->encoded_size, u8, "layout data", &buffer); + result = vdo_allocate(table->encoded_size, u8, "layout data", &buffer); if (result != UDS_SUCCESS) return result; @@ -739,7 +739,7 @@ static int __must_check write_layout_header(struct index_layout *layout, } result = uds_write_to_buffered_writer(writer, buffer, offset); - uds_free(buffer); + vdo_free(buffer); if (result != UDS_SUCCESS) return result; @@ -785,12 +785,12 @@ static int __must_check save_layout(struct index_layout *layout, off_t offset) result = open_layout_writer(layout, &layout->header, offset, &writer); if (result != UDS_SUCCESS) { - uds_free(table); + vdo_free(table); return result; } result = write_layout_header(layout, table, writer); - uds_free(table); + vdo_free(table); uds_free_buffered_writer(writer); return result; @@ -805,7 +805,7 @@ static int create_index_layout(struct index_layout *layout, struct uds_configura if (result != UDS_SUCCESS) return result; - result = uds_allocate(sizes.save_count, struct index_save_layout, __func__, + result = vdo_allocate(sizes.save_count, struct index_save_layout, __func__, &layout->index.saves); if (result != UDS_SUCCESS) return result; @@ -1162,7 +1162,7 @@ static int __must_check load_region_table(struct buffered_reader *reader, header.version); } - result = uds_allocate_extended(struct region_table, header.region_count, + result = vdo_allocate_extended(struct region_table, header.region_count, struct layout_region, "single file layout region table", &table); if (result != UDS_SUCCESS) @@ -1176,7 +1176,7 @@ static int __must_check load_region_table(struct buffered_reader *reader, result = uds_read_from_buffered_reader(reader, region_buffer, sizeof(region_buffer)); if (result != UDS_SUCCESS) { - uds_free(table); + vdo_free(table); return uds_log_error_strerror(UDS_CORRUPT_DATA, "cannot read region table layouts"); } @@ -1201,13 +1201,13 @@ static int __must_check read_super_block_data(struct buffered_reader *reader, u8 *buffer; size_t offset = 0; - result = uds_allocate(saved_size, u8, "super block data", &buffer); + result = vdo_allocate(saved_size, u8, "super block data", &buffer); if (result != UDS_SUCCESS) return result; result = uds_read_from_buffered_reader(reader, buffer, saved_size); if (result != UDS_SUCCESS) { - uds_free(buffer); + vdo_free(buffer); return uds_log_error_strerror(result, "cannot read region table header"); } @@ -1232,7 +1232,7 @@ static int 
__must_check read_super_block_data(struct buffered_reader *reader, super->start_offset = 0; } - uds_free(buffer); + vdo_free(buffer); if (memcmp(super->magic_label, LAYOUT_MAGIC, MAGIC_SIZE) != 0) return uds_log_error_strerror(UDS_CORRUPT_DATA, @@ -1335,7 +1335,7 @@ static int __must_check reconstitute_layout(struct index_layout *layout, int result; u64 next_block = first_block; - result = uds_allocate(layout->super.max_saves, struct index_save_layout, + result = vdo_allocate(layout->super.max_saves, struct index_save_layout, __func__, &layout->index.saves); if (result != UDS_SUCCESS) return result; @@ -1386,19 +1386,19 @@ static int __must_check load_super_block(struct index_layout *layout, size_t blo return result; if (table->header.type != RH_TYPE_SUPER) { - uds_free(table); + vdo_free(table); return uds_log_error_strerror(UDS_CORRUPT_DATA, "not a superblock region table"); } result = read_super_block_data(reader, layout, table->header.payload); if (result != UDS_SUCCESS) { - uds_free(table); + vdo_free(table); return uds_log_error_strerror(result, "unknown superblock format"); } if (super->block_size != block_size) { - uds_free(table); + vdo_free(table); return uds_log_error_strerror(UDS_CORRUPT_DATA, "superblock saved block_size %u differs from supplied block_size %zu", super->block_size, block_size); @@ -1406,7 +1406,7 @@ static int __must_check load_super_block(struct index_layout *layout, size_t blo first_block -= (super->volume_offset - super->start_offset); result = reconstitute_layout(layout, table, first_block); - uds_free(table); + vdo_free(table); return result; } @@ -1545,7 +1545,7 @@ static int __must_check load_index_save(struct index_save_layout *isl, if (table->header.region_blocks != isl->index_save.block_count) { u64 region_blocks = table->header.region_blocks; - uds_free(table); + vdo_free(table); return uds_log_error_strerror(UDS_CORRUPT_DATA, "unexpected index save %u region block count %llu", instance, @@ -1553,14 +1553,14 @@ static int __must_check load_index_save(struct index_save_layout *isl, } if (table->header.type == RH_TYPE_UNSAVED) { - uds_free(table); + vdo_free(table); reset_index_save_layout(isl, 0); return UDS_SUCCESS; } if (table->header.type != RH_TYPE_SAVE) { - uds_free(table); + vdo_free(table); return uds_log_error_strerror(UDS_CORRUPT_DATA, "unexpected index save %u header type %u", instance, table->header.type); @@ -1568,14 +1568,14 @@ static int __must_check load_index_save(struct index_save_layout *isl, result = read_index_save_data(reader, isl, table->header.payload); if (result != UDS_SUCCESS) { - uds_free(table); + vdo_free(table); return uds_log_error_strerror(result, "unknown index save %u data format", instance); } result = reconstruct_index_save(isl, table); - uds_free(table); + vdo_free(table); if (result != UDS_SUCCESS) { return uds_log_error_strerror(result, "cannot reconstruct index save %u", instance); @@ -1695,7 +1695,7 @@ int uds_make_index_layout(struct uds_configuration *config, bool new_layout, if (result != UDS_SUCCESS) return result; - result = uds_allocate(1, struct index_layout, __func__, &layout); + result = vdo_allocate(1, struct index_layout, __func__, &layout); if (result != UDS_SUCCESS) return result; @@ -1731,11 +1731,11 @@ void uds_free_index_layout(struct index_layout *layout) if (layout == NULL) return; - uds_free(layout->index.saves); + vdo_free(layout->index.saves); if (layout->factory != NULL) uds_put_io_factory(layout->factory); - uds_free(layout); + vdo_free(layout); } int 
uds_replace_index_layout_storage(struct index_layout *layout, diff --git a/drivers/md/dm-vdo/indexer/index-page-map.c b/drivers/md/dm-vdo/indexer/index-page-map.c index 37037ac8eee9..ddb6d843cbd9 100644 --- a/drivers/md/dm-vdo/indexer/index-page-map.c +++ b/drivers/md/dm-vdo/indexer/index-page-map.c @@ -38,13 +38,13 @@ int uds_make_index_page_map(const struct index_geometry *geometry, int result; struct index_page_map *map; - result = uds_allocate(1, struct index_page_map, "page map", &map); + result = vdo_allocate(1, struct index_page_map, "page map", &map); if (result != UDS_SUCCESS) return result; map->geometry = geometry; map->entries_per_chapter = geometry->index_pages_per_chapter - 1; - result = uds_allocate(get_entry_count(geometry), u16, "Index Page Map Entries", + result = vdo_allocate(get_entry_count(geometry), u16, "Index Page Map Entries", &map->entries); if (result != UDS_SUCCESS) { uds_free_index_page_map(map); @@ -58,8 +58,8 @@ int uds_make_index_page_map(const struct index_geometry *geometry, void uds_free_index_page_map(struct index_page_map *map) { if (map != NULL) { - uds_free(map->entries); - uds_free(map); + vdo_free(map->entries); + vdo_free(map); } } @@ -118,7 +118,7 @@ int uds_write_index_page_map(struct index_page_map *map, struct buffered_writer u64 saved_size = uds_compute_index_page_map_save_size(map->geometry); u32 i; - result = uds_allocate(saved_size, u8, "page map data", &buffer); + result = vdo_allocate(saved_size, u8, "page map data", &buffer); if (result != UDS_SUCCESS) return result; @@ -129,7 +129,7 @@ int uds_write_index_page_map(struct index_page_map *map, struct buffered_writer encode_u16_le(buffer, &offset, map->entries[i]); result = uds_write_to_buffered_writer(writer, buffer, offset); - uds_free(buffer); + vdo_free(buffer); if (result != UDS_SUCCESS) return result; @@ -145,20 +145,20 @@ int uds_read_index_page_map(struct index_page_map *map, struct buffered_reader * u64 saved_size = uds_compute_index_page_map_save_size(map->geometry); u32 i; - result = uds_allocate(saved_size, u8, "page map data", &buffer); + result = vdo_allocate(saved_size, u8, "page map data", &buffer); if (result != UDS_SUCCESS) return result; result = uds_read_from_buffered_reader(reader, buffer, saved_size); if (result != UDS_SUCCESS) { - uds_free(buffer); + vdo_free(buffer); return result; } memcpy(&magic, buffer, PAGE_MAP_MAGIC_LENGTH); offset += PAGE_MAP_MAGIC_LENGTH; if (memcmp(magic, PAGE_MAP_MAGIC, PAGE_MAP_MAGIC_LENGTH) != 0) { - uds_free(buffer); + vdo_free(buffer); return UDS_CORRUPT_DATA; } @@ -166,7 +166,7 @@ int uds_read_index_page_map(struct index_page_map *map, struct buffered_reader * for (i = 0; i < get_entry_count(map->geometry); i++) decode_u16_le(buffer, &offset, &map->entries[i]); - uds_free(buffer); + vdo_free(buffer); uds_log_debug("read index page map, last update %llu", (unsigned long long) map->last_update); return UDS_SUCCESS; diff --git a/drivers/md/dm-vdo/indexer/index-session.c b/drivers/md/dm-vdo/indexer/index-session.c index 07b478f57c68..0f920a583021 100644 --- a/drivers/md/dm-vdo/indexer/index-session.c +++ b/drivers/md/dm-vdo/indexer/index-session.c @@ -221,7 +221,7 @@ static int __must_check make_empty_index_session(struct uds_index_session **inde int result; struct uds_index_session *session; - result = uds_allocate(1, struct uds_index_session, __func__, &session); + result = vdo_allocate(1, struct uds_index_session, __func__, &session); if (result != UDS_SUCCESS) return result; @@ -233,7 +233,7 @@ static int __must_check 
make_empty_index_session(struct uds_index_session **inde result = uds_make_request_queue("callbackW", &handle_callbacks, &session->callback_queue); if (result != UDS_SUCCESS) { - uds_free(session); + vdo_free(session); return result; } @@ -673,7 +673,7 @@ int uds_destroy_index_session(struct uds_index_session *index_session) uds_request_queue_finish(index_session->callback_queue); index_session->callback_queue = NULL; uds_log_debug("Destroyed index session"); - uds_free(index_session); + vdo_free(index_session); return uds_status_to_errno(result); } diff --git a/drivers/md/dm-vdo/indexer/index.c b/drivers/md/dm-vdo/indexer/index.c index 35e3b45cdb71..c576033b8a53 100644 --- a/drivers/md/dm-vdo/indexer/index.c +++ b/drivers/md/dm-vdo/indexer/index.c @@ -88,7 +88,7 @@ static int launch_zone_message(struct uds_zone_message message, unsigned int zon int result; struct uds_request *request; - result = uds_allocate(1, struct uds_request, __func__, &request); + result = vdo_allocate(1, struct uds_request, __func__, &request); if (result != UDS_SUCCESS) return result; @@ -623,7 +623,7 @@ static void execute_zone_request(struct uds_request *request) } /* Once the message is processed it can be freed. */ - uds_free(uds_forget(request)); + vdo_free(vdo_forget(request)); return; } @@ -755,8 +755,8 @@ static void free_chapter_writer(struct chapter_writer *writer) stop_chapter_writer(writer); uds_free_open_chapter_index(writer->open_chapter_index); - uds_free(writer->collated_records); - uds_free(writer); + vdo_free(writer->collated_records); + vdo_free(writer); } static int make_chapter_writer(struct uds_index *index, @@ -767,7 +767,7 @@ static int make_chapter_writer(struct uds_index *index, size_t collated_records_size = (sizeof(struct uds_volume_record) * index->volume->geometry->records_per_chapter); - result = uds_allocate_extended(struct chapter_writer, index->zone_count, + result = vdo_allocate_extended(struct chapter_writer, index->zone_count, struct open_chapter_zone *, "Chapter Writer", &writer); if (result != UDS_SUCCESS) @@ -777,7 +777,7 @@ static int make_chapter_writer(struct uds_index *index, mutex_init(&writer->mutex); uds_init_cond(&writer->cond); - result = uds_allocate_cache_aligned(collated_records_size, "collated records", + result = vdo_allocate_cache_aligned(collated_records_size, "collated records", &writer->collated_records); if (result != UDS_SUCCESS) { free_chapter_writer(writer); @@ -1118,7 +1118,7 @@ static void free_index_zone(struct index_zone *zone) uds_free_open_chapter(zone->open_chapter); uds_free_open_chapter(zone->writing_chapter); - uds_free(zone); + vdo_free(zone); } static int make_index_zone(struct uds_index *index, unsigned int zone_number) @@ -1126,7 +1126,7 @@ static int make_index_zone(struct uds_index *index, unsigned int zone_number) int result; struct index_zone *zone; - result = uds_allocate(1, struct index_zone, "index zone", &zone); + result = vdo_allocate(1, struct index_zone, "index zone", &zone); if (result != UDS_SUCCESS) return result; @@ -1163,7 +1163,7 @@ int uds_make_index(struct uds_configuration *config, enum uds_open_index_type op u64 nonce; unsigned int z; - result = uds_allocate_extended(struct uds_index, config->zone_count, + result = vdo_allocate_extended(struct uds_index, config->zone_count, struct uds_request_queue *, "index", &index); if (result != UDS_SUCCESS) return result; @@ -1176,7 +1176,7 @@ int uds_make_index(struct uds_configuration *config, enum uds_open_index_type op return result; } - result = 
uds_allocate(index->zone_count, struct index_zone *, "zones", + result = vdo_allocate(index->zone_count, struct index_zone *, "zones", &index->zones); if (result != UDS_SUCCESS) { uds_free_index(index); @@ -1289,12 +1289,12 @@ void uds_free_index(struct uds_index *index) if (index->zones != NULL) { for (i = 0; i < index->zone_count; i++) free_index_zone(index->zones[i]); - uds_free(index->zones); + vdo_free(index->zones); } uds_free_volume(index->volume); - uds_free_index_layout(uds_forget(index->layout)); - uds_free(index); + uds_free_index_layout(vdo_forget(index->layout)); + vdo_free(index); } /* Wait for the chapter writer to complete any outstanding writes. */ diff --git a/drivers/md/dm-vdo/indexer/io-factory.c b/drivers/md/dm-vdo/indexer/io-factory.c index fecd436986ae..749c950c0189 100644 --- a/drivers/md/dm-vdo/indexer/io-factory.c +++ b/drivers/md/dm-vdo/indexer/io-factory.c @@ -64,7 +64,7 @@ int uds_make_io_factory(struct block_device *bdev, struct io_factory **factory_p int result; struct io_factory *factory; - result = uds_allocate(1, struct io_factory, __func__, &factory); + result = vdo_allocate(1, struct io_factory, __func__, &factory); if (result != UDS_SUCCESS) return result; @@ -85,7 +85,7 @@ int uds_replace_storage(struct io_factory *factory, struct block_device *bdev) void uds_put_io_factory(struct io_factory *factory) { if (atomic_add_return(-1, &factory->ref_count) <= 0) - uds_free(factory); + vdo_free(factory); } size_t uds_get_writable_size(struct io_factory *factory) @@ -129,7 +129,7 @@ void uds_free_buffered_reader(struct buffered_reader *reader) dm_bufio_client_destroy(reader->client); uds_put_io_factory(reader->factory); - uds_free(reader); + vdo_free(reader); } /* Create a buffered reader for an index region starting at offset. 
*/ @@ -144,7 +144,7 @@ int uds_make_buffered_reader(struct io_factory *factory, off_t offset, u64 block if (result != UDS_SUCCESS) return result; - result = uds_allocate(1, struct buffered_reader, "buffered reader", &reader); + result = vdo_allocate(1, struct buffered_reader, "buffered reader", &reader); if (result != UDS_SUCCESS) { dm_bufio_client_destroy(client); return result; @@ -177,7 +177,7 @@ static int position_reader(struct buffered_reader *reader, sector_t block_number return UDS_OUT_OF_RANGE; if (reader->buffer != NULL) - dm_bufio_release(uds_forget(reader->buffer)); + dm_bufio_release(vdo_forget(reader->buffer)); data = dm_bufio_read(reader->client, block_number, &buffer); if (IS_ERR(data)) @@ -282,7 +282,7 @@ int uds_make_buffered_writer(struct io_factory *factory, off_t offset, u64 block if (result != UDS_SUCCESS) return result; - result = uds_allocate(1, struct buffered_writer, "buffered writer", &writer); + result = vdo_allocate(1, struct buffered_writer, "buffered writer", &writer); if (result != UDS_SUCCESS) { dm_bufio_client_destroy(client); return result; @@ -369,7 +369,7 @@ void uds_free_buffered_writer(struct buffered_writer *writer) dm_bufio_client_destroy(writer->client); uds_put_io_factory(writer->factory); - uds_free(writer); + vdo_free(writer); } /* diff --git a/drivers/md/dm-vdo/indexer/open-chapter.c b/drivers/md/dm-vdo/indexer/open-chapter.c index cd2d35e39c20..4a4dc94915dd 100644 --- a/drivers/md/dm-vdo/indexer/open-chapter.c +++ b/drivers/md/dm-vdo/indexer/open-chapter.c @@ -68,7 +68,7 @@ int uds_make_open_chapter(const struct index_geometry *geometry, unsigned int zo size_t capacity = geometry->records_per_chapter / zone_count; size_t slot_count = (1 << bits_per(capacity * LOAD_RATIO)); - result = uds_allocate_extended(struct open_chapter_zone, slot_count, + result = vdo_allocate_extended(struct open_chapter_zone, slot_count, struct open_chapter_zone_slot, "open chapter", &open_chapter); if (result != UDS_SUCCESS) @@ -76,7 +76,7 @@ int uds_make_open_chapter(const struct index_geometry *geometry, unsigned int zo open_chapter->slot_count = slot_count; open_chapter->capacity = capacity; - result = uds_allocate_cache_aligned(records_size(open_chapter), "record pages", + result = vdo_allocate_cache_aligned(records_size(open_chapter), "record pages", &open_chapter->records); if (result != UDS_SUCCESS) { uds_free_open_chapter(open_chapter); @@ -194,8 +194,8 @@ void uds_remove_from_open_chapter(struct open_chapter_zone *open_chapter, void uds_free_open_chapter(struct open_chapter_zone *open_chapter) { if (open_chapter != NULL) { - uds_free(open_chapter->records); - uds_free(open_chapter); + vdo_free(open_chapter->records); + vdo_free(open_chapter); } } diff --git a/drivers/md/dm-vdo/indexer/radix-sort.c b/drivers/md/dm-vdo/indexer/radix-sort.c index b86d55f0827e..74ea18b8e9be 100644 --- a/drivers/md/dm-vdo/indexer/radix-sort.c +++ b/drivers/md/dm-vdo/indexer/radix-sort.c @@ -211,7 +211,7 @@ int uds_make_radix_sorter(unsigned int count, struct radix_sorter **sorter) unsigned int stack_size = count / INSERTION_SORT_THRESHOLD; struct radix_sorter *radix_sorter; - result = uds_allocate_extended(struct radix_sorter, stack_size, struct task, + result = vdo_allocate_extended(struct radix_sorter, stack_size, struct task, __func__, &radix_sorter); if (result != UDS_SUCCESS) return result; @@ -224,7 +224,7 @@ int uds_make_radix_sorter(unsigned int count, struct radix_sorter **sorter) void uds_free_radix_sorter(struct radix_sorter *sorter) { - uds_free(sorter); + 
vdo_free(sorter); } /* diff --git a/drivers/md/dm-vdo/indexer/sparse-cache.c b/drivers/md/dm-vdo/indexer/sparse-cache.c index 9e8672cba3fa..e297ba2d6ceb 100644 --- a/drivers/md/dm-vdo/indexer/sparse-cache.c +++ b/drivers/md/dm-vdo/indexer/sparse-cache.c @@ -222,12 +222,12 @@ static int __must_check initialize_cached_chapter_index(struct cached_chapter_in chapter->virtual_chapter = NO_CHAPTER; chapter->index_pages_count = geometry->index_pages_per_chapter; - result = uds_allocate(chapter->index_pages_count, struct delta_index_page, + result = vdo_allocate(chapter->index_pages_count, struct delta_index_page, __func__, &chapter->index_pages); if (result != UDS_SUCCESS) return result; - return uds_allocate(chapter->index_pages_count, struct dm_buffer *, + return vdo_allocate(chapter->index_pages_count, struct dm_buffer *, "sparse index volume pages", &chapter->page_buffers); } @@ -241,7 +241,7 @@ static int __must_check make_search_list(struct sparse_cache *cache, bytes = (sizeof(struct search_list) + (cache->capacity * sizeof(struct cached_chapter_index *))); - result = uds_allocate_cache_aligned(bytes, "search list", &list); + result = vdo_allocate_cache_aligned(bytes, "search list", &list); if (result != UDS_SUCCESS) return result; @@ -264,7 +264,7 @@ int uds_make_sparse_cache(const struct index_geometry *geometry, unsigned int ca unsigned int bytes; bytes = (sizeof(struct sparse_cache) + (capacity * sizeof(struct cached_chapter_index))); - result = uds_allocate_cache_aligned(bytes, "sparse cache", &cache); + result = vdo_allocate_cache_aligned(bytes, "sparse cache", &cache); if (result != UDS_SUCCESS) return result; @@ -294,7 +294,7 @@ int uds_make_sparse_cache(const struct index_geometry *geometry, unsigned int ca } /* purge_search_list() needs some temporary lists for sorting. 
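 * (Hence the capacity * 2 sizing just below: enough scratch space for two
 * capacity-sized lists -- an editorial inference, not patch text.)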
*/ - result = uds_allocate(capacity * 2, struct cached_chapter_index *, + result = vdo_allocate(capacity * 2, struct cached_chapter_index *, "scratch entries", &cache->scratch_entries); if (result != UDS_SUCCESS) goto out; @@ -338,7 +338,7 @@ static void release_cached_chapter_index(struct cached_chapter_index *chapter) for (i = 0; i < chapter->index_pages_count; i++) { if (chapter->page_buffers[i] != NULL) - dm_bufio_release(uds_forget(chapter->page_buffers[i])); + dm_bufio_release(vdo_forget(chapter->page_buffers[i])); } } @@ -349,18 +349,18 @@ void uds_free_sparse_cache(struct sparse_cache *cache) if (cache == NULL) return; - uds_free(cache->scratch_entries); + vdo_free(cache->scratch_entries); for (i = 0; i < cache->zone_count; i++) - uds_free(cache->search_lists[i]); + vdo_free(cache->search_lists[i]); for (i = 0; i < cache->capacity; i++) { release_cached_chapter_index(&cache->chapters[i]); - uds_free(cache->chapters[i].index_pages); - uds_free(cache->chapters[i].page_buffers); + vdo_free(cache->chapters[i].index_pages); + vdo_free(cache->chapters[i].page_buffers); } - uds_free(cache); + vdo_free(cache); } /* diff --git a/drivers/md/dm-vdo/indexer/volume-index.c b/drivers/md/dm-vdo/indexer/volume-index.c index a88e515ceef6..1a762e6dd709 100644 --- a/drivers/md/dm-vdo/indexer/volume-index.c +++ b/drivers/md/dm-vdo/indexer/volume-index.c @@ -279,8 +279,8 @@ static int compute_volume_sub_index_parameters(const struct uds_configuration *c static void uninitialize_volume_sub_index(struct volume_sub_index *sub_index) { - uds_free(uds_forget(sub_index->flush_chapters)); - uds_free(uds_forget(sub_index->zones)); + vdo_free(vdo_forget(sub_index->flush_chapters)); + vdo_free(vdo_forget(sub_index->zones)); uds_uninitialize_delta_index(&sub_index->delta_index); } @@ -290,11 +290,11 @@ void uds_free_volume_index(struct volume_index *volume_index) return; if (volume_index->zones != NULL) - uds_free(uds_forget(volume_index->zones)); + vdo_free(vdo_forget(volume_index->zones)); uninitialize_volume_sub_index(&volume_index->vi_non_hook); uninitialize_volume_sub_index(&volume_index->vi_hook); - uds_free(volume_index); + vdo_free(volume_index); } @@ -1211,12 +1211,12 @@ static int initialize_volume_sub_index(const struct uds_configuration *config, (zone_count * sizeof(struct volume_sub_index_zone))); /* The following arrays are initialized to all zeros. 
*/ - result = uds_allocate(params.list_count, u64, "first chapter to flush", + result = vdo_allocate(params.list_count, u64, "first chapter to flush", &sub_index->flush_chapters); if (result != UDS_SUCCESS) return result; - return uds_allocate(zone_count, struct volume_sub_index_zone, + return vdo_allocate(zone_count, struct volume_sub_index_zone, "volume index zones", &sub_index->zones); } @@ -1228,7 +1228,7 @@ int uds_make_volume_index(const struct uds_configuration *config, u64 volume_non struct volume_index *volume_index; int result; - result = uds_allocate(1, struct volume_index, "volume index", &volume_index); + result = vdo_allocate(1, struct volume_index, "volume index", &volume_index); if (result != UDS_SUCCESS) return result; @@ -1249,7 +1249,7 @@ int uds_make_volume_index(const struct uds_configuration *config, u64 volume_non volume_index->sparse_sample_rate = config->sparse_sample_rate; - result = uds_allocate(config->zone_count, struct volume_index_zone, + result = vdo_allocate(config->zone_count, struct volume_index_zone, "volume index zones", &volume_index->zones); if (result != UDS_SUCCESS) { uds_free_volume_index(volume_index); diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c index 002a4264a163..2d8901732f5d 100644 --- a/drivers/md/dm-vdo/indexer/volume.c +++ b/drivers/md/dm-vdo/indexer/volume.c @@ -198,7 +198,7 @@ static void wait_for_pending_searches(struct page_cache *cache, u32 physical_pag static void release_page_buffer(struct cached_page *page) { if (page->buffer != NULL) - dm_bufio_release(uds_forget(page->buffer)); + dm_bufio_release(vdo_forget(page->buffer)); } static void clear_cache_page(struct page_cache *cache, struct cached_page *page) @@ -1482,7 +1482,7 @@ int __must_check uds_replace_volume_storage(struct volume *volume, if (volume->sparse_cache != NULL) uds_invalidate_sparse_cache(volume->sparse_cache); if (volume->client != NULL) - dm_bufio_client_destroy(uds_forget(volume->client)); + dm_bufio_client_destroy(vdo_forget(volume->client)); return uds_open_volume_bufio(layout, volume->geometry->bytes_per_page, volume->reserved_buffers, &volume->client); @@ -1507,22 +1507,22 @@ static int __must_check initialize_page_cache(struct page_cache *cache, if (result != UDS_SUCCESS) return result; - result = uds_allocate(VOLUME_CACHE_MAX_QUEUED_READS, struct queued_read, + result = vdo_allocate(VOLUME_CACHE_MAX_QUEUED_READS, struct queued_read, "volume read queue", &cache->read_queue); if (result != UDS_SUCCESS) return result; - result = uds_allocate(cache->zone_count, struct search_pending_counter, + result = vdo_allocate(cache->zone_count, struct search_pending_counter, "Volume Cache Zones", &cache->search_pending_counters); if (result != UDS_SUCCESS) return result; - result = uds_allocate(cache->indexable_pages, u16, "page cache index", + result = vdo_allocate(cache->indexable_pages, u16, "page cache index", &cache->index); if (result != UDS_SUCCESS) return result; - result = uds_allocate(cache->cache_slots, struct cached_page, "page cache cache", + result = vdo_allocate(cache->cache_slots, struct cached_page, "page cache cache", &cache->cache); if (result != UDS_SUCCESS) return result; @@ -1546,7 +1546,7 @@ int uds_make_volume(const struct uds_configuration *config, struct index_layout unsigned int reserved_buffers; int result; - result = uds_allocate(1, struct volume, "volume", &volume); + result = vdo_allocate(1, struct volume, "volume", &volume); if (result != UDS_SUCCESS) return result; @@ -1583,7 +1583,7 @@ int 
uds_make_volume(const struct uds_configuration *config, struct index_layout return result; } - result = uds_allocate(geometry->records_per_page, + result = vdo_allocate(geometry->records_per_page, const struct uds_volume_record *, "record pointers", &volume->record_pointers); if (result != UDS_SUCCESS) { @@ -1624,7 +1624,7 @@ int uds_make_volume(const struct uds_configuration *config, struct index_layout uds_init_cond(&volume->read_threads_read_done_cond); uds_init_cond(&volume->read_threads_cond); - result = uds_allocate(config->read_threads, struct thread *, "reader threads", + result = vdo_allocate(config->read_threads, struct thread *, "reader threads", &volume->reader_threads); if (result != UDS_SUCCESS) { uds_free_volume(volume); @@ -1654,10 +1654,10 @@ static void uninitialize_page_cache(struct page_cache *cache) for (i = 0; i < cache->cache_slots; i++) release_page_buffer(&cache->cache[i]); } - uds_free(cache->index); - uds_free(cache->cache); - uds_free(cache->search_pending_counters); - uds_free(cache->read_queue); + vdo_free(cache->index); + vdo_free(cache->cache); + vdo_free(cache->search_pending_counters); + vdo_free(cache->read_queue); } void uds_free_volume(struct volume *volume) @@ -1675,7 +1675,7 @@ void uds_free_volume(struct volume *volume) mutex_unlock(&volume->read_threads_mutex); for (i = 0; i < volume->read_thread_count; i++) vdo_join_threads(volume->reader_threads[i]); - uds_free(volume->reader_threads); + vdo_free(volume->reader_threads); volume->reader_threads = NULL; } @@ -1683,11 +1683,11 @@ void uds_free_volume(struct volume *volume) uninitialize_page_cache(&volume->page_cache); uds_free_sparse_cache(volume->sparse_cache); if (volume->client != NULL) - dm_bufio_client_destroy(uds_forget(volume->client)); + dm_bufio_client_destroy(vdo_forget(volume->client)); uds_free_index_page_map(volume->index_page_map); uds_free_radix_sorter(volume->radix_sorter); - uds_free(volume->geometry); - uds_free(volume->record_pointers); - uds_free(volume); + vdo_free(volume->geometry); + vdo_free(volume->record_pointers); + vdo_free(volume); } diff --git a/drivers/md/dm-vdo/int-map.c b/drivers/md/dm-vdo/int-map.c index e0953e013f15..0bd742ecbe2e 100644 --- a/drivers/md/dm-vdo/int-map.c +++ b/drivers/md/dm-vdo/int-map.c @@ -164,7 +164,7 @@ static int allocate_buckets(struct int_map *map, size_t capacity) * without have to wrap back around to element zero. 
*/ map->bucket_count = capacity + (NEIGHBORHOOD - 1); - return uds_allocate(map->bucket_count, struct bucket, + return vdo_allocate(map->bucket_count, struct bucket, "struct int_map buckets", &map->buckets); } @@ -182,7 +182,7 @@ int vdo_int_map_create(size_t initial_capacity, struct int_map **map_ptr) int result; size_t capacity; - result = uds_allocate(1, struct int_map, "struct int_map", &map); + result = vdo_allocate(1, struct int_map, "struct int_map", &map); if (result != UDS_SUCCESS) return result; @@ -197,7 +197,7 @@ int vdo_int_map_create(size_t initial_capacity, struct int_map **map_ptr) result = allocate_buckets(map, capacity); if (result != UDS_SUCCESS) { - vdo_int_map_free(uds_forget(map)); + vdo_int_map_free(vdo_forget(map)); return result; } @@ -217,8 +217,8 @@ void vdo_int_map_free(struct int_map *map) if (map == NULL) return; - uds_free(uds_forget(map->buckets)); - uds_free(uds_forget(map)); + vdo_free(vdo_forget(map->buckets)); + vdo_free(vdo_forget(map)); } /** @@ -399,14 +399,14 @@ static int resize_buckets(struct int_map *map) result = vdo_int_map_put(map, entry->key, entry->value, true, NULL); if (result != UDS_SUCCESS) { /* Destroy the new partial map and restore the map from the stack. */ - uds_free(uds_forget(map->buckets)); + vdo_free(vdo_forget(map->buckets)); *map = old_map; return result; } } /* Destroy the old bucket array. */ - uds_free(uds_forget(old_map.buckets)); + vdo_free(vdo_forget(old_map.buckets)); return UDS_SUCCESS; } diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c index 6c050f2b3b44..23549b7e9e6d 100644 --- a/drivers/md/dm-vdo/io-submitter.c +++ b/drivers/md/dm-vdo/io-submitter.c @@ -380,7 +380,7 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter struct io_submitter *io_submitter; int result; - result = uds_allocate_extended(struct io_submitter, thread_count, + result = vdo_allocate_extended(struct io_submitter, thread_count, struct bio_queue_data, "bio submission data", &io_submitter); if (result != UDS_SUCCESS) @@ -422,7 +422,7 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter * Clean up the partially initialized bio-queue entirely and indicate that * initialization failed. */ - vdo_int_map_free(uds_forget(bio_queue_data->map)); + vdo_int_map_free(vdo_forget(bio_queue_data->map)); uds_log_error("bio queue initialization failed %d", result); vdo_cleanup_io_submitter(io_submitter); vdo_free_io_submitter(io_submitter); @@ -470,8 +470,8 @@ void vdo_free_io_submitter(struct io_submitter *io_submitter) for (i = io_submitter->num_bio_queues_used - 1; i >= 0; i--) { io_submitter->num_bio_queues_used--; /* vdo_destroy() will free the work queue, so just give up our reference to it. 
*/ - uds_forget(io_submitter->bio_queue_data[i].queue); - vdo_int_map_free(uds_forget(io_submitter->bio_queue_data[i].map)); + vdo_forget(io_submitter->bio_queue_data[i].queue); + vdo_int_map_free(vdo_forget(io_submitter->bio_queue_data[i].map)); } - uds_free(io_submitter); + vdo_free(io_submitter); } diff --git a/drivers/md/dm-vdo/logical-zone.c b/drivers/md/dm-vdo/logical-zone.c index c5b3b1c111e3..ca5bc3be7978 100644 --- a/drivers/md/dm-vdo/logical-zone.c +++ b/drivers/md/dm-vdo/logical-zone.c @@ -94,7 +94,7 @@ int vdo_make_logical_zones(struct vdo *vdo, struct logical_zones **zones_ptr) if (zone_count == 0) return VDO_SUCCESS; - result = uds_allocate_extended(struct logical_zones, zone_count, + result = vdo_allocate_extended(struct logical_zones, zone_count, struct logical_zone, __func__, &zones); if (result != VDO_SUCCESS) return result; @@ -132,12 +132,12 @@ void vdo_free_logical_zones(struct logical_zones *zones) if (zones == NULL) return; - uds_free(uds_forget(zones->manager)); + vdo_free(vdo_forget(zones->manager)); for (index = 0; index < zones->zone_count; index++) - vdo_int_map_free(uds_forget(zones->zones[index].lbn_operations)); + vdo_int_map_free(vdo_forget(zones->zones[index].lbn_operations)); - uds_free(zones); + vdo_free(zones); } static inline void assert_on_zone_thread(struct logical_zone *zone, const char *what) diff --git a/drivers/md/dm-vdo/memory-alloc.c b/drivers/md/dm-vdo/memory-alloc.c index db085c1124be..8d5df3e45a24 100644 --- a/drivers/md/dm-vdo/memory-alloc.c +++ b/drivers/md/dm-vdo/memory-alloc.c @@ -37,7 +37,7 @@ static inline bool allocations_allowed(void) * @new_thread: registered_thread structure to use for the current thread * @flag_ptr: Location of the allocation-allowed flag */ -void uds_register_allocating_thread(struct registered_thread *new_thread, +void vdo_register_allocating_thread(struct registered_thread *new_thread, const bool *flag_ptr) { if (flag_ptr == NULL) { @@ -50,7 +50,7 @@ void uds_register_allocating_thread(struct registered_thread *new_thread, } /* Unregister the current thread as an allocating thread. */ -void uds_unregister_allocating_thread(void) +void vdo_unregister_allocating_thread(void) { vdo_unregister_thread(&allocating_threads); } @@ -148,7 +148,7 @@ static void remove_vmalloc_block(void *ptr) spin_unlock_irqrestore(&memory_stats.lock, flags); if (block != NULL) - uds_free(block); + vdo_free(block); else uds_log_info("attempting to remove ptr %px not found in vmalloc list", ptr); } @@ -196,7 +196,7 @@ static inline bool use_kmalloc(size_t size) * * Return: UDS_SUCCESS or an error code */ -int uds_allocate_memory(size_t size, size_t align, const char *what, void *ptr) +int vdo_allocate_memory(size_t size, size_t align, const char *what, void *ptr) { /* * The __GFP_RETRY_MAYFAIL flag means the VM implementation will retry memory reclaim @@ -245,8 +245,7 @@ int uds_allocate_memory(size_t size, size_t align, const char *what, void *ptr) } else { struct vmalloc_block_info *block; - if (uds_allocate(1, struct vmalloc_block_info, __func__, &block) == - UDS_SUCCESS) { + if (vdo_allocate(1, struct vmalloc_block_info, __func__, &block) == UDS_SUCCESS) { /* * It is possible for __vmalloc to fail to allocate memory because there * are no pages available. 
A short sleep may allow the page reclaimer @@ -259,7 +258,6 @@ int uds_allocate_memory(size_t size, size_t align, const char *what, void *ptr) */ for (;;) { p = __vmalloc(size, gfp_flags | __GFP_NOWARN); - if (p != NULL) break; @@ -273,7 +271,7 @@ int uds_allocate_memory(size_t size, size_t align, const char *what, void *ptr) } if (p == NULL) { - uds_free(block); + vdo_free(block); } else { block->ptr = p; block->size = PAGE_ALIGN(size); @@ -304,7 +302,7 @@ int uds_allocate_memory(size_t size, size_t align, const char *what, void *ptr) * * Return: pointer to the allocated memory, or NULL if the required space is not available. */ -void *uds_allocate_memory_nowait(size_t size, const char *what __maybe_unused) +void *vdo_allocate_memory_nowait(size_t size, const char *what __maybe_unused) { void *p = kmalloc(size, GFP_NOWAIT | __GFP_ZERO); @@ -314,7 +312,7 @@ void *uds_allocate_memory_nowait(size_t size, const char *what __maybe_unused) return p; } -void uds_free(void *ptr) +void vdo_free(void *ptr) { if (ptr != NULL) { if (is_vmalloc_addr(ptr)) { @@ -339,18 +337,18 @@ void uds_free(void *ptr) * * Return: UDS_SUCCESS or an error code */ -int uds_reallocate_memory(void *ptr, size_t old_size, size_t size, const char *what, +int vdo_reallocate_memory(void *ptr, size_t old_size, size_t size, const char *what, void *new_ptr) { int result; if (size == 0) { - uds_free(ptr); + vdo_free(ptr); *(void **) new_ptr = NULL; return UDS_SUCCESS; } - result = uds_allocate(size, char, what, new_ptr); + result = vdo_allocate(size, char, what, new_ptr); if (result != UDS_SUCCESS) return result; @@ -359,18 +357,18 @@ int uds_reallocate_memory(void *ptr, size_t old_size, size_t size, const char *w size = old_size; memcpy(*((void **) new_ptr), ptr, size); - uds_free(ptr); + vdo_free(ptr); } return UDS_SUCCESS; } -int uds_duplicate_string(const char *string, const char *what, char **new_string) +int vdo_duplicate_string(const char *string, const char *what, char **new_string) { int result; u8 *dup; - result = uds_allocate(strlen(string) + 1, u8, what, &dup); + result = vdo_allocate(strlen(string) + 1, u8, what, &dup); if (result != UDS_SUCCESS) return result; @@ -379,13 +377,13 @@ int uds_duplicate_string(const char *string, const char *what, char **new_string return UDS_SUCCESS; } -void uds_memory_init(void) +void vdo_memory_init(void) { spin_lock_init(&memory_stats.lock); vdo_initialize_thread_registry(&allocating_threads); } -void uds_memory_exit(void) +void vdo_memory_exit(void) { ASSERT_LOG_ONLY(memory_stats.kmalloc_bytes == 0, "kmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel", @@ -396,7 +394,7 @@ void uds_memory_exit(void) uds_log_debug("peak usage %zd bytes", memory_stats.peak_bytes); } -void uds_get_memory_stats(u64 *bytes_used, u64 *peak_bytes_used) +void vdo_get_memory_stats(u64 *bytes_used, u64 *peak_bytes_used) { unsigned long flags; @@ -410,7 +408,7 @@ void uds_get_memory_stats(u64 *bytes_used, u64 *peak_bytes_used) * Report stats on any allocated memory that we're tracking. Not all allocation types are * guaranteed to be tracked in bytes (e.g., bios). 
*/ -void uds_report_memory_usage(void) +void vdo_report_memory_usage(void) { unsigned long flags; u64 kmalloc_blocks; diff --git a/drivers/md/dm-vdo/memory-alloc.h b/drivers/md/dm-vdo/memory-alloc.h index d72d597f98cf..3f27dd722a2d 100644 --- a/drivers/md/dm-vdo/memory-alloc.h +++ b/drivers/md/dm-vdo/memory-alloc.h @@ -3,8 +3,8 @@ * Copyright 2023 Red Hat */ -#ifndef UDS_MEMORY_ALLOC_H -#define UDS_MEMORY_ALLOC_H +#ifndef VDO_MEMORY_ALLOC_H +#define VDO_MEMORY_ALLOC_H #include #include /* for PAGE_SIZE */ @@ -12,8 +12,8 @@ #include "permassert.h" #include "thread-registry.h" -/* Custom memory allocation function for UDS that tracks memory usage */ -int __must_check uds_allocate_memory(size_t size, size_t align, const char *what, void *ptr); +/* Custom memory allocation function that tracks memory usage */ +int __must_check vdo_allocate_memory(size_t size, size_t align, const char *what, void *ptr); /* * Allocate storage based on element counts, sizes, and alignment. @@ -37,7 +37,7 @@ int __must_check uds_allocate_memory(size_t size, size_t align, const char *what * * Return: UDS_SUCCESS or an error code */ -static inline int uds_do_allocation(size_t count, size_t size, size_t extra, +static inline int vdo_do_allocation(size_t count, size_t size, size_t extra, size_t align, const char *what, void *ptr) { size_t total_size = count * size + extra; @@ -53,7 +53,7 @@ static inline int uds_do_allocation(size_t count, size_t size, size_t extra, total_size = SIZE_MAX; } - return uds_allocate_memory(total_size, align, what, ptr); + return vdo_allocate_memory(total_size, align, what, ptr); } /* @@ -67,8 +67,8 @@ static inline int uds_do_allocation(size_t count, size_t size, size_t extra, * * Return: UDS_SUCCESS or an error code */ -#define uds_allocate(COUNT, TYPE, WHAT, PTR) \ - uds_do_allocation(COUNT, sizeof(TYPE), 0, __alignof__(TYPE), WHAT, PTR) +#define vdo_allocate(COUNT, TYPE, WHAT, PTR) \ + vdo_do_allocation(COUNT, sizeof(TYPE), 0, __alignof__(TYPE), WHAT, PTR) /* * Allocate one object of an indicated type, followed by one or more elements of a second type, @@ -83,12 +83,12 @@ static inline int uds_do_allocation(size_t count, size_t size, size_t extra, * * Return: UDS_SUCCESS or an error code */ -#define uds_allocate_extended(TYPE1, COUNT, TYPE2, WHAT, PTR) \ +#define vdo_allocate_extended(TYPE1, COUNT, TYPE2, WHAT, PTR) \ __extension__({ \ int _result; \ TYPE1 **_ptr = (PTR); \ BUILD_BUG_ON(__alignof__(TYPE1) < __alignof__(TYPE2)); \ - _result = uds_do_allocation(COUNT, \ + _result = vdo_do_allocation(COUNT, \ sizeof(TYPE2), \ sizeof(TYPE1), \ __alignof__(TYPE1), \ @@ -107,9 +107,9 @@ static inline int uds_do_allocation(size_t count, size_t size, size_t extra, * * Return: UDS_SUCCESS or an error code */ -static inline int __must_check uds_allocate_cache_aligned(size_t size, const char *what, void *ptr) +static inline int __must_check vdo_allocate_cache_aligned(size_t size, const char *what, void *ptr) { - return uds_allocate_memory(size, L1_CACHE_BYTES, what, ptr); + return vdo_allocate_memory(size, L1_CACHE_BYTES, what, ptr); } /* @@ -121,18 +121,18 @@ static inline int __must_check uds_allocate_cache_aligned(size_t size, const cha * * Return: pointer to the memory, or NULL if the memory is not available. 
*/ -void *__must_check uds_allocate_memory_nowait(size_t size, const char *what); +void *__must_check vdo_allocate_memory_nowait(size_t size, const char *what); -int __must_check uds_reallocate_memory(void *ptr, size_t old_size, size_t size, +int __must_check vdo_reallocate_memory(void *ptr, size_t old_size, size_t size, const char *what, void *new_ptr); -int __must_check uds_duplicate_string(const char *string, const char *what, +int __must_check vdo_duplicate_string(const char *string, const char *what, char **new_string); -/* Free memory allocated with uds_allocate(). */ -void uds_free(void *ptr); +/* Free memory allocated with vdo_allocate(). */ +void vdo_free(void *ptr); -static inline void *__uds_forget(void **ptr_ptr) +static inline void *__vdo_forget(void **ptr_ptr) { void *ptr = *ptr_ptr; @@ -144,19 +144,19 @@ static inline void *__uds_forget(void **ptr_ptr) * Null out a pointer and return a copy to it. This macro should be used when passing a pointer to * a function for which it is not safe to access the pointer once the function returns. */ -#define uds_forget(ptr) __uds_forget((void **) &(ptr)) +#define vdo_forget(ptr) __vdo_forget((void **) &(ptr)) -void uds_memory_init(void); +void vdo_memory_init(void); -void uds_memory_exit(void); +void vdo_memory_exit(void); -void uds_register_allocating_thread(struct registered_thread *new_thread, +void vdo_register_allocating_thread(struct registered_thread *new_thread, const bool *flag_ptr); -void uds_unregister_allocating_thread(void); +void vdo_unregister_allocating_thread(void); -void uds_get_memory_stats(u64 *bytes_used, u64 *peak_bytes_used); +void vdo_get_memory_stats(u64 *bytes_used, u64 *peak_bytes_used); -void uds_report_memory_usage(void); +void vdo_report_memory_usage(void); -#endif /* UDS_MEMORY_ALLOC_H */ +#endif /* VDO_MEMORY_ALLOC_H */ diff --git a/drivers/md/dm-vdo/message-stats.c b/drivers/md/dm-vdo/message-stats.c index cac7232f467b..ec24fff2a21b 100644 --- a/drivers/md/dm-vdo/message-stats.c +++ b/drivers/md/dm-vdo/message-stats.c @@ -419,7 +419,7 @@ int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen) struct vdo_statistics *stats; int result; - result = uds_allocate(1, struct vdo_statistics, __func__, &stats); + result = vdo_allocate(1, struct vdo_statistics, __func__, &stats); if (result != UDS_SUCCESS) { uds_log_error("Cannot allocate memory to write VDO statistics"); return result; @@ -427,6 +427,6 @@ int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen) vdo_fetch_statistics(vdo, stats); write_vdo_statistics(NULL, stats, NULL, &buf, &maxlen); - uds_free(stats); + vdo_free(stats); return VDO_SUCCESS; } diff --git a/drivers/md/dm-vdo/packer.c b/drivers/md/dm-vdo/packer.c index 3283c8d56c59..5774d8fd5c5a 100644 --- a/drivers/md/dm-vdo/packer.c +++ b/drivers/md/dm-vdo/packer.c @@ -120,7 +120,7 @@ static int __must_check make_bin(struct packer *packer) struct packer_bin *bin; int result; - result = uds_allocate_extended(struct packer_bin, VDO_MAX_COMPRESSION_SLOTS, + result = vdo_allocate_extended(struct packer_bin, VDO_MAX_COMPRESSION_SLOTS, struct vio *, __func__, &bin); if (result != VDO_SUCCESS) return result; @@ -146,7 +146,7 @@ int vdo_make_packer(struct vdo *vdo, block_count_t bin_count, struct packer **pa block_count_t i; int result; - result = uds_allocate(1, struct packer, __func__, &packer); + result = vdo_allocate(1, struct packer, __func__, &packer); if (result != VDO_SUCCESS) return result; @@ -168,7 +168,7 @@ int vdo_make_packer(struct vdo *vdo, block_count_t bin_count, 
struct packer **pa * bin must have a canceler for which it is waiting, and any canceler will only have * canceled one lock holder at a time. */ - result = uds_allocate_extended(struct packer_bin, MAXIMUM_VDO_USER_VIOS / 2, + result = vdo_allocate_extended(struct packer_bin, MAXIMUM_VDO_USER_VIOS / 2, struct vio *, __func__, &packer->canceled_bin); if (result != VDO_SUCCESS) { vdo_free_packer(packer); @@ -198,11 +198,11 @@ void vdo_free_packer(struct packer *packer) list_for_each_entry_safe(bin, tmp, &packer->bins, list) { list_del_init(&bin->list); - uds_free(bin); + vdo_free(bin); } - uds_free(uds_forget(packer->canceled_bin)); - uds_free(packer); + vdo_free(vdo_forget(packer->canceled_bin)); + vdo_free(packer); } /** @@ -669,7 +669,7 @@ void vdo_remove_lock_holder_from_packer(struct vdo_completion *completion) assert_data_vio_in_packer_zone(data_vio); - lock_holder = uds_forget(data_vio->compression.lock_holder); + lock_holder = vdo_forget(data_vio->compression.lock_holder); bin = lock_holder->compression.bin; ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin"); diff --git a/drivers/md/dm-vdo/physical-zone.c b/drivers/md/dm-vdo/physical-zone.c index 62d142b28282..fadcea23288e 100644 --- a/drivers/md/dm-vdo/physical-zone.c +++ b/drivers/md/dm-vdo/physical-zone.c @@ -239,7 +239,7 @@ static int make_pbn_lock_pool(size_t capacity, struct pbn_lock_pool **pool_ptr) struct pbn_lock_pool *pool; int result; - result = uds_allocate_extended(struct pbn_lock_pool, capacity, idle_pbn_lock, + result = vdo_allocate_extended(struct pbn_lock_pool, capacity, idle_pbn_lock, __func__, &pool); if (result != VDO_SUCCESS) return result; @@ -270,7 +270,7 @@ static void free_pbn_lock_pool(struct pbn_lock_pool *pool) ASSERT_LOG_ONLY(pool->borrowed == 0, "All PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan", pool->borrowed); - uds_free(pool); + vdo_free(pool); } /** @@ -344,7 +344,7 @@ static int initialize_zone(struct vdo *vdo, struct physical_zones *zones) zone->next = &zones->zones[(zone_number + 1) % vdo->thread_config.physical_zone_count]; result = vdo_make_default_thread(vdo, zone->thread_id); if (result != VDO_SUCCESS) { - free_pbn_lock_pool(uds_forget(zone->lock_pool)); + free_pbn_lock_pool(vdo_forget(zone->lock_pool)); vdo_int_map_free(zone->pbn_operations); return result; } @@ -367,7 +367,7 @@ int vdo_make_physical_zones(struct vdo *vdo, struct physical_zones **zones_ptr) if (zone_count == 0) return VDO_SUCCESS; - result = uds_allocate_extended(struct physical_zones, zone_count, + result = vdo_allocate_extended(struct physical_zones, zone_count, struct physical_zone, __func__, &zones); if (result != VDO_SUCCESS) return result; @@ -398,11 +398,11 @@ void vdo_free_physical_zones(struct physical_zones *zones) for (index = 0; index < zones->zone_count; index++) { struct physical_zone *zone = &zones->zones[index]; - free_pbn_lock_pool(uds_forget(zone->lock_pool)); - vdo_int_map_free(uds_forget(zone->pbn_operations)); + free_pbn_lock_pool(vdo_forget(zone->lock_pool)); + vdo_int_map_free(vdo_forget(zone->pbn_operations)); } - uds_free(zones); + vdo_free(zones); } /** @@ -460,7 +460,7 @@ int vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone, if (lock != NULL) { /* The lock is already held, so we don't need the borrowed one. 
*/ - return_pbn_lock_to_pool(zone->lock_pool, uds_forget(new_lock)); + return_pbn_lock_to_pool(zone->lock_pool, vdo_forget(new_lock)); result = ASSERT(lock->holder_count > 0, "physical block %llu lock held", (unsigned long long) pbn); if (result != VDO_SUCCESS) diff --git a/drivers/md/dm-vdo/pool-sysfs.c b/drivers/md/dm-vdo/pool-sysfs.c index f2be0f2bbd68..6769c5711cbc 100644 --- a/drivers/md/dm-vdo/pool-sysfs.c +++ b/drivers/md/dm-vdo/pool-sysfs.c @@ -110,7 +110,7 @@ static ssize_t pool_requests_maximum_show(struct vdo *vdo, char *buf) static void vdo_pool_release(struct kobject *directory) { - uds_free(container_of(directory, struct vdo, vdo_directory)); + vdo_free(container_of(directory, struct vdo, vdo_directory)); } static struct pool_attribute vdo_pool_compressing_attr = { diff --git a/drivers/md/dm-vdo/priority-table.c b/drivers/md/dm-vdo/priority-table.c index a59e9d40ca90..fc99268d2437 100644 --- a/drivers/md/dm-vdo/priority-table.c +++ b/drivers/md/dm-vdo/priority-table.c @@ -60,7 +60,7 @@ int vdo_make_priority_table(unsigned int max_priority, struct priority_table **t if (max_priority > MAX_PRIORITY) return UDS_INVALID_ARGUMENT; - result = uds_allocate_extended(struct priority_table, max_priority + 1, + result = vdo_allocate_extended(struct priority_table, max_priority + 1, struct bucket, __func__, &table); if (result != VDO_SUCCESS) return result; @@ -96,7 +96,7 @@ void vdo_free_priority_table(struct priority_table *table) */ vdo_reset_priority_table(table); - uds_free(table); + vdo_free(table); } /** diff --git a/drivers/md/dm-vdo/recovery-journal.c b/drivers/md/dm-vdo/recovery-journal.c index c1d355346bcf..615755697e60 100644 --- a/drivers/md/dm-vdo/recovery-journal.c +++ b/drivers/md/dm-vdo/recovery-journal.c @@ -591,31 +591,31 @@ static int __must_check initialize_lock_counter(struct recovery_journal *journal struct thread_config *config = &vdo->thread_config; struct lock_counter *counter = &journal->lock_counter; - result = uds_allocate(journal->size, u16, __func__, &counter->journal_counters); + result = vdo_allocate(journal->size, u16, __func__, &counter->journal_counters); if (result != VDO_SUCCESS) return result; - result = uds_allocate(journal->size, atomic_t, __func__, + result = vdo_allocate(journal->size, atomic_t, __func__, &counter->journal_decrement_counts); if (result != VDO_SUCCESS) return result; - result = uds_allocate(journal->size * config->logical_zone_count, u16, __func__, + result = vdo_allocate(journal->size * config->logical_zone_count, u16, __func__, &counter->logical_counters); if (result != VDO_SUCCESS) return result; - result = uds_allocate(journal->size, atomic_t, __func__, + result = vdo_allocate(journal->size, atomic_t, __func__, &counter->logical_zone_counts); if (result != VDO_SUCCESS) return result; - result = uds_allocate(journal->size * config->physical_zone_count, u16, __func__, + result = vdo_allocate(journal->size * config->physical_zone_count, u16, __func__, &counter->physical_counters); if (result != VDO_SUCCESS) return result; - result = uds_allocate(journal->size, atomic_t, __func__, + result = vdo_allocate(journal->size, atomic_t, __func__, &counter->physical_zone_counts); if (result != VDO_SUCCESS) return result; @@ -670,14 +670,14 @@ static int initialize_recovery_block(struct vdo *vdo, struct recovery_journal *j * Allocate a full block for the journal block even though not all of the space is used * since the VIO needs to write a full disk block. 
*/ - result = uds_allocate(VDO_BLOCK_SIZE, char, __func__, &data); + result = vdo_allocate(VDO_BLOCK_SIZE, char, __func__, &data); if (result != VDO_SUCCESS) return result; result = allocate_vio_components(vdo, VIO_TYPE_RECOVERY_JOURNAL, VIO_PRIORITY_HIGH, block, 1, data, &block->vio); if (result != VDO_SUCCESS) { - uds_free(data); + vdo_free(data); return result; } @@ -709,7 +709,7 @@ int vdo_decode_recovery_journal(struct recovery_journal_state_7_0 state, nonce_t struct recovery_journal *journal; int result; - result = uds_allocate_extended(struct recovery_journal, + result = vdo_allocate_extended(struct recovery_journal, RECOVERY_JOURNAL_RESERVED_BLOCKS, struct recovery_journal_block, __func__, &journal); @@ -787,13 +787,13 @@ void vdo_free_recovery_journal(struct recovery_journal *journal) if (journal == NULL) return; - uds_free(uds_forget(journal->lock_counter.logical_zone_counts)); - uds_free(uds_forget(journal->lock_counter.physical_zone_counts)); - uds_free(uds_forget(journal->lock_counter.journal_counters)); - uds_free(uds_forget(journal->lock_counter.journal_decrement_counts)); - uds_free(uds_forget(journal->lock_counter.logical_counters)); - uds_free(uds_forget(journal->lock_counter.physical_counters)); - free_vio(uds_forget(journal->flush_vio)); + vdo_free(vdo_forget(journal->lock_counter.logical_zone_counts)); + vdo_free(vdo_forget(journal->lock_counter.physical_zone_counts)); + vdo_free(vdo_forget(journal->lock_counter.journal_counters)); + vdo_free(vdo_forget(journal->lock_counter.journal_decrement_counts)); + vdo_free(vdo_forget(journal->lock_counter.logical_counters)); + vdo_free(vdo_forget(journal->lock_counter.physical_counters)); + free_vio(vdo_forget(journal->flush_vio)); /* * FIXME: eventually, the journal should be constructed in a quiescent state which @@ -810,11 +810,11 @@ void vdo_free_recovery_journal(struct recovery_journal *journal) for (i = 0; i < RECOVERY_JOURNAL_RESERVED_BLOCKS; i++) { struct recovery_journal_block *block = &journal->blocks[i]; - uds_free(uds_forget(block->vio.data)); + vdo_free(vdo_forget(block->vio.data)); free_vio_components(&block->vio); } - uds_free(journal); + vdo_free(journal); } /** diff --git a/drivers/md/dm-vdo/repair.c b/drivers/md/dm-vdo/repair.c index 847aca9fbe47..bfcdedeedb86 100644 --- a/drivers/md/dm-vdo/repair.c +++ b/drivers/md/dm-vdo/repair.c @@ -226,7 +226,7 @@ static void uninitialize_vios(struct repair_completion *repair) while (repair->vio_count > 0) free_vio_components(&repair->vios[--repair->vio_count]); - uds_free(uds_forget(repair->vios)); + vdo_free(vdo_forget(repair->vios)); } static void free_repair_completion(struct repair_completion *repair) @@ -241,9 +241,9 @@ static void free_repair_completion(struct repair_completion *repair) repair->completion.vdo->block_map->zones[0].page_cache.rebuilding = false; uninitialize_vios(repair); - uds_free(uds_forget(repair->journal_data)); - uds_free(uds_forget(repair->entries)); - uds_free(repair); + vdo_free(vdo_forget(repair->journal_data)); + vdo_free(vdo_forget(repair->entries)); + vdo_free(repair); } static void finish_repair(struct vdo_completion *completion) @@ -262,7 +262,7 @@ static void finish_repair(struct vdo_completion *completion) repair->highest_tail, repair->logical_blocks_used, repair->block_map_data_blocks); - free_repair_completion(uds_forget(repair)); + free_repair_completion(vdo_forget(repair)); if (vdo_state_requires_read_only_rebuild(vdo->load_state)) { uds_log_info("Read-only rebuild complete"); @@ -295,7 +295,7 @@ static void abort_repair(struct 
vdo_completion *completion) else uds_log_warning("Recovery aborted"); - free_repair_completion(uds_forget(repair)); + free_repair_completion(vdo_forget(repair)); vdo_continue_completion(parent, result); } @@ -1108,7 +1108,7 @@ static void recover_block_map(struct vdo_completion *completion) if (repair->block_map_entry_count == 0) { uds_log_info("Replaying 0 recovery entries into block map"); - uds_free(uds_forget(repair->journal_data)); + vdo_free(vdo_forget(repair->journal_data)); launch_repair_completion(repair, load_slab_depot, VDO_ZONE_TYPE_ADMIN); return; } @@ -1418,7 +1418,7 @@ static int parse_journal_for_rebuild(struct repair_completion *repair) * packed_recovery_journal_entry from every valid journal block. */ count = ((repair->highest_tail - repair->block_map_head + 1) * entries_per_block); - result = uds_allocate(count, struct numbered_block_mapping, __func__, + result = vdo_allocate(count, struct numbered_block_mapping, __func__, &repair->entries); if (result != VDO_SUCCESS) return result; @@ -1464,7 +1464,7 @@ static int extract_new_mappings(struct repair_completion *repair) * Allocate an array of numbered_block_mapping structs just large enough to transcribe * every packed_recovery_journal_entry from every valid journal block. */ - result = uds_allocate(repair->entry_count, struct numbered_block_mapping, + result = vdo_allocate(repair->entry_count, struct numbered_block_mapping, __func__, &repair->entries); if (result != VDO_SUCCESS) return result; @@ -1709,7 +1709,7 @@ void vdo_repair(struct vdo_completion *parent) uds_log_warning("Device was dirty, rebuilding reference counts"); } - result = uds_allocate_extended(struct repair_completion, page_count, + result = vdo_allocate_extended(struct repair_completion, page_count, struct vdo_page_completion, __func__, &repair); if (result != VDO_SUCCESS) { @@ -1723,12 +1723,12 @@ void vdo_repair(struct vdo_completion *parent) prepare_repair_completion(repair, finish_repair, VDO_ZONE_TYPE_ADMIN); repair->page_count = page_count; - result = uds_allocate(remaining * VDO_BLOCK_SIZE, char, __func__, + result = vdo_allocate(remaining * VDO_BLOCK_SIZE, char, __func__, &repair->journal_data); if (abort_on_error(result, repair)) return; - result = uds_allocate(vio_count, struct vio, __func__, &repair->vios); + result = vdo_allocate(vio_count, struct vio, __func__, &repair->vios); if (abort_on_error(result, repair)) return; diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c index 8c6376e79a23..2d2cccf89edb 100644 --- a/drivers/md/dm-vdo/slab-depot.c +++ b/drivers/md/dm-vdo/slab-depot.c @@ -415,7 +415,7 @@ static void complete_reaping(struct vdo_completion *completion) struct slab_journal *journal = completion->parent; return_vio_to_pool(journal->slab->allocator->vio_pool, - vio_as_pooled_vio(as_vio(uds_forget(completion)))); + vio_as_pooled_vio(as_vio(vdo_forget(completion)))); finish_reaping(journal); reap_slab_journal(journal); } @@ -698,7 +698,7 @@ static void complete_write(struct vdo_completion *completion) sequence_number_t committed = get_committing_sequence_number(pooled); list_del_init(&pooled->list_entry); - return_vio_to_pool(journal->slab->allocator->vio_pool, uds_forget(pooled)); + return_vio_to_pool(journal->slab->allocator->vio_pool, vdo_forget(pooled)); if (result != VDO_SUCCESS) { vio_record_metadata_io_error(as_vio(completion)); @@ -777,7 +777,7 @@ static void write_slab_journal_block(struct vdo_waiter *waiter, void *context) * The slab summary update does a flush which is sufficient to protect us 
from corruption * due to out of order slab journal, reference block, or block map writes. */ - vdo_submit_metadata_vio(uds_forget(vio), block_number, write_slab_journal_endio, + vdo_submit_metadata_vio(vdo_forget(vio), block_number, write_slab_journal_endio, complete_write, REQ_OP_WRITE); /* Since the write is submitted, the tail block structure can be reused. */ @@ -2367,7 +2367,7 @@ static int allocate_slab_counters(struct vdo_slab *slab) if (result != VDO_SUCCESS) return result; - result = uds_allocate(slab->reference_block_count, struct reference_block, + result = vdo_allocate(slab->reference_block_count, struct reference_block, __func__, &slab->reference_blocks); if (result != VDO_SUCCESS) return result; @@ -2377,10 +2377,10 @@ static int allocate_slab_counters(struct vdo_slab *slab) * so we can word-search even at the very end. */ bytes = (slab->reference_block_count * COUNTS_PER_BLOCK) + (2 * BYTES_PER_WORD); - result = uds_allocate(bytes, vdo_refcount_t, "ref counts array", + result = vdo_allocate(bytes, vdo_refcount_t, "ref counts array", &slab->counters); if (result != UDS_SUCCESS) { - uds_free(uds_forget(slab->reference_blocks)); + vdo_free(vdo_forget(slab->reference_blocks)); return result; } @@ -2658,7 +2658,7 @@ static inline bool __must_check has_slabs_to_scrub(struct slab_scrubber *scrubbe */ static void uninitialize_scrubber_vio(struct slab_scrubber *scrubber) { - uds_free(uds_forget(scrubber->vio.data)); + vdo_free(vdo_forget(scrubber->vio.data)); free_vio_components(&scrubber->vio); } @@ -2679,7 +2679,7 @@ static void finish_scrubbing(struct slab_scrubber *scrubber, int result) if (scrubber->high_priority_only) { scrubber->high_priority_only = false; - vdo_fail_completion(uds_forget(scrubber->vio.completion.parent), result); + vdo_fail_completion(vdo_forget(scrubber->vio.completion.parent), result); } else if (done && (atomic_add_return(-1, &allocator->depot->zones_to_scrub) == 0)) { /* All of our slabs were scrubbed, and we're the last allocator to finish. 
*/ enum vdo_state prior_state = @@ -3382,7 +3382,7 @@ static void finish_loading_allocator(struct vdo_completion *completion) vdo_get_admin_state_code(&allocator->state); if (allocator->eraser != NULL) - dm_kcopyd_client_destroy(uds_forget(allocator->eraser)); + dm_kcopyd_client_destroy(vdo_forget(allocator->eraser)); if (operation == VDO_ADMIN_STATE_LOADING_FOR_RECOVERY) { void *context = @@ -3485,7 +3485,7 @@ static int get_slab_statuses(struct block_allocator *allocator, struct slab_status *statuses; struct slab_iterator iterator = get_slab_iterator(allocator); - result = uds_allocate(allocator->slab_count, struct slab_status, __func__, + result = vdo_allocate(allocator->slab_count, struct slab_status, __func__, &statuses); if (result != VDO_SUCCESS) return result; @@ -3552,7 +3552,7 @@ static int __must_check vdo_prepare_slabs_for_allocation(struct block_allocator register_slab_for_scrubbing(slab, high_priority); } - uds_free(slab_statuses); + vdo_free(slab_statuses); return VDO_SUCCESS; } @@ -3648,11 +3648,11 @@ static void free_slab(struct vdo_slab *slab) return; list_del(&slab->allocq_entry); - uds_free(uds_forget(slab->journal.block)); - uds_free(uds_forget(slab->journal.locks)); - uds_free(uds_forget(slab->counters)); - uds_free(uds_forget(slab->reference_blocks)); - uds_free(slab); + vdo_free(vdo_forget(slab->journal.block)); + vdo_free(vdo_forget(slab->journal.locks)); + vdo_free(vdo_forget(slab->counters)); + vdo_free(vdo_forget(slab->reference_blocks)); + vdo_free(slab); } static int initialize_slab_journal(struct vdo_slab *slab) @@ -3661,12 +3661,12 @@ static int initialize_slab_journal(struct vdo_slab *slab) const struct slab_config *slab_config = &slab->allocator->depot->slab_config; int result; - result = uds_allocate(slab_config->slab_journal_blocks, struct journal_lock, + result = vdo_allocate(slab_config->slab_journal_blocks, struct journal_lock, __func__, &journal->locks); if (result != VDO_SUCCESS) return result; - result = uds_allocate(VDO_BLOCK_SIZE, char, "struct packed_slab_journal_block", + result = vdo_allocate(VDO_BLOCK_SIZE, char, "struct packed_slab_journal_block", (char **) &journal->block); if (result != VDO_SUCCESS) return result; @@ -3722,7 +3722,7 @@ static int __must_check make_slab(physical_block_number_t slab_origin, struct vdo_slab *slab; int result; - result = uds_allocate(1, struct vdo_slab, __func__, &slab); + result = vdo_allocate(1, struct vdo_slab, __func__, &slab); if (result != VDO_SUCCESS) return result; @@ -3779,7 +3779,7 @@ static int allocate_slabs(struct slab_depot *depot, slab_count_t slab_count) physical_block_number_t slab_origin; int result; - result = uds_allocate(slab_count, struct vdo_slab *, + result = vdo_allocate(slab_count, struct vdo_slab *, "slab pointer array", &depot->new_slabs); if (result != VDO_SUCCESS) return result; @@ -3821,10 +3821,10 @@ void vdo_abandon_new_slabs(struct slab_depot *depot) return; for (i = depot->slab_count; i < depot->new_slab_count; i++) - free_slab(uds_forget(depot->new_slabs[i])); + free_slab(vdo_forget(depot->new_slabs[i])); depot->new_slab_count = 0; depot->new_size = 0; - uds_free(uds_forget(depot->new_slabs)); + vdo_free(vdo_forget(depot->new_slabs)); } /** @@ -3934,7 +3934,7 @@ static int initialize_slab_scrubber(struct block_allocator *allocator) char *journal_data; int result; - result = uds_allocate(VDO_BLOCK_SIZE * slab_journal_size, + result = vdo_allocate(VDO_BLOCK_SIZE * slab_journal_size, char, __func__, &journal_data); if (result != VDO_SUCCESS) return result; @@ -3945,7 +3945,7 @@ 
static int initialize_slab_scrubber(struct block_allocator *allocator) allocator, slab_journal_size, journal_data, &scrubber->vio); if (result != VDO_SUCCESS) { - uds_free(journal_data); + vdo_free(journal_data); return result; } @@ -3968,7 +3968,7 @@ static int __must_check initialize_slab_summary_block(struct block_allocator *al struct slab_summary_block *block = &allocator->summary_blocks[index]; int result; - result = uds_allocate(VDO_BLOCK_SIZE, char, __func__, &block->outgoing_entries); + result = vdo_allocate(VDO_BLOCK_SIZE, char, __func__, &block->outgoing_entries); if (result != VDO_SUCCESS) return result; @@ -4024,7 +4024,7 @@ static int __must_check initialize_block_allocator(struct slab_depot *depot, if (result != VDO_SUCCESS) return result; - result = uds_allocate(VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE, + result = vdo_allocate(VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE, struct slab_summary_block, __func__, &allocator->summary_blocks); if (result != VDO_SUCCESS) @@ -4084,7 +4084,7 @@ static int allocate_components(struct slab_depot *depot, depot->summary_origin = summary_partition->offset; depot->hint_shift = vdo_get_slab_summary_hint_shift(depot->slab_size_shift); - result = uds_allocate(MAXIMUM_VDO_SLAB_SUMMARY_ENTRIES, + result = vdo_allocate(MAXIMUM_VDO_SLAB_SUMMARY_ENTRIES, struct slab_summary_entry, __func__, &depot->summary_entries); if (result != VDO_SUCCESS) @@ -4172,7 +4172,7 @@ int vdo_decode_slab_depot(struct slab_depot_state_2_0 state, struct vdo *vdo, } slab_size_shift = ilog2(slab_size); - result = uds_allocate_extended(struct slab_depot, + result = vdo_allocate_extended(struct slab_depot, vdo->thread_config.physical_zone_count, struct block_allocator, __func__, &depot); if (result != VDO_SUCCESS) @@ -4205,10 +4205,10 @@ static void uninitialize_allocator_summary(struct block_allocator *allocator) for (i = 0; i < VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE; i++) { free_vio_components(&allocator->summary_blocks[i].vio); - uds_free(uds_forget(allocator->summary_blocks[i].outgoing_entries)); + vdo_free(vdo_forget(allocator->summary_blocks[i].outgoing_entries)); } - uds_free(uds_forget(allocator->summary_blocks)); + vdo_free(vdo_forget(allocator->summary_blocks)); } /** @@ -4228,25 +4228,25 @@ void vdo_free_slab_depot(struct slab_depot *depot) struct block_allocator *allocator = &depot->allocators[zone]; if (allocator->eraser != NULL) - dm_kcopyd_client_destroy(uds_forget(allocator->eraser)); + dm_kcopyd_client_destroy(vdo_forget(allocator->eraser)); uninitialize_allocator_summary(allocator); uninitialize_scrubber_vio(&allocator->scrubber); - free_vio_pool(uds_forget(allocator->vio_pool)); - vdo_free_priority_table(uds_forget(allocator->prioritized_slabs)); + free_vio_pool(vdo_forget(allocator->vio_pool)); + vdo_free_priority_table(vdo_forget(allocator->prioritized_slabs)); } if (depot->slabs != NULL) { slab_count_t i; for (i = 0; i < depot->slab_count; i++) - free_slab(uds_forget(depot->slabs[i])); + free_slab(vdo_forget(depot->slabs[i])); } - uds_free(uds_forget(depot->slabs)); - uds_free(uds_forget(depot->action_manager)); - uds_free(uds_forget(depot->summary_entries)); - uds_free(depot); + vdo_free(vdo_forget(depot->slabs)); + vdo_free(vdo_forget(depot->action_manager)); + vdo_free(vdo_forget(depot->summary_entries)); + vdo_free(depot); } /** @@ -4447,7 +4447,7 @@ static void finish_combining_zones(struct vdo_completion *completion) int result = completion->result; struct vdo_completion *parent = completion->parent; - free_vio(as_vio(uds_forget(completion))); + 
free_vio(as_vio(vdo_forget(completion))); vdo_fail_completion(parent, result); } @@ -4708,7 +4708,7 @@ static int finish_registration(void *context) struct slab_depot *depot = context; WRITE_ONCE(depot->slab_count, depot->new_slab_count); - uds_free(depot->slabs); + vdo_free(depot->slabs); depot->slabs = depot->new_slabs; depot->new_slabs = NULL; depot->new_slab_count = 0; diff --git a/drivers/md/dm-vdo/slab-depot.h b/drivers/md/dm-vdo/slab-depot.h index fba293f9713e..f234853501ca 100644 --- a/drivers/md/dm-vdo/slab-depot.h +++ b/drivers/md/dm-vdo/slab-depot.h @@ -241,7 +241,7 @@ struct vdo_slab { /* The number of free blocks */ u32 free_blocks; /* The array of reference counts */ - vdo_refcount_t *counters; /* use uds_allocate() to align data ptr */ + vdo_refcount_t *counters; /* use vdo_allocate() to align data ptr */ /* The saved block pointer and array indexes for the free block search */ struct search_cursor search_cursor; diff --git a/drivers/md/dm-vdo/thread-utils.c b/drivers/md/dm-vdo/thread-utils.c index 244abc6ad848..ad7682784459 100644 --- a/drivers/md/dm-vdo/thread-utils.c +++ b/drivers/md/dm-vdo/thread-utils.c @@ -66,9 +66,9 @@ static int thread_starter(void *arg) mutex_lock(&thread_mutex); hlist_add_head(&thread->thread_links, &thread_list); mutex_unlock(&thread_mutex); - uds_register_allocating_thread(&allocating_thread, NULL); + vdo_register_allocating_thread(&allocating_thread, NULL); thread->thread_function(thread->thread_data); - uds_unregister_allocating_thread(); + vdo_unregister_allocating_thread(); complete(&thread->thread_done); return 0; } @@ -82,7 +82,7 @@ int vdo_create_thread(void (*thread_function)(void *), void *thread_data, struct thread *thread; int result; - result = uds_allocate(1, struct thread, __func__, &thread); + result = vdo_allocate(1, struct thread, __func__, &thread); if (result != UDS_SUCCESS) { uds_log_warning("Error allocating memory for %s", name); return result; @@ -114,7 +114,7 @@ int vdo_create_thread(void (*thread_function)(void *), void *thread_data, } if (IS_ERR(task)) { - uds_free(thread); + vdo_free(thread); return PTR_ERR(task); } @@ -130,5 +130,5 @@ void vdo_join_threads(struct thread *thread) mutex_lock(&thread_mutex); hlist_del(&thread->thread_links); mutex_unlock(&thread_mutex); - uds_free(thread); + vdo_free(thread); } diff --git a/drivers/md/dm-vdo/uds-sysfs.c b/drivers/md/dm-vdo/uds-sysfs.c index 2c4fb277ba38..d1d5a30b3717 100644 --- a/drivers/md/dm-vdo/uds-sysfs.c +++ b/drivers/md/dm-vdo/uds-sysfs.c @@ -35,7 +35,7 @@ static char *buffer_to_string(const char *buf, size_t length) { char *string; - if (uds_allocate(length + 1, char, __func__, &string) != UDS_SUCCESS) + if (vdo_allocate(length + 1, char, __func__, &string) != UDS_SUCCESS) return NULL; memcpy(string, buf, length); @@ -118,7 +118,7 @@ static ssize_t parameter_store(struct kobject *kobj, struct attribute *attr, return -ENOMEM; pa->store_string(string); - uds_free(string); + vdo_free(string); return length; } diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c index 6baf319d79c6..ae62f260c5ec 100644 --- a/drivers/md/dm-vdo/vdo.c +++ b/drivers/md/dm-vdo/vdo.c @@ -134,13 +134,13 @@ static void start_vdo_request_queue(void *ptr) { struct vdo_thread *thread = vdo_get_work_queue_owner(vdo_get_current_work_queue()); - uds_register_allocating_thread(&thread->allocating_thread, + vdo_register_allocating_thread(&thread->allocating_thread, &thread->vdo->allocations_allowed); } static void finish_vdo_request_queue(void *ptr) { - uds_unregister_allocating_thread(); + 
vdo_unregister_allocating_thread(); } #ifdef MODULE @@ -172,10 +172,10 @@ static const struct vdo_work_queue_type cpu_q_type = { static void uninitialize_thread_config(struct thread_config *config) { - uds_free(uds_forget(config->logical_threads)); - uds_free(uds_forget(config->physical_threads)); - uds_free(uds_forget(config->hash_zone_threads)); - uds_free(uds_forget(config->bio_threads)); + vdo_free(vdo_forget(config->logical_threads)); + vdo_free(vdo_forget(config->physical_threads)); + vdo_free(vdo_forget(config->hash_zone_threads)); + vdo_free(vdo_forget(config->bio_threads)); memset(config, 0, sizeof(struct thread_config)); } @@ -214,28 +214,28 @@ static int __must_check initialize_thread_config(struct thread_count_config coun config->hash_zone_count = counts.hash_zones; } - result = uds_allocate(config->logical_zone_count, thread_id_t, + result = vdo_allocate(config->logical_zone_count, thread_id_t, "logical thread array", &config->logical_threads); if (result != VDO_SUCCESS) { uninitialize_thread_config(config); return result; } - result = uds_allocate(config->physical_zone_count, thread_id_t, + result = vdo_allocate(config->physical_zone_count, thread_id_t, "physical thread array", &config->physical_threads); if (result != VDO_SUCCESS) { uninitialize_thread_config(config); return result; } - result = uds_allocate(config->hash_zone_count, thread_id_t, + result = vdo_allocate(config->hash_zone_count, thread_id_t, "hash thread array", &config->hash_zone_threads); if (result != VDO_SUCCESS) { uninitialize_thread_config(config); return result; } - result = uds_allocate(config->bio_thread_count, thread_id_t, + result = vdo_allocate(config->bio_thread_count, thread_id_t, "bio thread array", &config->bio_threads); if (result != VDO_SUCCESS) { uninitialize_thread_config(config); @@ -276,14 +276,14 @@ static int __must_check read_geometry_block(struct vdo *vdo) char *block; int result; - result = uds_allocate(VDO_BLOCK_SIZE, u8, __func__, &block); + result = vdo_allocate(VDO_BLOCK_SIZE, u8, __func__, &block); if (result != VDO_SUCCESS) return result; result = create_metadata_vio(vdo, VIO_TYPE_GEOMETRY, VIO_PRIORITY_HIGH, NULL, block, &vio); if (result != VDO_SUCCESS) { - uds_free(block); + vdo_free(block); return result; } @@ -295,23 +295,23 @@ static int __must_check read_geometry_block(struct vdo *vdo) result = vio_reset_bio(vio, block, NULL, REQ_OP_READ, VDO_GEOMETRY_BLOCK_LOCATION); if (result != VDO_SUCCESS) { - free_vio(uds_forget(vio)); - uds_free(block); + free_vio(vdo_forget(vio)); + vdo_free(block); return result; } bio_set_dev(vio->bio, vdo_get_backing_device(vdo)); submit_bio_wait(vio->bio); result = blk_status_to_errno(vio->bio->bi_status); - free_vio(uds_forget(vio)); + free_vio(vdo_forget(vio)); if (result != 0) { uds_log_error_strerror(result, "synchronous read failed"); - uds_free(block); + vdo_free(block); return -EIO; } result = vdo_parse_geometry_block((u8 *) block, &vdo->geometry); - uds_free(block); + vdo_free(block); return result; } @@ -500,7 +500,7 @@ static int initialize_vdo(struct vdo *vdo, struct device_config *config, config->thread_counts.hash_zones, vdo->thread_config.thread_count); /* Compression context storage */ - result = uds_allocate(config->thread_counts.cpu_threads, char *, "LZ4 context", + result = vdo_allocate(config->thread_counts.cpu_threads, char *, "LZ4 context", &vdo->compression_context); if (result != VDO_SUCCESS) { *reason = "cannot allocate LZ4 context"; @@ -508,7 +508,7 @@ static int initialize_vdo(struct vdo *vdo, struct device_config 
*config, } for (i = 0; i < config->thread_counts.cpu_threads; i++) { - result = uds_allocate(LZ4_MEM_COMPRESS, char, "LZ4 context", + result = vdo_allocate(LZ4_MEM_COMPRESS, char, "LZ4 context", &vdo->compression_context[i]); if (result != VDO_SUCCESS) { *reason = "cannot allocate LZ4 context"; @@ -544,7 +544,7 @@ int vdo_make(unsigned int instance, struct device_config *config, char **reason, /* Initialize with a generic failure reason to prevent returning garbage. */ *reason = "Unspecified error"; - result = uds_allocate(1, struct vdo, __func__, &vdo); + result = vdo_allocate(1, struct vdo, __func__, &vdo); if (result != UDS_SUCCESS) { *reason = "Cannot allocate VDO"; return result; @@ -562,7 +562,7 @@ int vdo_make(unsigned int instance, struct device_config *config, char **reason, snprintf(vdo->thread_name_prefix, sizeof(vdo->thread_name_prefix), "%s%u", MODULE_NAME, instance); BUG_ON(vdo->thread_name_prefix[0] == '\0'); - result = uds_allocate(vdo->thread_config.thread_count, + result = vdo_allocate(vdo->thread_config.thread_count, struct vdo_thread, __func__, &vdo->threads); if (result != VDO_SUCCESS) { *reason = "Cannot allocate thread structures"; @@ -650,16 +650,16 @@ static void free_listeners(struct vdo_thread *thread) { struct read_only_listener *listener, *next; - for (listener = uds_forget(thread->listeners); listener != NULL; listener = next) { - next = uds_forget(listener->next); - uds_free(listener); + for (listener = vdo_forget(thread->listeners); listener != NULL; listener = next) { + next = vdo_forget(listener->next); + vdo_free(listener); } } static void uninitialize_super_block(struct vdo_super_block *super_block) { free_vio_components(&super_block->vio); - uds_free(super_block->buffer); + vdo_free(super_block->buffer); } /** @@ -701,36 +701,36 @@ void vdo_destroy(struct vdo *vdo) finish_vdo(vdo); unregister_vdo(vdo); free_data_vio_pool(vdo->data_vio_pool); - vdo_free_io_submitter(uds_forget(vdo->io_submitter)); - vdo_free_flusher(uds_forget(vdo->flusher)); - vdo_free_packer(uds_forget(vdo->packer)); - vdo_free_recovery_journal(uds_forget(vdo->recovery_journal)); - vdo_free_slab_depot(uds_forget(vdo->depot)); + vdo_free_io_submitter(vdo_forget(vdo->io_submitter)); + vdo_free_flusher(vdo_forget(vdo->flusher)); + vdo_free_packer(vdo_forget(vdo->packer)); + vdo_free_recovery_journal(vdo_forget(vdo->recovery_journal)); + vdo_free_slab_depot(vdo_forget(vdo->depot)); vdo_uninitialize_layout(&vdo->layout); vdo_uninitialize_layout(&vdo->next_layout); if (vdo->partition_copier) - dm_kcopyd_client_destroy(uds_forget(vdo->partition_copier)); + dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier)); uninitialize_super_block(&vdo->super_block); - vdo_free_block_map(uds_forget(vdo->block_map)); - vdo_free_hash_zones(uds_forget(vdo->hash_zones)); - vdo_free_physical_zones(uds_forget(vdo->physical_zones)); - vdo_free_logical_zones(uds_forget(vdo->logical_zones)); + vdo_free_block_map(vdo_forget(vdo->block_map)); + vdo_free_hash_zones(vdo_forget(vdo->hash_zones)); + vdo_free_physical_zones(vdo_forget(vdo->physical_zones)); + vdo_free_logical_zones(vdo_forget(vdo->logical_zones)); if (vdo->threads != NULL) { for (i = 0; i < vdo->thread_config.thread_count; i++) { free_listeners(&vdo->threads[i]); - vdo_free_work_queue(uds_forget(vdo->threads[i].queue)); + vdo_free_work_queue(vdo_forget(vdo->threads[i].queue)); } - uds_free(uds_forget(vdo->threads)); + vdo_free(vdo_forget(vdo->threads)); } uninitialize_thread_config(&vdo->thread_config); if (vdo->compression_context != NULL) { for 
(i = 0; i < vdo->device_config->thread_counts.cpu_threads; i++) - uds_free(uds_forget(vdo->compression_context[i])); + vdo_free(vdo_forget(vdo->compression_context[i])); - uds_free(uds_forget(vdo->compression_context)); + vdo_free(vdo_forget(vdo->compression_context)); } /* @@ -738,7 +738,7 @@ void vdo_destroy(struct vdo *vdo) * the count goes to zero the VDO object will be freed as a side effect. */ if (!vdo->sysfs_added) - uds_free(vdo); + vdo_free(vdo); else kobject_put(&vdo->vdo_directory); } @@ -747,7 +747,7 @@ static int initialize_super_block(struct vdo *vdo, struct vdo_super_block *super { int result; - result = uds_allocate(VDO_BLOCK_SIZE, char, "encoded super block", + result = vdo_allocate(VDO_BLOCK_SIZE, char, "encoded super block", (char **) &vdo->super_block.buffer); if (result != VDO_SUCCESS) return result; @@ -769,7 +769,7 @@ static void finish_reading_super_block(struct vdo_completion *completion) struct vdo_super_block *super_block = container_of(as_vio(completion), struct vdo_super_block, vio); - vdo_continue_completion(uds_forget(completion->parent), + vdo_continue_completion(vdo_forget(completion->parent), vdo_decode_super_block(super_block->buffer)); } @@ -965,7 +965,7 @@ static void record_vdo(struct vdo *vdo) */ static void continue_super_block_parent(struct vdo_completion *completion) { - vdo_continue_completion(uds_forget(completion->parent), completion->result); + vdo_continue_completion(vdo_forget(completion->parent), completion->result); } /** @@ -1055,7 +1055,7 @@ int vdo_register_read_only_listener(struct vdo *vdo, void *listener, if (result != VDO_SUCCESS) return result; - result = uds_allocate(1, struct read_only_listener, __func__, + result = vdo_allocate(1, struct read_only_listener, __func__, &read_only_listener); if (result != VDO_SUCCESS) return result; @@ -1184,7 +1184,7 @@ static void finish_entering_read_only_mode(struct vdo_completion *completion) spin_unlock(¬ifier->lock); if (notifier->waiter != NULL) - vdo_continue_completion(uds_forget(notifier->waiter), + vdo_continue_completion(vdo_forget(notifier->waiter), completion->result); } @@ -1621,7 +1621,7 @@ static void get_vdo_statistics(const struct vdo *vdo, struct vdo_statistics *sta copy_bio_stat(&stats->bios_acknowledged_partial, &vdo->stats.bios_acknowledged_partial); stats->bios_in_progress = subtract_bio_stats(stats->bios_in, stats->bios_acknowledged); - uds_get_memory_stats(&stats->memory_usage.bytes_used, + vdo_get_memory_stats(&stats->memory_usage.bytes_used, &stats->memory_usage.peak_bytes_used); } diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c index 4832ea46551f..83c36f7590de 100644 --- a/drivers/md/dm-vdo/vio.c +++ b/drivers/md/dm-vdo/vio.c @@ -52,7 +52,7 @@ static int create_multi_block_bio(block_count_t size, struct bio **bio_ptr) struct bio *bio = NULL; int result; - result = uds_allocate_extended(struct bio, size + 1, struct bio_vec, + result = vdo_allocate_extended(struct bio, size + 1, struct bio_vec, "bio", &bio); if (result != VDO_SUCCESS) return result; @@ -72,7 +72,7 @@ void vdo_free_bio(struct bio *bio) return; bio_uninit(bio); - uds_free(uds_forget(bio)); + vdo_free(vdo_forget(bio)); } int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type, @@ -129,7 +129,7 @@ int create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type, * Metadata vios should use direct allocation and not use the buffer pool, which is * reserved for submissions from the linux block layer. 
*/ - result = uds_allocate(1, struct vio, __func__, &vio); + result = vdo_allocate(1, struct vio, __func__, &vio); if (result != VDO_SUCCESS) { uds_log_error("metadata vio allocation failure %d", result); return result; @@ -138,7 +138,7 @@ int create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type, result = allocate_vio_components(vdo, vio_type, priority, parent, block_count, data, vio); if (result != VDO_SUCCESS) { - uds_free(vio); + vdo_free(vio); return result; } @@ -156,7 +156,7 @@ void free_vio_components(struct vio *vio) return; BUG_ON(is_data_vio(vio)); - vdo_free_bio(uds_forget(vio->bio)); + vdo_free_bio(vdo_forget(vio->bio)); } /** @@ -166,7 +166,7 @@ void free_vio_components(struct vio *vio) void free_vio(struct vio *vio) { free_vio_components(vio); - uds_free(vio); + vdo_free(vio); } /* Set bio properties for a VDO read or write. */ @@ -316,7 +316,7 @@ int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id, char *ptr; int result; - result = uds_allocate_extended(struct vio_pool, pool_size, struct pooled_vio, + result = vdo_allocate_extended(struct vio_pool, pool_size, struct pooled_vio, __func__, &pool); if (result != VDO_SUCCESS) return result; @@ -325,7 +325,7 @@ int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id, INIT_LIST_HEAD(&pool->available); INIT_LIST_HEAD(&pool->busy); - result = uds_allocate(pool_size * VDO_BLOCK_SIZE, char, + result = vdo_allocate(pool_size * VDO_BLOCK_SIZE, char, "VIO pool buffer", &pool->buffer); if (result != VDO_SUCCESS) { free_vio_pool(pool); @@ -380,8 +380,8 @@ void free_vio_pool(struct vio_pool *pool) ASSERT_LOG_ONLY(pool->size == 0, "VIO pool must not have missing entries when being freed"); - uds_free(uds_forget(pool->buffer)); - uds_free(pool); + vdo_free(vdo_forget(pool->buffer)); + vdo_free(pool); } /** -- cgit v1.2.3 From 2de70388b3751e8cd6727441330978e69a578e0c Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Tue, 13 Feb 2024 12:06:53 -0600 Subject: dm vdo: check for VDO_SUCCESS return value from memory-alloc functions VDO_SUCCESS and UDS_SUCCESS were used interchangeably; update all callers of VDO's memory-alloc functions to consistently check for VDO_SUCCESS.
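As a reader's aid, the call-site shape this patch converges on can be summarized in a minimal sketch. This is not code from the tree: example_make_table(), struct example_table, and the "example entries" tag are invented for illustration, while vdo_allocate(), vdo_free(), and VDO_SUCCESS are the real identifiers being standardized on.

/* Illustrative sketch; assumes dm-vdo's memory-alloc.h and <linux/types.h> are in scope. */
struct example_table {
	u64 *entries;
};

static int example_make_table(u32 count, struct example_table **table_ptr)
{
	struct example_table *table;
	int result;

	/* vdo_allocate(count, type, tag, &ptr) reports VDO_SUCCESS on success. */
	result = vdo_allocate(1, struct example_table, __func__, &table);
	if (result != VDO_SUCCESS)
		return result;

	result = vdo_allocate(count, u64, "example entries", &table->entries);
	if (result != VDO_SUCCESS) {
		/* A later failure frees everything allocated so far. */
		vdo_free(table);
		return result;
	}

	*table_ptr = table;
	return VDO_SUCCESS;
}

Each hunk that follows makes this same mechanical change: the result of a vdo_allocate*() call is compared against VDO_SUCCESS instead of UDS_SUCCESS.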
Signed-off-by: Mike Snitzer Signed-off-by: Matthew Sakai --- drivers/md/dm-vdo/block-map.c | 6 +++--- drivers/md/dm-vdo/data-vio.c | 2 +- drivers/md/dm-vdo/dm-vdo-target.c | 18 +++++++++--------- drivers/md/dm-vdo/encodings.c | 2 +- drivers/md/dm-vdo/funnel-queue.c | 2 +- drivers/md/dm-vdo/funnel-workqueue.c | 6 +++--- drivers/md/dm-vdo/indexer/chapter-index.c | 2 +- drivers/md/dm-vdo/indexer/config.c | 2 +- drivers/md/dm-vdo/indexer/delta-index.c | 10 +++++----- drivers/md/dm-vdo/indexer/funnel-requestqueue.c | 2 +- drivers/md/dm-vdo/indexer/geometry.c | 2 +- drivers/md/dm-vdo/indexer/index-layout.c | 18 +++++++++--------- drivers/md/dm-vdo/indexer/index-page-map.c | 8 ++++---- drivers/md/dm-vdo/indexer/index-session.c | 2 +- drivers/md/dm-vdo/indexer/index.c | 12 ++++++------ drivers/md/dm-vdo/indexer/io-factory.c | 6 +++--- drivers/md/dm-vdo/indexer/open-chapter.c | 4 ++-- drivers/md/dm-vdo/indexer/radix-sort.c | 2 +- drivers/md/dm-vdo/indexer/sparse-cache.c | 8 ++++---- drivers/md/dm-vdo/indexer/volume-index.c | 6 +++--- drivers/md/dm-vdo/indexer/volume.c | 14 +++++++------- drivers/md/dm-vdo/int-map.c | 18 +++++++++--------- drivers/md/dm-vdo/io-submitter.c | 2 +- drivers/md/dm-vdo/message-stats.c | 2 +- drivers/md/dm-vdo/slab-depot.c | 2 +- drivers/md/dm-vdo/thread-utils.c | 2 +- drivers/md/dm-vdo/uds-sysfs.c | 2 +- drivers/md/dm-vdo/vdo.c | 2 +- 28 files changed, 82 insertions(+), 82 deletions(-) (limited to 'drivers/md/dm-vdo/vdo.c') diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c index b09974ad41d2..c4719fb30f86 100644 --- a/drivers/md/dm-vdo/block-map.c +++ b/drivers/md/dm-vdo/block-map.c @@ -223,11 +223,11 @@ static int __must_check allocate_cache_components(struct vdo_page_cache *cache) result = vdo_allocate(cache->page_count, struct page_info, "page infos", &cache->infos); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; result = vdo_allocate_memory(size, VDO_BLOCK_SIZE, "cache pages", &cache->pages); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; result = vdo_int_map_create(cache->page_count, &cache->page_map); @@ -2874,7 +2874,7 @@ int vdo_decode_block_map(struct block_map_state_2_0 state, block_count_t logical result = vdo_allocate_extended(struct block_map, vdo->thread_config.logical_zone_count, struct block_map_zone, __func__, &map); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; map->vdo = vdo; diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c index dcdd767e40e5..3d5054e61330 100644 --- a/drivers/md/dm-vdo/data-vio.c +++ b/drivers/md/dm-vdo/data-vio.c @@ -847,7 +847,7 @@ int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size, result = vdo_allocate_extended(struct data_vio_pool, pool_size, struct data_vio, __func__, &pool); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; ASSERT_LOG_ONLY((discard_limit <= pool_size), diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c index 86c30fbd75ca..90ba379f8d3e 100644 --- a/drivers/md/dm-vdo/dm-vdo-target.c +++ b/drivers/md/dm-vdo/dm-vdo-target.c @@ -280,7 +280,7 @@ static int split_string(const char *string, char separator, char ***substring_ar result = vdo_allocate(substring_count + 1, char *, "string-splitting array", &substrings); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; for (s = string; *s != 0; s++) { @@ -289,7 +289,7 @@ static int split_string(const char *string, char separator, char ***substring_ar 
result = vdo_allocate(length + 1, char, "split string", &substrings[current_substring]); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { free_string_array(substrings); return result; } @@ -310,7 +310,7 @@ static int split_string(const char *string, char separator, char ***substring_ar result = vdo_allocate(length + 1, char, "split string", &substrings[current_substring]); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { free_string_array(substrings); return result; } @@ -1527,7 +1527,7 @@ static size_t get_bit_array_size(unsigned int bit_count) * Since the array is initially NULL, this also initializes the array the first time we allocate an * instance number. * - * Return: UDS_SUCCESS or an error code from the allocation + * Return: VDO_SUCCESS or an error code from the allocation */ static int grow_bit_array(void) { @@ -1540,19 +1540,19 @@ static int grow_bit_array(void) get_bit_array_size(instances.bit_count), get_bit_array_size(new_count), "instance number bit array", &new_words); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; instances.bit_count = new_count; instances.words = new_words; - return UDS_SUCCESS; + return VDO_SUCCESS; } /** * allocate_instance() - Allocate an instance number. * @instance_ptr: A point to hold the instance number * - * Return: UDS_SUCCESS or an error code + * Return: VDO_SUCCESS or an error code * * This function must be called while holding the instances lock. */ @@ -1564,7 +1564,7 @@ static int allocate_instance(unsigned int *instance_ptr) /* If there are no unallocated instances, grow the bit array. */ if (instances.count >= instances.bit_count) { result = grow_bit_array(); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; } @@ -1587,7 +1587,7 @@ static int allocate_instance(unsigned int *instance_ptr) instances.count++; instances.next = instance + 1; *instance_ptr = instance; - return UDS_SUCCESS; + return VDO_SUCCESS; } static int construct_new_vdo_registered(struct dm_target *ti, unsigned int argc, diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c index 56d94339d2af..a97771fe0a43 100644 --- a/drivers/md/dm-vdo/encodings.c +++ b/drivers/md/dm-vdo/encodings.c @@ -800,7 +800,7 @@ static int allocate_partition(struct layout *layout, u8 id, int result; result = vdo_allocate(1, struct partition, __func__, &partition); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; partition->id = id; diff --git a/drivers/md/dm-vdo/funnel-queue.c b/drivers/md/dm-vdo/funnel-queue.c index 67f7b52ecc86..ce0e801fd955 100644 --- a/drivers/md/dm-vdo/funnel-queue.c +++ b/drivers/md/dm-vdo/funnel-queue.c @@ -15,7 +15,7 @@ int uds_make_funnel_queue(struct funnel_queue **queue_ptr) struct funnel_queue *queue; result = vdo_allocate(1, struct funnel_queue, "funnel queue", &queue); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; /* diff --git a/drivers/md/dm-vdo/funnel-workqueue.c b/drivers/md/dm-vdo/funnel-workqueue.c index ebf8dce67086..a88f5c93eae5 100644 --- a/drivers/md/dm-vdo/funnel-workqueue.c +++ b/drivers/md/dm-vdo/funnel-workqueue.c @@ -324,7 +324,7 @@ static int make_simple_work_queue(const char *thread_name_prefix, const char *na VDO_WORK_Q_MAX_PRIORITY); result = vdo_allocate(1, struct simple_work_queue, "simple work queue", &queue); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; queue->private = private; @@ -401,12 +401,12 @@ int vdo_make_work_queue(const char *thread_name_prefix, const char *name, 
result = vdo_allocate(1, struct round_robin_work_queue, "round-robin work queue", &queue); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; result = vdo_allocate(thread_count, struct simple_work_queue *, "subordinate work queues", &queue->service_queues); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { vdo_free(queue); return result; } diff --git a/drivers/md/dm-vdo/indexer/chapter-index.c b/drivers/md/dm-vdo/indexer/chapter-index.c index 9477150362ae..68d86028dbb7 100644 --- a/drivers/md/dm-vdo/indexer/chapter-index.c +++ b/drivers/md/dm-vdo/indexer/chapter-index.c @@ -21,7 +21,7 @@ int uds_make_open_chapter_index(struct open_chapter_index **chapter_index, struct open_chapter_index *index; result = vdo_allocate(1, struct open_chapter_index, "open chapter index", &index); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; /* diff --git a/drivers/md/dm-vdo/indexer/config.c b/drivers/md/dm-vdo/indexer/config.c index cd20ee8b9a02..5da39043b9ae 100644 --- a/drivers/md/dm-vdo/indexer/config.c +++ b/drivers/md/dm-vdo/indexer/config.c @@ -326,7 +326,7 @@ int uds_make_configuration(const struct uds_parameters *params, return result; result = vdo_allocate(1, struct uds_configuration, __func__, &config); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; result = uds_make_index_geometry(DEFAULT_BYTES_PER_PAGE, record_pages_per_chapter, diff --git a/drivers/md/dm-vdo/indexer/delta-index.c b/drivers/md/dm-vdo/indexer/delta-index.c index 11f7b85b6710..5bba9a48c5a0 100644 --- a/drivers/md/dm-vdo/indexer/delta-index.c +++ b/drivers/md/dm-vdo/indexer/delta-index.c @@ -312,18 +312,18 @@ static int initialize_delta_zone(struct delta_zone *delta_zone, size_t size, int result; result = vdo_allocate(size, u8, "delta list", &delta_zone->memory); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; result = vdo_allocate(list_count + 2, u64, "delta list temp", &delta_zone->new_offsets); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; /* Allocate the delta lists. 
*/ result = vdo_allocate(list_count + 2, struct delta_list, "delta lists", &delta_zone->delta_lists); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; compute_coding_constants(mean_delta, &delta_zone->min_bits, @@ -354,7 +354,7 @@ int uds_initialize_delta_index(struct delta_index *delta_index, unsigned int zon result = vdo_allocate(zone_count, struct delta_zone, "Delta Index Zones", &delta_index->delta_zones); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; delta_index->zone_count = zone_count; @@ -1048,7 +1048,7 @@ int uds_finish_restoring_delta_index(struct delta_index *delta_index, u8 *data; result = vdo_allocate(DELTA_LIST_MAX_BYTE_COUNT, u8, __func__, &data); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; for (z = 0; z < reader_count; z++) { diff --git a/drivers/md/dm-vdo/indexer/funnel-requestqueue.c b/drivers/md/dm-vdo/indexer/funnel-requestqueue.c index 95a402ec31c9..eee7b980960f 100644 --- a/drivers/md/dm-vdo/indexer/funnel-requestqueue.c +++ b/drivers/md/dm-vdo/indexer/funnel-requestqueue.c @@ -199,7 +199,7 @@ int uds_make_request_queue(const char *queue_name, struct uds_request_queue *queue; result = vdo_allocate(1, struct uds_request_queue, __func__, &queue); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; queue->processor = processor; diff --git a/drivers/md/dm-vdo/indexer/geometry.c b/drivers/md/dm-vdo/indexer/geometry.c index c735e6cb4425..c0575612e820 100644 --- a/drivers/md/dm-vdo/indexer/geometry.c +++ b/drivers/md/dm-vdo/indexer/geometry.c @@ -62,7 +62,7 @@ int uds_make_index_geometry(size_t bytes_per_page, u32 record_pages_per_chapter, struct index_geometry *geometry; result = vdo_allocate(1, struct index_geometry, "geometry", &geometry); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; geometry->bytes_per_page = bytes_per_page; diff --git a/drivers/md/dm-vdo/indexer/index-layout.c b/drivers/md/dm-vdo/indexer/index-layout.c index c1bcff03cc55..01e0db4184aa 100644 --- a/drivers/md/dm-vdo/indexer/index-layout.c +++ b/drivers/md/dm-vdo/indexer/index-layout.c @@ -487,7 +487,7 @@ static int __must_check make_index_save_region_table(struct index_save_layout *i result = vdo_allocate_extended(struct region_table, region_count, struct layout_region, "layout region table for ISL", &table); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; lr = &table->regions[0]; @@ -546,7 +546,7 @@ static int __must_check write_index_save_header(struct index_save_layout *isl, size_t offset = 0; result = vdo_allocate(table->encoded_size, u8, "index save data", &buffer); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; encode_region_table(buffer, &offset, table); @@ -670,7 +670,7 @@ static int __must_check make_layout_region_table(struct index_layout *layout, result = vdo_allocate_extended(struct region_table, region_count, struct layout_region, "layout region table", &table); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; lr = &table->regions[0]; @@ -716,7 +716,7 @@ static int __must_check write_layout_header(struct index_layout *layout, size_t offset = 0; result = vdo_allocate(table->encoded_size, u8, "layout data", &buffer); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; encode_region_table(buffer, &offset, table); @@ -807,7 +807,7 @@ static int create_index_layout(struct index_layout *layout, struct uds_configura result = vdo_allocate(sizes.save_count, struct 
index_save_layout, __func__, &layout->index.saves); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; initialize_layout(layout, &sizes); @@ -1165,7 +1165,7 @@ static int __must_check load_region_table(struct buffered_reader *reader, result = vdo_allocate_extended(struct region_table, header.region_count, struct layout_region, "single file layout region table", &table); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; table->header = header; @@ -1202,7 +1202,7 @@ static int __must_check read_super_block_data(struct buffered_reader *reader, size_t offset = 0; result = vdo_allocate(saved_size, u8, "super block data", &buffer); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; result = uds_read_from_buffered_reader(reader, buffer, saved_size); @@ -1337,7 +1337,7 @@ static int __must_check reconstitute_layout(struct index_layout *layout, result = vdo_allocate(layout->super.max_saves, struct index_save_layout, __func__, &layout->index.saves); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; layout->total_blocks = table->header.region_blocks; @@ -1696,7 +1696,7 @@ int uds_make_index_layout(struct uds_configuration *config, bool new_layout, return result; result = vdo_allocate(1, struct index_layout, __func__, &layout); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; result = create_layout_factory(layout, config); diff --git a/drivers/md/dm-vdo/indexer/index-page-map.c b/drivers/md/dm-vdo/indexer/index-page-map.c index ddb6d843cbd9..c5d1b9995846 100644 --- a/drivers/md/dm-vdo/indexer/index-page-map.c +++ b/drivers/md/dm-vdo/indexer/index-page-map.c @@ -39,14 +39,14 @@ int uds_make_index_page_map(const struct index_geometry *geometry, struct index_page_map *map; result = vdo_allocate(1, struct index_page_map, "page map", &map); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; map->geometry = geometry; map->entries_per_chapter = geometry->index_pages_per_chapter - 1; result = vdo_allocate(get_entry_count(geometry), u16, "Index Page Map Entries", &map->entries); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { uds_free_index_page_map(map); return result; } @@ -119,7 +119,7 @@ int uds_write_index_page_map(struct index_page_map *map, struct buffered_writer u32 i; result = vdo_allocate(saved_size, u8, "page map data", &buffer); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; memcpy(buffer, PAGE_MAP_MAGIC, PAGE_MAP_MAGIC_LENGTH); @@ -146,7 +146,7 @@ int uds_read_index_page_map(struct index_page_map *map, struct buffered_reader * u32 i; result = vdo_allocate(saved_size, u8, "page map data", &buffer); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; result = uds_read_from_buffered_reader(reader, buffer, saved_size); diff --git a/drivers/md/dm-vdo/indexer/index-session.c b/drivers/md/dm-vdo/indexer/index-session.c index 0f920a583021..22445dcb3fe0 100644 --- a/drivers/md/dm-vdo/indexer/index-session.c +++ b/drivers/md/dm-vdo/indexer/index-session.c @@ -222,7 +222,7 @@ static int __must_check make_empty_index_session(struct uds_index_session **inde struct uds_index_session *session; result = vdo_allocate(1, struct uds_index_session, __func__, &session); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; mutex_init(&session->request_mutex); diff --git a/drivers/md/dm-vdo/indexer/index.c b/drivers/md/dm-vdo/indexer/index.c index c576033b8a53..243a9deab4de 100644 --- 
a/drivers/md/dm-vdo/indexer/index.c +++ b/drivers/md/dm-vdo/indexer/index.c @@ -89,7 +89,7 @@ static int launch_zone_message(struct uds_zone_message message, unsigned int zon struct uds_request *request; result = vdo_allocate(1, struct uds_request, __func__, &request); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; request->index = index; @@ -770,7 +770,7 @@ static int make_chapter_writer(struct uds_index *index, result = vdo_allocate_extended(struct chapter_writer, index->zone_count, struct open_chapter_zone *, "Chapter Writer", &writer); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; writer->index = index; @@ -779,7 +779,7 @@ static int make_chapter_writer(struct uds_index *index, result = vdo_allocate_cache_aligned(collated_records_size, "collated records", &writer->collated_records); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { free_chapter_writer(writer); return result; } @@ -1127,7 +1127,7 @@ static int make_index_zone(struct uds_index *index, unsigned int zone_number) struct index_zone *zone; result = vdo_allocate(1, struct index_zone, "index zone", &zone); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; result = uds_make_open_chapter(index->volume->geometry, index->zone_count, @@ -1165,7 +1165,7 @@ int uds_make_index(struct uds_configuration *config, enum uds_open_index_type op result = vdo_allocate_extended(struct uds_index, config->zone_count, struct uds_request_queue *, "index", &index); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; index->zone_count = config->zone_count; @@ -1178,7 +1178,7 @@ int uds_make_index(struct uds_configuration *config, enum uds_open_index_type op result = vdo_allocate(index->zone_count, struct index_zone *, "zones", &index->zones); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { uds_free_index(index); return result; } diff --git a/drivers/md/dm-vdo/indexer/io-factory.c b/drivers/md/dm-vdo/indexer/io-factory.c index 749c950c0189..0dcf6d596653 100644 --- a/drivers/md/dm-vdo/indexer/io-factory.c +++ b/drivers/md/dm-vdo/indexer/io-factory.c @@ -65,7 +65,7 @@ int uds_make_io_factory(struct block_device *bdev, struct io_factory **factory_p struct io_factory *factory; result = vdo_allocate(1, struct io_factory, __func__, &factory); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; factory->bdev = bdev; @@ -145,7 +145,7 @@ int uds_make_buffered_reader(struct io_factory *factory, off_t offset, u64 block return result; result = vdo_allocate(1, struct buffered_reader, "buffered reader", &reader); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { dm_bufio_client_destroy(client); return result; } @@ -283,7 +283,7 @@ int uds_make_buffered_writer(struct io_factory *factory, off_t offset, u64 block return result; result = vdo_allocate(1, struct buffered_writer, "buffered writer", &writer); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { dm_bufio_client_destroy(client); return result; } diff --git a/drivers/md/dm-vdo/indexer/open-chapter.c b/drivers/md/dm-vdo/indexer/open-chapter.c index 4a4dc94915dd..46b7bc1ac324 100644 --- a/drivers/md/dm-vdo/indexer/open-chapter.c +++ b/drivers/md/dm-vdo/indexer/open-chapter.c @@ -71,14 +71,14 @@ int uds_make_open_chapter(const struct index_geometry *geometry, unsigned int zo result = vdo_allocate_extended(struct open_chapter_zone, slot_count, struct open_chapter_zone_slot, "open chapter", &open_chapter); - if (result != UDS_SUCCESS) + if (result != 
VDO_SUCCESS) return result; open_chapter->slot_count = slot_count; open_chapter->capacity = capacity; result = vdo_allocate_cache_aligned(records_size(open_chapter), "record pages", &open_chapter->records); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { uds_free_open_chapter(open_chapter); return result; } diff --git a/drivers/md/dm-vdo/indexer/radix-sort.c b/drivers/md/dm-vdo/indexer/radix-sort.c index 74ea18b8e9be..66b8c706a1ef 100644 --- a/drivers/md/dm-vdo/indexer/radix-sort.c +++ b/drivers/md/dm-vdo/indexer/radix-sort.c @@ -213,7 +213,7 @@ int uds_make_radix_sorter(unsigned int count, struct radix_sorter **sorter) result = vdo_allocate_extended(struct radix_sorter, stack_size, struct task, __func__, &radix_sorter); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; radix_sorter->count = count; diff --git a/drivers/md/dm-vdo/indexer/sparse-cache.c b/drivers/md/dm-vdo/indexer/sparse-cache.c index e297ba2d6ceb..28920167827c 100644 --- a/drivers/md/dm-vdo/indexer/sparse-cache.c +++ b/drivers/md/dm-vdo/indexer/sparse-cache.c @@ -224,7 +224,7 @@ static int __must_check initialize_cached_chapter_index(struct cached_chapter_in result = vdo_allocate(chapter->index_pages_count, struct delta_index_page, __func__, &chapter->index_pages); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; return vdo_allocate(chapter->index_pages_count, struct dm_buffer *, @@ -242,7 +242,7 @@ static int __must_check make_search_list(struct sparse_cache *cache, bytes = (sizeof(struct search_list) + (cache->capacity * sizeof(struct cached_chapter_index *))); result = vdo_allocate_cache_aligned(bytes, "search list", &list); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; list->capacity = cache->capacity; @@ -265,7 +265,7 @@ int uds_make_sparse_cache(const struct index_geometry *geometry, unsigned int ca bytes = (sizeof(struct sparse_cache) + (capacity * sizeof(struct cached_chapter_index))); result = vdo_allocate_cache_aligned(bytes, "sparse cache", &cache); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; cache->geometry = geometry; @@ -296,7 +296,7 @@ int uds_make_sparse_cache(const struct index_geometry *geometry, unsigned int ca /* purge_search_list() needs some temporary lists for sorting. */ result = vdo_allocate(capacity * 2, struct cached_chapter_index *, "scratch entries", &cache->scratch_entries); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) goto out; *cache_ptr = cache; diff --git a/drivers/md/dm-vdo/indexer/volume-index.c b/drivers/md/dm-vdo/indexer/volume-index.c index 1a762e6dd709..1cc9ac4fe510 100644 --- a/drivers/md/dm-vdo/indexer/volume-index.c +++ b/drivers/md/dm-vdo/indexer/volume-index.c @@ -1213,7 +1213,7 @@ static int initialize_volume_sub_index(const struct uds_configuration *config, /* The following arrays are initialized to all zeros. 
*/ result = vdo_allocate(params.list_count, u64, "first chapter to flush", &sub_index->flush_chapters); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; return vdo_allocate(zone_count, struct volume_sub_index_zone, @@ -1229,7 +1229,7 @@ int uds_make_volume_index(const struct uds_configuration *config, u64 volume_non int result; result = vdo_allocate(1, struct volume_index, "volume index", &volume_index); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; volume_index->zone_count = config->zone_count; @@ -1251,7 +1251,7 @@ int uds_make_volume_index(const struct uds_configuration *config, u64 volume_non result = vdo_allocate(config->zone_count, struct volume_index_zone, "volume index zones", &volume_index->zones); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { uds_free_volume_index(volume_index); return result; } diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c index 2d8901732f5d..959dd82ef665 100644 --- a/drivers/md/dm-vdo/indexer/volume.c +++ b/drivers/md/dm-vdo/indexer/volume.c @@ -1509,22 +1509,22 @@ static int __must_check initialize_page_cache(struct page_cache *cache, result = vdo_allocate(VOLUME_CACHE_MAX_QUEUED_READS, struct queued_read, "volume read queue", &cache->read_queue); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; result = vdo_allocate(cache->zone_count, struct search_pending_counter, "Volume Cache Zones", &cache->search_pending_counters); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; result = vdo_allocate(cache->indexable_pages, u16, "page cache index", &cache->index); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; result = vdo_allocate(cache->cache_slots, struct cached_page, "page cache cache", &cache->cache); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; /* Initialize index values to invalid values. */ @@ -1547,7 +1547,7 @@ int uds_make_volume(const struct uds_configuration *config, struct index_layout int result; result = vdo_allocate(1, struct volume, "volume", &volume); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; volume->nonce = uds_get_volume_nonce(layout); @@ -1586,7 +1586,7 @@ int uds_make_volume(const struct uds_configuration *config, struct index_layout result = vdo_allocate(geometry->records_per_page, const struct uds_volume_record *, "record pointers", &volume->record_pointers); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { uds_free_volume(volume); return result; } @@ -1626,7 +1626,7 @@ int uds_make_volume(const struct uds_configuration *config, struct index_layout result = vdo_allocate(config->read_threads, struct thread *, "reader threads", &volume->reader_threads); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { uds_free_volume(volume); return result; } diff --git a/drivers/md/dm-vdo/int-map.c b/drivers/md/dm-vdo/int-map.c index 0bd742ecbe2e..9849d12f2a36 100644 --- a/drivers/md/dm-vdo/int-map.c +++ b/drivers/md/dm-vdo/int-map.c @@ -152,7 +152,7 @@ static u64 hash_key(u64 key) * @map: The map to initialize. * @capacity: The initial capacity of the map. * - * Return: UDS_SUCCESS or an error code. + * Return: VDO_SUCCESS or an error code. */ static int allocate_buckets(struct int_map *map, size_t capacity) { @@ -174,7 +174,7 @@ static int allocate_buckets(struct int_map *map, size_t capacity) * tells the map to use its own small default). * @map_ptr: Output, a pointer to hold the new int_map. 
* - * Return: UDS_SUCCESS or an error code. + * Return: VDO_SUCCESS or an error code. */ int vdo_int_map_create(size_t initial_capacity, struct int_map **map_ptr) { @@ -183,7 +183,7 @@ int vdo_int_map_create(size_t initial_capacity, struct int_map **map_ptr) size_t capacity; result = vdo_allocate(1, struct int_map, "struct int_map", &map); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; /* Use the default capacity if the caller did not specify one. */ @@ -196,13 +196,13 @@ int vdo_int_map_create(size_t initial_capacity, struct int_map **map_ptr) capacity = capacity * 100 / DEFAULT_LOAD; result = allocate_buckets(map, capacity); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { vdo_int_map_free(vdo_forget(map)); return result; } *map_ptr = map; - return UDS_SUCCESS; + return VDO_SUCCESS; } /** @@ -368,7 +368,7 @@ void *vdo_int_map_get(struct int_map *map, u64 key) * * Resizes and rehashes all the existing entries, storing them in the new buckets. * - * Return: UDS_SUCCESS or an error code. + * Return: VDO_SUCCESS or an error code. */ static int resize_buckets(struct int_map *map) { @@ -384,7 +384,7 @@ static int resize_buckets(struct int_map *map) uds_log_info("%s: attempting resize from %zu to %zu, current size=%zu", __func__, map->capacity, new_capacity, map->size); result = allocate_buckets(map, new_capacity); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { *map = old_map; return result; } @@ -407,7 +407,7 @@ static int resize_buckets(struct int_map *map) /* Destroy the old bucket array. */ vdo_free(vdo_forget(old_map.buckets)); - return UDS_SUCCESS; + return VDO_SUCCESS; } /** @@ -647,7 +647,7 @@ int vdo_int_map_put(struct int_map *map, u64 key, void *new_value, bool update, * large maps). */ result = resize_buckets(map); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; /* diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c index 23549b7e9e6d..b0f1ba810cd0 100644 --- a/drivers/md/dm-vdo/io-submitter.c +++ b/drivers/md/dm-vdo/io-submitter.c @@ -383,7 +383,7 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter result = vdo_allocate_extended(struct io_submitter, thread_count, struct bio_queue_data, "bio submission data", &io_submitter); - if (result != UDS_SUCCESS) + if (result != VDO_SUCCESS) return result; io_submitter->bio_queue_rotation_interval = rotation_interval; diff --git a/drivers/md/dm-vdo/message-stats.c b/drivers/md/dm-vdo/message-stats.c index ec24fff2a21b..18c9d2af8aed 100644 --- a/drivers/md/dm-vdo/message-stats.c +++ b/drivers/md/dm-vdo/message-stats.c @@ -420,7 +420,7 @@ int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen) int result; result = vdo_allocate(1, struct vdo_statistics, __func__, &stats); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { uds_log_error("Cannot allocate memory to write VDO statistics"); return result; } diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c index 2d2cccf89edb..97208c9e0062 100644 --- a/drivers/md/dm-vdo/slab-depot.c +++ b/drivers/md/dm-vdo/slab-depot.c @@ -2379,7 +2379,7 @@ static int allocate_slab_counters(struct vdo_slab *slab) bytes = (slab->reference_block_count * COUNTS_PER_BLOCK) + (2 * BYTES_PER_WORD); result = vdo_allocate(bytes, vdo_refcount_t, "ref counts array", &slab->counters); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { vdo_free(vdo_forget(slab->reference_blocks)); return result; } diff --git 
a/drivers/md/dm-vdo/thread-utils.c b/drivers/md/dm-vdo/thread-utils.c index ad7682784459..b4aa71fffdbf 100644 --- a/drivers/md/dm-vdo/thread-utils.c +++ b/drivers/md/dm-vdo/thread-utils.c @@ -83,7 +83,7 @@ int vdo_create_thread(void (*thread_function)(void *), void *thread_data, int result; result = vdo_allocate(1, struct thread, __func__, &thread); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { uds_log_warning("Error allocating memory for %s", name); return result; } diff --git a/drivers/md/dm-vdo/uds-sysfs.c b/drivers/md/dm-vdo/uds-sysfs.c index d1d5a30b3717..753d81d6f207 100644 --- a/drivers/md/dm-vdo/uds-sysfs.c +++ b/drivers/md/dm-vdo/uds-sysfs.c @@ -35,7 +35,7 @@ static char *buffer_to_string(const char *buf, size_t length) { char *string; - if (vdo_allocate(length + 1, char, __func__, &string) != UDS_SUCCESS) + if (vdo_allocate(length + 1, char, __func__, &string) != VDO_SUCCESS) return NULL; memcpy(string, buf, length); diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c index ae62f260c5ec..b4dd0634a5cb 100644 --- a/drivers/md/dm-vdo/vdo.c +++ b/drivers/md/dm-vdo/vdo.c @@ -545,7 +545,7 @@ int vdo_make(unsigned int instance, struct device_config *config, char **reason, *reason = "Unspecified error"; result = vdo_allocate(1, struct vdo, __func__, &vdo); - if (result != UDS_SUCCESS) { + if (result != VDO_SUCCESS) { *reason = "Cannot allocate VDO"; return result; } -- cgit v1.2.3 From 6a79248b425dcddc749ecbe0a2e1017afb5fdcd2 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Tue, 13 Feb 2024 14:57:33 -0600 Subject: dm vdo permassert: audit all of ASSERT to test for VDO_SUCCESS Also rename ASSERT to VDO_ASSERT and ASSERT_LOG_ONLY to VDO_ASSERT_LOG_ONLY. But re-introduce ASSERT and ASSERT_LOG_ONLY as placeholders for the benefit of dm-vdo/indexer.
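Before the hunks, the contract of the renamed macros in one minimal sketch; example_check_pool() and its particular checks are invented here, while VDO_ASSERT(), VDO_ASSERT_LOG_ONLY(), and VDO_SUCCESS are used the way the converted call sites below use them.

/* Illustrative sketch; assumes permassert.h and vio.h are in scope. */
static int example_check_pool(struct vio_pool *pool)
{
	int result;

	/* VDO_ASSERT() yields a result the caller is expected to test. */
	result = VDO_ASSERT(pool != NULL, "vio pool is allocated");
	if (result != VDO_SUCCESS)
		return result;

	/* VDO_ASSERT_LOG_ONLY() only logs on failure; execution continues. */
	VDO_ASSERT_LOG_ONLY(pool->size == 0,
			    "VIO pool must not have missing entries");
	return VDO_SUCCESS;
}

The audit then consists of renaming ASSERT to VDO_ASSERT at each call site and testing its result against VDO_SUCCESS rather than UDS_SUCCESS.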
Signed-off-by: Mike Snitzer Signed-off-by: Matthew Sakai --- drivers/md/dm-vdo/action-manager.c | 8 +- drivers/md/dm-vdo/block-map.c | 118 ++++++++++++------------- drivers/md/dm-vdo/completion.c | 10 +-- drivers/md/dm-vdo/completion.h | 6 +- drivers/md/dm-vdo/data-vio.c | 108 +++++++++++------------ drivers/md/dm-vdo/data-vio.h | 68 +++++++-------- drivers/md/dm-vdo/dedupe.c | 165 +++++++++++++++++------------------ drivers/md/dm-vdo/dm-vdo-target.c | 38 ++++---- drivers/md/dm-vdo/encodings.c | 156 ++++++++++++++++----------------- drivers/md/dm-vdo/errors.c | 5 +- drivers/md/dm-vdo/flush.c | 22 ++--- drivers/md/dm-vdo/funnel-workqueue.c | 22 ++--- drivers/md/dm-vdo/io-submitter.c | 8 +- drivers/md/dm-vdo/logical-zone.c | 22 ++--- drivers/md/dm-vdo/memory-alloc.c | 12 +-- drivers/md/dm-vdo/packer.c | 12 +-- drivers/md/dm-vdo/permassert.h | 15 ++-- drivers/md/dm-vdo/physical-zone.c | 48 +++++----- drivers/md/dm-vdo/priority-table.c | 4 +- drivers/md/dm-vdo/recovery-journal.c | 60 ++++++------- drivers/md/dm-vdo/repair.c | 12 +-- drivers/md/dm-vdo/slab-depot.c | 116 ++++++++++++------------ drivers/md/dm-vdo/thread-registry.c | 4 +- drivers/md/dm-vdo/vdo.c | 32 +++---- drivers/md/dm-vdo/vio.c | 40 ++++----- drivers/md/dm-vdo/vio.h | 8 +- 26 files changed, 561 insertions(+), 558 deletions(-) (limited to 'drivers/md/dm-vdo/vdo.c') diff --git a/drivers/md/dm-vdo/action-manager.c b/drivers/md/dm-vdo/action-manager.c index 709be4c17d27..a0e5e7077d13 100644 --- a/drivers/md/dm-vdo/action-manager.c +++ b/drivers/md/dm-vdo/action-manager.c @@ -177,8 +177,8 @@ static void apply_to_zone(struct vdo_completion *completion) zone_count_t zone; struct action_manager *manager = as_action_manager(completion); - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == get_acting_zone_thread_id(manager)), - "%s() called on acting zones's thread", __func__); + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == get_acting_zone_thread_id(manager)), + "%s() called on acting zones's thread", __func__); zone = manager->acting_zone++; if (manager->acting_zone == manager->zones) { @@ -357,8 +357,8 @@ bool vdo_schedule_operation_with_context(struct action_manager *manager, { struct action *current_action; - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == manager->initiator_thread_id), - "action initiated from correct thread"); + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == manager->initiator_thread_id), + "action initiated from correct thread"); if (!manager->current_action->in_use) { current_action = manager->current_action; } else if (!manager->current_action->next->in_use) { diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c index 320e76527e2b..b70294d8bb61 100644 --- a/drivers/md/dm-vdo/block-map.c +++ b/drivers/md/dm-vdo/block-map.c @@ -246,16 +246,16 @@ static inline void assert_on_cache_thread(struct vdo_page_cache *cache, { thread_id_t thread_id = vdo_get_callback_thread_id(); - ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id), - "%s() must only be called on cache thread %d, not thread %d", - function_name, cache->zone->thread_id, thread_id); + VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id), + "%s() must only be called on cache thread %d, not thread %d", + function_name, cache->zone->thread_id, thread_id); } /** assert_io_allowed() - Assert that a page cache may issue I/O. 
*/ static inline void assert_io_allowed(struct vdo_page_cache *cache) { - ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state), - "VDO page cache may issue I/O"); + VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state), + "VDO page cache may issue I/O"); } /** report_cache_pressure() - Log and, if enabled, report cache pressure. */ @@ -287,9 +287,9 @@ static const char * __must_check get_page_state_name(enum vdo_page_buffer_state BUILD_BUG_ON(ARRAY_SIZE(state_names) != PAGE_STATE_COUNT); - result = ASSERT(state < ARRAY_SIZE(state_names), - "Unknown page_state value %d", state); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(state < ARRAY_SIZE(state_names), + "Unknown page_state value %d", state); + if (result != VDO_SUCCESS) return "[UNKNOWN PAGE STATE]"; return state_names[state]; @@ -378,8 +378,8 @@ static int __must_check set_info_pbn(struct page_info *info, physical_block_numb struct vdo_page_cache *cache = info->cache; /* Either the new or the old page number must be NO_PAGE. */ - int result = ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE), - "Must free a page before reusing it."); + int result = VDO_ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE), + "Must free a page before reusing it."); if (result != VDO_SUCCESS) return result; @@ -401,13 +401,13 @@ static int reset_page_info(struct page_info *info) { int result; - result = ASSERT(info->busy == 0, "VDO Page must not be busy"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(info->busy == 0, "VDO Page must not be busy"); + if (result != VDO_SUCCESS) return result; - result = ASSERT(!vdo_waitq_has_waiters(&info->waiting), - "VDO Page must not have waiters"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(!vdo_waitq_has_waiters(&info->waiting), + "VDO Page must not have waiters"); + if (result != VDO_SUCCESS) return result; result = set_info_pbn(info, NO_PAGE); @@ -592,29 +592,29 @@ static int __must_check validate_completed_page(struct vdo_page_completion *comp { int result; - result = ASSERT(completion->ready, "VDO Page completion not ready"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(completion->ready, "VDO Page completion not ready"); + if (result != VDO_SUCCESS) return result; - result = ASSERT(completion->info != NULL, - "VDO Page Completion must be complete"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(completion->info != NULL, + "VDO Page Completion must be complete"); + if (result != VDO_SUCCESS) return result; - result = ASSERT(completion->info->pbn == completion->pbn, - "VDO Page Completion pbn must be consistent"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(completion->info->pbn == completion->pbn, + "VDO Page Completion pbn must be consistent"); + if (result != VDO_SUCCESS) return result; - result = ASSERT(is_valid(completion->info), - "VDO Page Completion page must be valid"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(is_valid(completion->info), + "VDO Page Completion page must be valid"); + if (result != VDO_SUCCESS) return result; if (writable) { - result = ASSERT(completion->writable, - "VDO Page Completion must be writable"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(completion->writable, + "VDO Page Completion must be writable"); + if (result != VDO_SUCCESS) return result; } @@ -776,7 +776,7 @@ static int __must_check launch_page_load(struct page_info *info, if (result != VDO_SUCCESS) return result; - result = ASSERT((info->busy == 0), "Page is not busy before loading."); + result = VDO_ASSERT((info->busy == 0), "Page is not busy before 
loading."); if (result != VDO_SUCCESS) return result; @@ -949,8 +949,8 @@ static void discard_a_page(struct vdo_page_cache *cache) return; } - ASSERT_LOG_ONLY(!is_in_flight(info), - "page selected for discard is not in flight"); + VDO_ASSERT_LOG_ONLY(!is_in_flight(info), + "page selected for discard is not in flight"); cache->discard_count++; info->write_status = WRITE_STATUS_DISCARD; @@ -1153,8 +1153,8 @@ void vdo_release_page_completion(struct vdo_completion *completion) discard_info = page_completion->info; } - ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL), - "Page being released after leaving all queues"); + VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL), + "Page being released after leaving all queues"); page_completion->info = NULL; cache = page_completion->cache; @@ -1217,8 +1217,8 @@ void vdo_get_page(struct vdo_page_completion *page_completion, struct page_info *info; assert_on_cache_thread(cache, __func__); - ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL), - "New page completion was not already on a wait queue"); + VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL), + "New page completion was not already on a wait queue"); *page_completion = (struct vdo_page_completion) { .pbn = pbn, @@ -1265,7 +1265,7 @@ void vdo_get_page(struct vdo_page_completion *page_completion, } /* Something horrible has gone wrong. */ - ASSERT_LOG_ONLY(false, "Info found in a usable state."); + VDO_ASSERT_LOG_ONLY(false, "Info found in a usable state."); } /* The page must be fetched. */ @@ -1334,7 +1334,7 @@ int vdo_invalidate_page_cache(struct vdo_page_cache *cache) /* Make sure we don't throw away any dirty pages. */ for (info = cache->infos; info < cache->infos + cache->page_count; info++) { - int result = ASSERT(!is_dirty(info), "cache must have no dirty pages"); + int result = VDO_ASSERT(!is_dirty(info), "cache must have no dirty pages"); if (result != VDO_SUCCESS) return result; @@ -1440,10 +1440,10 @@ static bool __must_check is_not_older(struct block_map_zone *zone, u8 a, u8 b) { int result; - result = ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) && - in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)), - "generation(s) %u, %u are out of range [%u, %u]", - a, b, zone->oldest_generation, zone->generation); + result = VDO_ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) && + in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)), + "generation(s) %u, %u are out of range [%u, %u]", + a, b, zone->oldest_generation, zone->generation); if (result != VDO_SUCCESS) { enter_zone_read_only_mode(zone, result); return true; @@ -1456,8 +1456,8 @@ static void release_generation(struct block_map_zone *zone, u8 generation) { int result; - result = ASSERT((zone->dirty_page_counts[generation] > 0), - "dirty page count underflow for generation %u", generation); + result = VDO_ASSERT((zone->dirty_page_counts[generation] > 0), + "dirty page count underflow for generation %u", generation); if (result != VDO_SUCCESS) { enter_zone_read_only_mode(zone, result); return; @@ -1482,8 +1482,8 @@ static void set_generation(struct block_map_zone *zone, struct tree_page *page, page->generation = new_generation; new_count = ++zone->dirty_page_counts[new_generation]; - result = ASSERT((new_count != 0), "dirty page count overflow for generation %u", - new_generation); + result = VDO_ASSERT((new_count != 0), "dirty page count overflow for generation %u", + new_generation); 
if (result != VDO_SUCCESS) { enter_zone_read_only_mode(zone, result); return; @@ -1698,15 +1698,15 @@ static void release_page_lock(struct data_vio *data_vio, char *what) struct tree_lock *lock_holder; struct tree_lock *lock = &data_vio->tree_lock; - ASSERT_LOG_ONLY(lock->locked, - "release of unlocked block map page %s for key %llu in tree %u", - what, (unsigned long long) lock->key, lock->root_index); + VDO_ASSERT_LOG_ONLY(lock->locked, + "release of unlocked block map page %s for key %llu in tree %u", + what, (unsigned long long) lock->key, lock->root_index); zone = data_vio->logical.zone->block_map_zone; lock_holder = vdo_int_map_remove(zone->loading_pages, lock->key); - ASSERT_LOG_ONLY((lock_holder == lock), - "block map page %s mismatch for key %llu in tree %u", - what, (unsigned long long) lock->key, lock->root_index); + VDO_ASSERT_LOG_ONLY((lock_holder == lock), + "block map page %s mismatch for key %llu in tree %u", + what, (unsigned long long) lock->key, lock->root_index); lock->locked = false; } @@ -2008,8 +2008,8 @@ static void write_expired_elements(struct block_map_zone *zone) list_del_init(&page->entry); - result = ASSERT(!vdo_waiter_is_waiting(&page->waiter), - "Newly expired page not already waiting to write"); + result = VDO_ASSERT(!vdo_waiter_is_waiting(&page->waiter), + "Newly expired page not already waiting to write"); if (result != VDO_SUCCESS) { enter_zone_read_only_mode(zone, result); continue; @@ -2867,8 +2867,8 @@ int vdo_decode_block_map(struct block_map_state_2_0 state, block_count_t logical BUILD_BUG_ON(VDO_BLOCK_MAP_ENTRIES_PER_PAGE != ((VDO_BLOCK_SIZE - sizeof(struct block_map_page)) / sizeof(struct block_map_entry))); - result = ASSERT(cache_size > 0, "block map cache size is specified"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(cache_size > 0, "block map cache size is specified"); + if (result != VDO_SUCCESS) return result; result = vdo_allocate_extended(struct block_map, @@ -2937,7 +2937,7 @@ void vdo_initialize_block_map_from_journal(struct block_map *map, for (z = 0; z < map->zone_count; z++) { struct dirty_lists *dirty_lists = map->zones[z].dirty_lists; - ASSERT_LOG_ONLY(dirty_lists->next_period == 0, "current period not set"); + VDO_ASSERT_LOG_ONLY(dirty_lists->next_period == 0, "current period not set"); dirty_lists->oldest_period = map->current_era_point; dirty_lists->next_period = map->current_era_point + 1; dirty_lists->offset = map->current_era_point % dirty_lists->maximum_age; @@ -2971,8 +2971,8 @@ static void initiate_drain(struct admin_state *state) { struct block_map_zone *zone = container_of(state, struct block_map_zone, state); - ASSERT_LOG_ONLY((zone->active_lookups == 0), - "%s() called with no active lookups", __func__); + VDO_ASSERT_LOG_ONLY((zone->active_lookups == 0), + "%s() called with no active lookups", __func__); if (!vdo_is_state_suspending(state)) { while (zone->dirty_lists->oldest_period < zone->dirty_lists->next_period) diff --git a/drivers/md/dm-vdo/completion.c b/drivers/md/dm-vdo/completion.c index 9e2381dc3683..5ad85334632d 100644 --- a/drivers/md/dm-vdo/completion.c +++ b/drivers/md/dm-vdo/completion.c @@ -60,7 +60,7 @@ void vdo_initialize_completion(struct vdo_completion *completion, static inline void assert_incomplete(struct vdo_completion *completion) { - ASSERT_LOG_ONLY(!completion->complete, "completion is not complete"); + VDO_ASSERT_LOG_ONLY(!completion->complete, "completion is not complete"); } /** @@ -111,10 +111,10 @@ void vdo_enqueue_completion(struct vdo_completion *completion, struct vdo *vdo = 
completion->vdo; thread_id_t thread_id = completion->callback_thread_id; - if (ASSERT(thread_id < vdo->thread_config.thread_count, - "thread_id %u (completion type %d) is less than thread count %u", - thread_id, completion->type, - vdo->thread_config.thread_count) != UDS_SUCCESS) + if (VDO_ASSERT(thread_id < vdo->thread_config.thread_count, + "thread_id %u (completion type %d) is less than thread count %u", + thread_id, completion->type, + vdo->thread_config.thread_count) != VDO_SUCCESS) BUG(); completion->requeue = false; diff --git a/drivers/md/dm-vdo/completion.h b/drivers/md/dm-vdo/completion.h index aa145d73a686..3407f34ce58c 100644 --- a/drivers/md/dm-vdo/completion.h +++ b/drivers/md/dm-vdo/completion.h @@ -85,9 +85,9 @@ static inline void vdo_fail_completion(struct vdo_completion *completion, int re static inline int vdo_assert_completion_type(struct vdo_completion *completion, enum vdo_completion_type expected) { - return ASSERT(expected == completion->type, - "completion type should be %u, not %u", expected, - completion->type); + return VDO_ASSERT(expected == completion->type, + "completion type should be %u, not %u", expected, + completion->type); } static inline void vdo_set_completion_callback(struct vdo_completion *completion, diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c index 3d5054e61330..51c49fad1b8b 100644 --- a/drivers/md/dm-vdo/data-vio.c +++ b/drivers/md/dm-vdo/data-vio.c @@ -232,8 +232,8 @@ static bool check_for_drain_complete_locked(struct data_vio_pool *pool) if (pool->limiter.busy > 0) return false; - ASSERT_LOG_ONLY((pool->discard_limiter.busy == 0), - "no outstanding discard permits"); + VDO_ASSERT_LOG_ONLY((pool->discard_limiter.busy == 0), + "no outstanding discard permits"); return (bio_list_empty(&pool->limiter.new_waiters) && bio_list_empty(&pool->discard_limiter.new_waiters)); @@ -277,9 +277,9 @@ static void acknowledge_data_vio(struct data_vio *data_vio) if (bio == NULL) return; - ASSERT_LOG_ONLY((data_vio->remaining_discard <= - (u32) (VDO_BLOCK_SIZE - data_vio->offset)), - "data_vio to acknowledge is not an incomplete discard"); + VDO_ASSERT_LOG_ONLY((data_vio->remaining_discard <= + (u32) (VDO_BLOCK_SIZE - data_vio->offset)), + "data_vio to acknowledge is not an incomplete discard"); data_vio->user_bio = NULL; vdo_count_bios(&vdo->stats.bios_acknowledged, bio); @@ -443,7 +443,7 @@ static void attempt_logical_block_lock(struct vdo_completion *completion) return; } - result = ASSERT(lock_holder->logical.locked, "logical block lock held"); + result = VDO_ASSERT(lock_holder->logical.locked, "logical block lock held"); if (result != VDO_SUCCESS) { continue_data_vio_with_error(data_vio, result); return; @@ -627,9 +627,9 @@ static void update_limiter(struct limiter *limiter) struct bio_list *waiters = &limiter->waiters; data_vio_count_t available = limiter->limit - limiter->busy; - ASSERT_LOG_ONLY((limiter->release_count <= limiter->busy), - "Release count %u is not more than busy count %u", - limiter->release_count, limiter->busy); + VDO_ASSERT_LOG_ONLY((limiter->release_count <= limiter->busy), + "Release count %u is not more than busy count %u", + limiter->release_count, limiter->busy); get_waiters(limiter); for (; (limiter->release_count > 0) && !bio_list_empty(waiters); limiter->release_count--) @@ -850,8 +850,8 @@ int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size, if (result != VDO_SUCCESS) return result; - ASSERT_LOG_ONLY((discard_limit <= pool_size), - "discard limit does not exceed pool size"); + 
VDO_ASSERT_LOG_ONLY((discard_limit <= pool_size), + "discard limit does not exceed pool size"); initialize_limiter(&pool->discard_limiter, pool, assign_discard_permit, discard_limit); pool->discard_limiter.permitted_waiters = &pool->permitted_discards; @@ -908,15 +908,15 @@ void free_data_vio_pool(struct data_vio_pool *pool) BUG_ON(atomic_read(&pool->processing)); spin_lock(&pool->lock); - ASSERT_LOG_ONLY((pool->limiter.busy == 0), - "data_vio pool must not have %u busy entries when being freed", - pool->limiter.busy); - ASSERT_LOG_ONLY((bio_list_empty(&pool->limiter.waiters) && - bio_list_empty(&pool->limiter.new_waiters)), - "data_vio pool must not have threads waiting to read or write when being freed"); - ASSERT_LOG_ONLY((bio_list_empty(&pool->discard_limiter.waiters) && - bio_list_empty(&pool->discard_limiter.new_waiters)), - "data_vio pool must not have threads waiting to discard when being freed"); + VDO_ASSERT_LOG_ONLY((pool->limiter.busy == 0), + "data_vio pool must not have %u busy entries when being freed", + pool->limiter.busy); + VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->limiter.waiters) && + bio_list_empty(&pool->limiter.new_waiters)), + "data_vio pool must not have threads waiting to read or write when being freed"); + VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->discard_limiter.waiters) && + bio_list_empty(&pool->discard_limiter.new_waiters)), + "data_vio pool must not have threads waiting to discard when being freed"); spin_unlock(&pool->lock); list_for_each_entry_safe(data_vio, tmp, &pool->available, pool_entry) { @@ -961,8 +961,8 @@ void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio) { struct data_vio *data_vio; - ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&pool->state), - "data_vio_pool not quiescent on acquire"); + VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&pool->state), + "data_vio_pool not quiescent on acquire"); bio->bi_private = (void *) jiffies; spin_lock(&pool->lock); @@ -998,8 +998,8 @@ static void initiate_drain(struct admin_state *state) static void assert_on_vdo_cpu_thread(const struct vdo *vdo, const char *name) { - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.cpu_thread), - "%s called on cpu thread", name); + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.cpu_thread), + "%s called on cpu thread", name); } /** @@ -1173,17 +1173,17 @@ static void release_lock(struct data_vio *data_vio, struct lbn_lock *lock) /* The lock is not locked, so it had better not be registered in the lock map. */ struct data_vio *lock_holder = vdo_int_map_get(lock_map, lock->lbn); - ASSERT_LOG_ONLY((data_vio != lock_holder), - "no logical block lock held for block %llu", - (unsigned long long) lock->lbn); + VDO_ASSERT_LOG_ONLY((data_vio != lock_holder), + "no logical block lock held for block %llu", + (unsigned long long) lock->lbn); return; } /* Release the lock by removing the lock from the map. 
*/ lock_holder = vdo_int_map_remove(lock_map, lock->lbn); - ASSERT_LOG_ONLY((data_vio == lock_holder), - "logical block lock mismatch for block %llu", - (unsigned long long) lock->lbn); + VDO_ASSERT_LOG_ONLY((data_vio == lock_holder), + "logical block lock mismatch for block %llu", + (unsigned long long) lock->lbn); lock->locked = false; } @@ -1193,7 +1193,7 @@ static void transfer_lock(struct data_vio *data_vio, struct lbn_lock *lock) struct data_vio *lock_holder, *next_lock_holder; int result; - ASSERT_LOG_ONLY(lock->locked, "lbn_lock with waiters is not locked"); + VDO_ASSERT_LOG_ONLY(lock->locked, "lbn_lock with waiters is not locked"); /* Another data_vio is waiting for the lock, transfer it in a single lock map operation. */ next_lock_holder = @@ -1210,9 +1210,9 @@ static void transfer_lock(struct data_vio *data_vio, struct lbn_lock *lock) return; } - ASSERT_LOG_ONLY((lock_holder == data_vio), - "logical block lock mismatch for block %llu", - (unsigned long long) lock->lbn); + VDO_ASSERT_LOG_ONLY((lock_holder == data_vio), + "logical block lock mismatch for block %llu", + (unsigned long long) lock->lbn); lock->locked = false; /* @@ -1275,10 +1275,10 @@ static void finish_cleanup(struct data_vio *data_vio) { struct vdo_completion *completion = &data_vio->vio.completion; - ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL, - "complete data_vio has no allocation lock"); - ASSERT_LOG_ONLY(data_vio->hash_lock == NULL, - "complete data_vio has no hash lock"); + VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL, + "complete data_vio has no allocation lock"); + VDO_ASSERT_LOG_ONLY(data_vio->hash_lock == NULL, + "complete data_vio has no hash lock"); if ((data_vio->remaining_discard <= VDO_BLOCK_SIZE) || (completion->result != VDO_SUCCESS)) { struct data_vio_pool *pool = completion->vdo->data_vio_pool; @@ -1404,8 +1404,8 @@ void data_vio_allocate_data_block(struct data_vio *data_vio, { struct allocation *allocation = &data_vio->allocation; - ASSERT_LOG_ONLY((allocation->pbn == VDO_ZERO_BLOCK), - "data_vio does not have an allocation"); + VDO_ASSERT_LOG_ONLY((allocation->pbn == VDO_ZERO_BLOCK), + "data_vio does not have an allocation"); allocation->write_lock_type = write_lock_type; allocation->zone = vdo_get_next_allocation_zone(data_vio->logical.zone); allocation->first_allocation_zone = allocation->zone->zone_number; @@ -1796,11 +1796,11 @@ static void compress_data_vio(struct vdo_completion *completion) */ void launch_compress_data_vio(struct data_vio *data_vio) { - ASSERT_LOG_ONLY(!data_vio->is_duplicate, "compressing a non-duplicate block"); - ASSERT_LOG_ONLY(data_vio->hash_lock != NULL, - "data_vio to compress has a hash_lock"); - ASSERT_LOG_ONLY(data_vio_has_allocation(data_vio), - "data_vio to compress has an allocation"); + VDO_ASSERT_LOG_ONLY(!data_vio->is_duplicate, "compressing a non-duplicate block"); + VDO_ASSERT_LOG_ONLY(data_vio->hash_lock != NULL, + "data_vio to compress has a hash_lock"); + VDO_ASSERT_LOG_ONLY(data_vio_has_allocation(data_vio), + "data_vio to compress has an allocation"); /* * There are 4 reasons why a data_vio which has reached this point will not be eligible for @@ -1841,7 +1841,7 @@ static void hash_data_vio(struct vdo_completion *completion) struct data_vio *data_vio = as_data_vio(completion); assert_data_vio_on_cpu_thread(data_vio); - ASSERT_LOG_ONLY(!data_vio->is_zero, "zero blocks should not be hashed"); + VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "zero blocks should not be hashed"); murmurhash3_128(data_vio->vio.data, VDO_BLOCK_SIZE, 0x62ea60be, 
&data_vio->record_name); @@ -1856,7 +1856,7 @@ static void hash_data_vio(struct vdo_completion *completion) static void prepare_for_dedupe(struct data_vio *data_vio) { /* We don't care what thread we are on. */ - ASSERT_LOG_ONLY(!data_vio->is_zero, "must not prepare to dedupe zero blocks"); + VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "must not prepare to dedupe zero blocks"); /* * Before we can dedupe, we need to know the record name, so the first @@ -1929,11 +1929,11 @@ static void acknowledge_write_callback(struct vdo_completion *completion) struct data_vio *data_vio = as_data_vio(completion); struct vdo *vdo = completion->vdo; - ASSERT_LOG_ONLY((!vdo_uses_bio_ack_queue(vdo) || - (vdo_get_callback_thread_id() == vdo->thread_config.bio_ack_thread)), - "%s() called on bio ack queue", __func__); - ASSERT_LOG_ONLY(data_vio_has_flush_generation_lock(data_vio), - "write VIO to be acknowledged has a flush generation lock"); + VDO_ASSERT_LOG_ONLY((!vdo_uses_bio_ack_queue(vdo) || + (vdo_get_callback_thread_id() == vdo->thread_config.bio_ack_thread)), + "%s() called on bio ack queue", __func__); + VDO_ASSERT_LOG_ONLY(data_vio_has_flush_generation_lock(data_vio), + "write VIO to be acknowledged has a flush generation lock"); acknowledge_data_vio(data_vio); if (data_vio->new_mapped.pbn == VDO_ZERO_BLOCK) { /* This is a zero write or discard */ @@ -1998,8 +1998,8 @@ static void handle_allocation_error(struct vdo_completion *completion) static int assert_is_discard(struct data_vio *data_vio) { - int result = ASSERT(data_vio->is_discard, - "data_vio with no block map page is a discard"); + int result = VDO_ASSERT(data_vio->is_discard, + "data_vio with no block map page is a discard"); return ((result == VDO_SUCCESS) ? result : VDO_READ_ONLY); } diff --git a/drivers/md/dm-vdo/data-vio.h b/drivers/md/dm-vdo/data-vio.h index 44fd0d8ccb76..25926b6cd98b 100644 --- a/drivers/md/dm-vdo/data-vio.h +++ b/drivers/md/dm-vdo/data-vio.h @@ -280,7 +280,7 @@ struct data_vio { static inline struct data_vio *vio_as_data_vio(struct vio *vio) { - ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio"); + VDO_ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio"); return container_of(vio, struct data_vio, vio); } @@ -374,9 +374,9 @@ static inline void assert_data_vio_in_hash_zone(struct data_vio *data_vio) * It's odd to use the LBN, but converting the record name to hex is a bit clunky for an * inline, and the LBN better than nothing as an identifier. 
*/ - ASSERT_LOG_ONLY((expected == thread_id), - "data_vio for logical block %llu on thread %u, should be on hash zone thread %u", - (unsigned long long) data_vio->logical.lbn, thread_id, expected); + VDO_ASSERT_LOG_ONLY((expected == thread_id), + "data_vio for logical block %llu on thread %u, should be on hash zone thread %u", + (unsigned long long) data_vio->logical.lbn, thread_id, expected); } static inline void set_data_vio_hash_zone_callback(struct data_vio *data_vio,
@@ -402,9 +402,9 @@ static inline void assert_data_vio_in_logical_zone(struct data_vio *data_vio) thread_id_t expected = data_vio->logical.zone->thread_id; thread_id_t thread_id = vdo_get_callback_thread_id(); - ASSERT_LOG_ONLY((expected == thread_id), - "data_vio for logical block %llu on thread %u, should be on thread %u", - (unsigned long long) data_vio->logical.lbn, thread_id, expected); + VDO_ASSERT_LOG_ONLY((expected == thread_id), + "data_vio for logical block %llu on thread %u, should be on thread %u", + (unsigned long long) data_vio->logical.lbn, thread_id, expected); } static inline void set_data_vio_logical_callback(struct data_vio *data_vio,
@@ -430,10 +430,10 @@ static inline void assert_data_vio_in_allocated_zone(struct data_vio *data_vio) thread_id_t expected = data_vio->allocation.zone->thread_id; thread_id_t thread_id = vdo_get_callback_thread_id(); - ASSERT_LOG_ONLY((expected == thread_id), - "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u", - (unsigned long long) data_vio->allocation.pbn, thread_id, - expected); + VDO_ASSERT_LOG_ONLY((expected == thread_id), + "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u", + (unsigned long long) data_vio->allocation.pbn, thread_id, + expected); } static inline void set_data_vio_allocated_zone_callback(struct data_vio *data_vio,
@@ -460,10 +460,10 @@ static inline void assert_data_vio_in_duplicate_zone(struct data_vio *data_vio) thread_id_t expected = data_vio->duplicate.zone->thread_id; thread_id_t thread_id = vdo_get_callback_thread_id(); - ASSERT_LOG_ONLY((expected == thread_id), - "data_vio for duplicate physical block %llu on thread %u, should be on thread %u", - (unsigned long long) data_vio->duplicate.pbn, thread_id, - expected); + VDO_ASSERT_LOG_ONLY((expected == thread_id), + "data_vio for duplicate physical block %llu on thread %u, should be on thread %u", + (unsigned long long) data_vio->duplicate.pbn, thread_id, + expected); } static inline void set_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
@@ -490,9 +490,9 @@ static inline void assert_data_vio_in_mapped_zone(struct data_vio *data_vio) thread_id_t expected = data_vio->mapped.zone->thread_id; thread_id_t thread_id = vdo_get_callback_thread_id(); - ASSERT_LOG_ONLY((expected == thread_id), - "data_vio for mapped physical block %llu on thread %u, should be on thread %u", - (unsigned long long) data_vio->mapped.pbn, thread_id, expected); + VDO_ASSERT_LOG_ONLY((expected == thread_id), + "data_vio for mapped physical block %llu on thread %u, should be on thread %u", + (unsigned long long) data_vio->mapped.pbn, thread_id, expected); } static inline void set_data_vio_mapped_zone_callback(struct data_vio *data_vio,
@@ -507,10 +507,10 @@ static inline void assert_data_vio_in_new_mapped_zone(struct data_vio *data_vio) thread_id_t expected = data_vio->new_mapped.zone->thread_id; thread_id_t thread_id = vdo_get_callback_thread_id(); - ASSERT_LOG_ONLY((expected == thread_id), - "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u", - (unsigned long long) data_vio->new_mapped.pbn, thread_id, - expected); + VDO_ASSERT_LOG_ONLY((expected == thread_id), + "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u", + (unsigned long long) data_vio->new_mapped.pbn, thread_id, + expected); } static inline void set_data_vio_new_mapped_zone_callback(struct data_vio *data_vio,
@@ -525,10 +525,10 @@ static inline void assert_data_vio_in_journal_zone(struct data_vio *data_vio) thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread; thread_id_t thread_id = vdo_get_callback_thread_id(); - ASSERT_LOG_ONLY((journal_thread == thread_id), - "data_vio for logical block %llu on thread %u, should be on journal thread %u", - (unsigned long long) data_vio->logical.lbn, thread_id, - journal_thread); + VDO_ASSERT_LOG_ONLY((journal_thread == thread_id), + "data_vio for logical block %llu on thread %u, should be on journal thread %u", + (unsigned long long) data_vio->logical.lbn, thread_id, + journal_thread); } static inline void set_data_vio_journal_callback(struct data_vio *data_vio,
@@ -555,10 +555,10 @@ static inline void assert_data_vio_in_packer_zone(struct data_vio *data_vio) thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread; thread_id_t thread_id = vdo_get_callback_thread_id(); - ASSERT_LOG_ONLY((packer_thread == thread_id), - "data_vio for logical block %llu on thread %u, should be on packer thread %u", - (unsigned long long) data_vio->logical.lbn, thread_id, - packer_thread); + VDO_ASSERT_LOG_ONLY((packer_thread == thread_id), + "data_vio for logical block %llu on thread %u, should be on packer thread %u", + (unsigned long long) data_vio->logical.lbn, thread_id, + packer_thread); } static inline void set_data_vio_packer_callback(struct data_vio *data_vio,
@@ -585,10 +585,10 @@ static inline void assert_data_vio_on_cpu_thread(struct data_vio *data_vio) thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread; thread_id_t thread_id = vdo_get_callback_thread_id(); - ASSERT_LOG_ONLY((cpu_thread == thread_id), - "data_vio for logical block %llu on thread %u, should be on cpu thread %u", - (unsigned long long) data_vio->logical.lbn, thread_id, - cpu_thread); + VDO_ASSERT_LOG_ONLY((cpu_thread == thread_id), + "data_vio for logical block %llu on thread %u, should be on cpu thread %u", + (unsigned long long) data_vio->logical.lbn, thread_id, + cpu_thread); } static inline void set_data_vio_cpu_callback(struct data_vio *data_vio,
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c index 7cdbe825116f..52bdf657db64 100644 --- a/drivers/md/dm-vdo/dedupe.c +++ b/drivers/md/dm-vdo/dedupe.c
@@ -327,8 +327,8 @@ static inline struct hash_zones *as_hash_zones(struct vdo_completion *completion static inline void assert_in_hash_zone(struct hash_zone *zone, const char *name) { - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id), - "%s called on hash zone thread", name); + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id), + "%s called on hash zone thread", name); } static inline bool change_context_state(struct dedupe_context *context, int old, int new)
@@ -404,8 +404,8 @@ static void assert_hash_lock_agent(struct data_vio *data_vio, const char *where) { /* Not safe to access the agent field except from the hash zone.
*/ assert_data_vio_in_hash_zone(data_vio); - ASSERT_LOG_ONLY(data_vio == data_vio->hash_lock->agent, - "%s must be for the hash lock agent", where); + VDO_ASSERT_LOG_ONLY(data_vio == data_vio->hash_lock->agent, + "%s must be for the hash lock agent", where); } /** @@ -416,9 +416,8 @@ static void assert_hash_lock_agent(struct data_vio *data_vio, const char *where) */ static void set_duplicate_lock(struct hash_lock *hash_lock, struct pbn_lock *pbn_lock) { - ASSERT_LOG_ONLY((hash_lock->duplicate_lock == NULL), - "hash lock must not already hold a duplicate lock"); - + VDO_ASSERT_LOG_ONLY((hash_lock->duplicate_lock == NULL), + "hash lock must not already hold a duplicate lock"); pbn_lock->holder_count += 1; hash_lock->duplicate_lock = pbn_lock; } @@ -446,12 +445,12 @@ static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock) struct hash_lock *old_lock = data_vio->hash_lock; if (old_lock != NULL) { - ASSERT_LOG_ONLY(data_vio->hash_zone != NULL, - "must have a hash zone when holding a hash lock"); - ASSERT_LOG_ONLY(!list_empty(&data_vio->hash_lock_entry), - "must be on a hash lock ring when holding a hash lock"); - ASSERT_LOG_ONLY(old_lock->reference_count > 0, - "hash lock reference must be counted"); + VDO_ASSERT_LOG_ONLY(data_vio->hash_zone != NULL, + "must have a hash zone when holding a hash lock"); + VDO_ASSERT_LOG_ONLY(!list_empty(&data_vio->hash_lock_entry), + "must be on a hash lock ring when holding a hash lock"); + VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 0, + "hash lock reference must be counted"); if ((old_lock->state != VDO_HASH_LOCK_BYPASSING) && (old_lock->state != VDO_HASH_LOCK_UNLOCKING)) { @@ -459,9 +458,9 @@ static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock) * If the reference count goes to zero in a non-terminal state, we're most * likely leaking this lock. 
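 * (Outside the BYPASSING and UNLOCKING states at least one other reference must remain, which is exactly what the assertion below verifies.)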
*/ - ASSERT_LOG_ONLY(old_lock->reference_count > 1, - "hash locks should only become unreferenced in a terminal state, not state %s", - get_hash_lock_state_name(old_lock->state)); + VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 1, + "hash locks should only become unreferenced in a terminal state, not state %s", + get_hash_lock_state_name(old_lock->state)); } list_del_init(&data_vio->hash_lock_entry); @@ -641,8 +640,8 @@ static void finish_unlocking(struct vdo_completion *completion) assert_hash_lock_agent(agent, __func__); - ASSERT_LOG_ONLY(lock->duplicate_lock == NULL, - "must have released the duplicate lock for the hash lock"); + VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL, + "must have released the duplicate lock for the hash lock"); if (!lock->verified) { /* @@ -696,8 +695,8 @@ static void unlock_duplicate_pbn(struct vdo_completion *completion) struct hash_lock *lock = agent->hash_lock; assert_data_vio_in_duplicate_zone(agent); - ASSERT_LOG_ONLY(lock->duplicate_lock != NULL, - "must have a duplicate lock to release"); + VDO_ASSERT_LOG_ONLY(lock->duplicate_lock != NULL, + "must have a duplicate lock to release"); vdo_release_physical_zone_pbn_lock(agent->duplicate.zone, agent->duplicate.pbn, vdo_forget(lock->duplicate_lock)); @@ -799,8 +798,8 @@ static void start_updating(struct hash_lock *lock, struct data_vio *agent) { lock->state = VDO_HASH_LOCK_UPDATING; - ASSERT_LOG_ONLY(lock->verified, "new advice should have been verified"); - ASSERT_LOG_ONLY(lock->update_advice, "should only update advice if needed"); + VDO_ASSERT_LOG_ONLY(lock->verified, "new advice should have been verified"); + VDO_ASSERT_LOG_ONLY(lock->update_advice, "should only update advice if needed"); agent->last_async_operation = VIO_ASYNC_OP_UPDATE_DEDUPE_INDEX; set_data_vio_hash_zone_callback(agent, finish_updating); @@ -822,9 +821,9 @@ static void finish_deduping(struct hash_lock *lock, struct data_vio *data_vio) { struct data_vio *agent = data_vio; - ASSERT_LOG_ONLY(lock->agent == NULL, "shouldn't have an agent in DEDUPING"); - ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters), - "shouldn't have any lock waiters in DEDUPING"); + VDO_ASSERT_LOG_ONLY(lock->agent == NULL, "shouldn't have an agent in DEDUPING"); + VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters), + "shouldn't have any lock waiters in DEDUPING"); /* Just release the lock reference if other data_vios are still deduping. */ if (lock->reference_count > 1) { @@ -879,8 +878,8 @@ static int __must_check acquire_lock(struct hash_zone *zone, * Borrow and prepare a lock from the pool so we don't have to do two int_map accesses * in the common case of no lock contention. */ - result = ASSERT(!list_empty(&zone->lock_pool), - "never need to wait for a free hash lock"); + result = VDO_ASSERT(!list_empty(&zone->lock_pool), + "never need to wait for a free hash lock"); if (result != VDO_SUCCESS) return result; @@ -902,11 +901,11 @@ static int __must_check acquire_lock(struct hash_zone *zone, if (replace_lock != NULL) { /* On mismatch put the old lock back and return a severe error */ - ASSERT_LOG_ONLY(lock == replace_lock, - "old lock must have been in the lock map"); + VDO_ASSERT_LOG_ONLY(lock == replace_lock, + "old lock must have been in the lock map"); /* TODO: Check earlier and bail out? 
*/ - ASSERT_LOG_ONLY(replace_lock->registered, - "old lock must have been marked registered"); + VDO_ASSERT_LOG_ONLY(replace_lock->registered, + "old lock must have been marked registered"); replace_lock->registered = false; }
@@ -1018,15 +1017,15 @@ static void start_deduping(struct hash_lock *lock, struct data_vio *agent, * deduplicate against it. */ if (lock->duplicate_lock == NULL) { - ASSERT_LOG_ONLY(!vdo_is_state_compressed(agent->new_mapped.state), - "compression must have shared a lock"); - ASSERT_LOG_ONLY(agent_is_done, - "agent must have written the new duplicate"); + VDO_ASSERT_LOG_ONLY(!vdo_is_state_compressed(agent->new_mapped.state), + "compression must have shared a lock"); + VDO_ASSERT_LOG_ONLY(agent_is_done, + "agent must have written the new duplicate"); transfer_allocation_lock(agent); } - ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(lock->duplicate_lock), - "duplicate_lock must be a PBN read lock"); + VDO_ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(lock->duplicate_lock), + "duplicate_lock must be a PBN read lock"); /* * This state is not like any of the other states. There is no designated agent--the agent
@@ -1204,7 +1203,7 @@ static void start_verifying(struct hash_lock *lock, struct data_vio *agent) agent->scratch_block); lock->state = VDO_HASH_LOCK_VERIFYING; - ASSERT_LOG_ONLY(!lock->verified, "hash lock only verifies advice once"); + VDO_ASSERT_LOG_ONLY(!lock->verified, "hash lock only verifies advice once"); agent->last_async_operation = VIO_ASYNC_OP_VERIFY_DUPLICATION; result = vio_reset_bio(vio, buffer, verify_endio, REQ_OP_READ,
@@ -1234,8 +1233,8 @@ static void finish_locking(struct vdo_completion *completion) assert_hash_lock_agent(agent, __func__); if (!agent->is_duplicate) { - ASSERT_LOG_ONLY(lock->duplicate_lock == NULL, - "must not hold duplicate_lock if not flagged as a duplicate"); + VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL, + "must not hold duplicate_lock if not flagged as a duplicate"); /* * LOCKING -> WRITING transition: The advice block is being modified or has no * available references, so try to write or compress the data, remembering to
@@ -1247,8 +1246,8 @@ static void finish_locking(struct vdo_completion *completion) return; } - ASSERT_LOG_ONLY(lock->duplicate_lock != NULL, - "must hold duplicate_lock if flagged as a duplicate"); + VDO_ASSERT_LOG_ONLY(lock->duplicate_lock != NULL, + "must hold duplicate_lock if flagged as a duplicate"); if (!lock->verified) { /*
@@ -1418,8 +1417,8 @@ static void lock_duplicate_pbn(struct vdo_completion *completion) */ static void start_locking(struct hash_lock *lock, struct data_vio *agent) { - ASSERT_LOG_ONLY(lock->duplicate_lock == NULL, - "must not acquire a duplicate lock when already holding it"); + VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL, + "must not acquire a duplicate lock when already holding it"); lock->state = VDO_HASH_LOCK_LOCKING;
@@ -1725,8 +1724,8 @@ static void start_querying(struct hash_lock *lock, struct data_vio *data_vio) */ static void report_bogus_lock_state(struct hash_lock *lock, struct data_vio *data_vio) { - ASSERT_LOG_ONLY(false, "hash lock must not be in unimplemented state %s", - get_hash_lock_state_name(lock->state)); + VDO_ASSERT_LOG_ONLY(false, "hash lock must not be in unimplemented state %s", + get_hash_lock_state_name(lock->state)); continue_data_vio_with_error(data_vio, VDO_LOCK_ERROR); }
@@ -1748,8 +1747,8 @@ void vdo_continue_hash_lock(struct vdo_completion *completion) switch (lock->state) { case VDO_HASH_LOCK_WRITING: - ASSERT_LOG_ONLY(data_vio == lock->agent, - "only the lock agent may continue the lock"); + VDO_ASSERT_LOG_ONLY(data_vio == lock->agent, + "only the lock agent may continue the lock"); finish_writing(lock, data_vio); break;
@@ -1815,18 +1814,18 @@ static inline int assert_hash_lock_preconditions(const struct data_vio *data_vio int result; /* FIXME: BUG_ON() and/or enter read-only mode? */ - result = ASSERT(data_vio->hash_lock == NULL, - "must not already hold a hash lock"); + result = VDO_ASSERT(data_vio->hash_lock == NULL, + "must not already hold a hash lock"); if (result != VDO_SUCCESS) return result; - result = ASSERT(list_empty(&data_vio->hash_lock_entry), - "must not already be a member of a hash lock ring"); + result = VDO_ASSERT(list_empty(&data_vio->hash_lock_entry), + "must not already be a member of a hash lock ring"); if (result != VDO_SUCCESS) return result; - return ASSERT(data_vio->recovery_sequence_number == 0, - "must not hold a recovery lock when getting a hash lock"); + return VDO_ASSERT(data_vio->recovery_sequence_number == 0, + "must not hold a recovery lock when getting a hash lock"); }
@@ -1933,24 +1932,24 @@ void vdo_release_hash_lock(struct data_vio *data_vio) struct hash_lock *removed; removed = vdo_int_map_remove(zone->hash_lock_map, lock_key); - ASSERT_LOG_ONLY(lock == removed, - "hash lock being released must have been mapped"); + VDO_ASSERT_LOG_ONLY(lock == removed, + "hash lock being released must have been mapped"); } else { - ASSERT_LOG_ONLY(lock != vdo_int_map_get(zone->hash_lock_map, lock_key), - "unregistered hash lock must not be in the lock map"); - } - - ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters), - "hash lock returned to zone must have no waiters"); - ASSERT_LOG_ONLY((lock->duplicate_lock == NULL), - "hash lock returned to zone must not reference a PBN lock"); - ASSERT_LOG_ONLY((lock->state == VDO_HASH_LOCK_BYPASSING), - "returned hash lock must not be in use with state %s", - get_hash_lock_state_name(lock->state)); - ASSERT_LOG_ONLY(list_empty(&lock->pool_node), - "hash lock returned to zone must not be in a pool ring"); - ASSERT_LOG_ONLY(list_empty(&lock->duplicate_ring), - "hash lock returned to zone must not reference DataVIOs"); + VDO_ASSERT_LOG_ONLY(lock != vdo_int_map_get(zone->hash_lock_map, lock_key), + "unregistered hash lock must not be in the lock map"); + } + + VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters), + "hash lock returned to zone must have no waiters"); + VDO_ASSERT_LOG_ONLY((lock->duplicate_lock == NULL), + "hash lock returned to zone must not reference a PBN lock"); + VDO_ASSERT_LOG_ONLY((lock->state == VDO_HASH_LOCK_BYPASSING), + "returned hash lock must not be in use with state %s", + get_hash_lock_state_name(lock->state)); + VDO_ASSERT_LOG_ONLY(list_empty(&lock->pool_node), + "hash lock returned to zone must not be in a pool ring"); + VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_ring), + "hash lock returned to zone must not reference DataVIOs"); return_hash_lock_to_pool(zone, lock); }
@@ -1965,13 +1964,13 @@ static void transfer_allocation_lock(struct data_vio *data_vio) struct allocation *allocation = &data_vio->allocation; struct hash_lock *hash_lock = data_vio->hash_lock; - ASSERT_LOG_ONLY(data_vio->new_mapped.pbn == allocation->pbn, - "transferred lock must be for the block written"); + VDO_ASSERT_LOG_ONLY(data_vio->new_mapped.pbn == allocation->pbn, + "transferred lock must be for the block written"); allocation->pbn = VDO_ZERO_BLOCK; - ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(allocation->lock), - "must have downgraded the allocation lock before transfer"); + VDO_ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(allocation->lock), + "must have downgraded the allocation lock before transfer"); hash_lock->duplicate = data_vio->new_mapped; data_vio->duplicate = data_vio->new_mapped;
@@ -1997,10 +1996,10 @@ void vdo_share_compressed_write_lock(struct data_vio *data_vio, { bool claimed; - ASSERT_LOG_ONLY(vdo_get_duplicate_lock(data_vio) == NULL, - "a duplicate PBN lock should not exist when writing"); - ASSERT_LOG_ONLY(vdo_is_state_compressed(data_vio->new_mapped.state), - "lock transfer must be for a compressed write"); + VDO_ASSERT_LOG_ONLY(vdo_get_duplicate_lock(data_vio) == NULL, + "a duplicate PBN lock should not exist when writing"); + VDO_ASSERT_LOG_ONLY(vdo_is_state_compressed(data_vio->new_mapped.state), + "lock transfer must be for a compressed write"); assert_data_vio_in_new_mapped_zone(data_vio); /* First sharer downgrades the lock. */
@@ -2020,7 +2019,7 @@ void vdo_share_compressed_write_lock(struct data_vio *data_vio, * deduplicating against it before our incRef. */ claimed = vdo_claim_pbn_lock_increment(pbn_lock); - ASSERT_LOG_ONLY(claimed, "impossible to fail to claim an initial increment"); + VDO_ASSERT_LOG_ONLY(claimed, "impossible to fail to claim an initial increment"); } static void dedupe_kobj_release(struct kobject *directory)
@@ -2296,8 +2295,8 @@ static void finish_index_operation(struct uds_request *request) */ if (!change_context_state(context, DEDUPE_CONTEXT_TIMED_OUT, DEDUPE_CONTEXT_TIMED_OUT_COMPLETE)) { - ASSERT_LOG_ONLY(false, "uds request was timed out (state %d)", - atomic_read(&context->state)); + VDO_ASSERT_LOG_ONLY(false, "uds request was timed out (state %d)", + atomic_read(&context->state)); } uds_funnel_queue_put(context->zone->timed_out_complete, &context->queue_entry);
@@ -2341,7 +2340,7 @@ static void check_for_drain_complete(struct hash_zone *zone) if (recycled > 0) WRITE_ONCE(zone->active, zone->active - recycled); - ASSERT_LOG_ONLY(READ_ONCE(zone->active) == 0, "all contexts inactive"); + VDO_ASSERT_LOG_ONLY(READ_ONCE(zone->active) == 0, "all contexts inactive"); vdo_finish_draining(&zone->state); }
diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c index 90ba379f8d3e..e493b2fec90b 100644 --- a/drivers/md/dm-vdo/dm-vdo-target.c +++ b/drivers/md/dm-vdo/dm-vdo-target.c
@@ -904,8 +904,8 @@ static int vdo_map_bio(struct dm_target *ti, struct bio *bio) struct vdo_work_queue *current_work_queue; const struct admin_state_code *code = vdo_get_admin_state_code(&vdo->admin.state); - ASSERT_LOG_ONLY(code->normal, "vdo should not receive bios while in state %s", - code->name); + VDO_ASSERT_LOG_ONLY(code->normal, "vdo should not receive bios while in state %s", + code->name); /* Count all incoming bios. */ vdo_count_bios(&vdo->stats.bios_in, bio);
@@ -1244,9 +1244,9 @@ static int perform_admin_operation(struct vdo *vdo, u32 starting_phase, /* Assert that we are operating on the correct thread for the current phase.
*/ static void assert_admin_phase_thread(struct vdo *vdo, const char *what) { - ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo), - "%s on correct thread for %s", what, - ADMIN_PHASE_NAMES[vdo->admin.phase]); + VDO_ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo), + "%s on correct thread for %s", what, + ADMIN_PHASE_NAMES[vdo->admin.phase]); } /**
@@ -1424,11 +1424,11 @@ static void release_instance(unsigned int instance) { mutex_lock(&instances_lock); if (instance >= instances.bit_count) { - ASSERT_LOG_ONLY(false, - "instance number %u must be less than bit count %u", - instance, instances.bit_count); + VDO_ASSERT_LOG_ONLY(false, + "instance number %u must be less than bit count %u", + instance, instances.bit_count); } else if (test_bit(instance, instances.words) == 0) { - ASSERT_LOG_ONLY(false, "instance number %u must be allocated", instance); + VDO_ASSERT_LOG_ONLY(false, "instance number %u must be allocated", instance); } else { __clear_bit(instance, instances.words); instances.count -= 1;
@@ -1577,9 +1577,9 @@ static int allocate_instance(unsigned int *instance_ptr) if (instance >= instances.bit_count) { /* Nothing free after next, so wrap around to instance zero. */ instance = find_first_zero_bit(instances.words, instances.bit_count); - result = ASSERT(instance < instances.bit_count, - "impossibly, no zero bit found"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(instance < instances.bit_count, + "impossibly, no zero bit found"); + if (result != VDO_SUCCESS) return result; }
@@ -1729,8 +1729,8 @@ static int prepare_to_grow_physical(struct vdo *vdo, block_count_t new_physical_ uds_log_info("Preparing to resize physical to %llu", (unsigned long long) new_physical_blocks); - ASSERT_LOG_ONLY((new_physical_blocks > current_physical_blocks), - "New physical size is larger than current physical size"); + VDO_ASSERT_LOG_ONLY((new_physical_blocks > current_physical_blocks), + "New physical size is larger than current physical size"); result = perform_admin_operation(vdo, PREPARE_GROW_PHYSICAL_PHASE_START, check_may_grow_physical, finish_operation_callback,
@@ -1829,8 +1829,8 @@ static int prepare_to_modify(struct dm_target *ti, struct device_config *config, uds_log_info("Preparing to resize logical to %llu", (unsigned long long) config->logical_blocks); - ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks), - "New logical size is larger than current size"); + VDO_ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks), - wait
+ VDO_ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks), + "New logical size is larger than current size"); result = vdo_prepare_to_grow_block_map(vdo->block_map, config->logical_blocks);
@@ -2890,9 +2890,9 @@ static void vdo_module_destroy(void) if (dm_registered) dm_unregister_target(&vdo_target_bio); - ASSERT_LOG_ONLY(instances.count == 0, - "should have no instance numbers still in use, but have %u", - instances.count); + VDO_ASSERT_LOG_ONLY(instances.count == 0, + "should have no instance numbers still in use, but have %u", + instances.count); vdo_free(instances.words); memset(&instances, 0, sizeof(struct instance_tracker));
diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c index a97771fe0a43..e24c31bc3524 100644 --- a/drivers/md/dm-vdo/encodings.c +++ b/drivers/md/dm-vdo/encodings.c
@@ -320,8 +320,8 @@ int __must_check vdo_parse_geometry_block(u8 *block, struct volume_geometry *geo decode_volume_geometry(block, &offset, geometry, header.version.major_version); - result = ASSERT(header.size == offset + sizeof(u32), - "should have decoded up to the geometry checksum"); + result = VDO_ASSERT(header.size == offset + sizeof(u32), + "should have decoded up to the geometry checksum"); if (result != VDO_SUCCESS) return result;
@@ -380,25 +380,25 @@ static int decode_block_map_state_2_0(u8 *buffer, size_t *offset, initial_offset = *offset; decode_u64_le(buffer, offset, &flat_page_origin); - result = ASSERT(flat_page_origin == VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN, - "Flat page origin must be %u (recorded as %llu)", - VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN, - (unsigned long long) state->flat_page_origin); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(flat_page_origin == VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN, + "Flat page origin must be %u (recorded as %llu)", + VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN, + (unsigned long long) state->flat_page_origin); + if (result != VDO_SUCCESS) return result; decode_u64_le(buffer, offset, &flat_page_count); - result = ASSERT(flat_page_count == 0, - "Flat page count must be 0 (recorded as %llu)", - (unsigned long long) state->flat_page_count); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(flat_page_count == 0, + "Flat page count must be 0 (recorded as %llu)", + (unsigned long long) state->flat_page_count); + if (result != VDO_SUCCESS) return result; decode_u64_le(buffer, offset, &root_origin); decode_u64_le(buffer, offset, &root_count); - result = ASSERT(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset, - "decoded block map component size must match header size"); + result = VDO_ASSERT(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset, + "decoded block map component size must match header size"); if (result != VDO_SUCCESS) return result;
@@ -425,8 +425,8 @@ static void encode_block_map_state_2_0(u8 *buffer, size_t *offset, encode_u64_le(buffer, offset, state.root_origin); encode_u64_le(buffer, offset, state.root_count); - ASSERT_LOG_ONLY(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset, - "encoded block map component size must match header size"); + VDO_ASSERT_LOG_ONLY(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset, + "encoded block map component size must match header size"); } /**
@@ -477,8 +477,8 @@ static void encode_recovery_journal_state_7_0(u8 *buffer, size_t *offset, encode_u64_le(buffer, offset, state.logical_blocks_used); encode_u64_le(buffer, offset, state.block_map_data_blocks); - ASSERT_LOG_ONLY(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset, - "encoded recovery journal component size must match header size"); + VDO_ASSERT_LOG_ONLY(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset, + "encoded recovery journal component size must match header size"); } /**
@@ -508,9 +508,9 @@ static int __must_check decode_recovery_journal_state_7_0(u8 *buffer, size_t *of decode_u64_le(buffer, offset, &logical_blocks_used); decode_u64_le(buffer, offset, &block_map_data_blocks); - result = ASSERT(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset, - "decoded recovery journal component size must match header size"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset, + "decoded recovery journal component size must match header size"); + if (result != VDO_SUCCESS) return result; *state = (struct recovery_journal_state_7_0) {
@@ -566,8 +566,8 @@ static void encode_slab_depot_state_2_0(u8 *buffer, size_t *offset, encode_u64_le(buffer, offset, state.last_block); buffer[(*offset)++] = state.zone_count; - ASSERT_LOG_ONLY(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset, - "encoded block map component size must match header size"); + VDO_ASSERT_LOG_ONLY(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset, + "encoded block map component size must match header size"); } /**
@@ -618,9 +618,9 @@ static int decode_slab_depot_state_2_0(u8 *buffer, size_t *offset, decode_u64_le(buffer, offset, &last_block); zone_count = buffer[(*offset)++]; - result = ASSERT(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset, - "decoded slab depot component size must match header size"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset, + "decoded slab depot component size must match header size"); + if (result != VDO_SUCCESS) return result; *state = (struct slab_depot_state_2_0) {
@@ -970,7 +970,7 @@ struct partition *vdo_get_known_partition(struct layout *layout, enum partition_ struct partition *partition; int result = vdo_get_partition(layout, id, &partition); - ASSERT_LOG_ONLY(result == VDO_SUCCESS, "layout has expected partition: %u", id); + VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "layout has expected partition: %u", id); return partition; }
@@ -982,8 +982,8 @@ static void encode_layout(u8 *buffer, size_t *offset, const struct layout *layou struct header header = VDO_LAYOUT_HEADER_3_0; BUILD_BUG_ON(sizeof(enum partition_id) != sizeof(u8)); - ASSERT_LOG_ONLY(layout->num_partitions <= U8_MAX, - "layout partition count must fit in a byte"); + VDO_ASSERT_LOG_ONLY(layout->num_partitions <= U8_MAX, + "layout partition count must fit in a byte"); vdo_encode_header(buffer, offset, &header);
@@ -992,8 +992,8 @@ static void encode_layout(u8 *buffer, size_t *offset, const struct layout *layou encode_u64_le(buffer, offset, layout->last_free); buffer[(*offset)++] = layout->num_partitions; - ASSERT_LOG_ONLY(sizeof(struct layout_3_0) == *offset - initial_offset, - "encoded size of a layout header must match structure"); + VDO_ASSERT_LOG_ONLY(sizeof(struct layout_3_0) == *offset - initial_offset, + "encoded size of a layout header must match structure"); for (partition = layout->head; partition != NULL; partition = partition->next) { buffer[(*offset)++] = partition->id;
@@ -1003,8 +1003,8 @@ static void encode_layout(u8 *buffer, size_t *offset, const struct layout *layou encode_u64_le(buffer, offset, partition->count); } - ASSERT_LOG_ONLY(header.size == *offset - initial_offset, - "encoded size of a layout must match header size"); + VDO_ASSERT_LOG_ONLY(header.size == *offset - initial_offset, + "encoded size of a layout must match header size"); } static int decode_layout(u8 *buffer, size_t *offset, physical_block_number_t start,
@@ -1035,8 +1035,8 @@ static int decode_layout(u8 *buffer, size_t *offset, physical_block_number_t sta .partition_count = partition_count, }; - result = ASSERT(sizeof(struct layout_3_0) == *offset - initial_offset, - "decoded size of a layout header must match structure"); + result = VDO_ASSERT(sizeof(struct layout_3_0) == *offset - initial_offset, + "decoded size of a layout header must match structure"); if (result != VDO_SUCCESS) return result;
@@ -1208,29 +1208,29 @@ int vdo_validate_config(const struct vdo_config *config, struct slab_config slab_config; int result; - result = ASSERT(config->slab_size > 0, "slab size unspecified"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(config->slab_size > 0, "slab size unspecified"); + if (result != VDO_SUCCESS) return result; - result = ASSERT(is_power_of_2(config->slab_size), - "slab size must be a power of two"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(is_power_of_2(config->slab_size), + "slab size must be a power of two"); + if (result != VDO_SUCCESS) return result; - result = ASSERT(config->slab_size <= (1 << MAX_VDO_SLAB_BITS), - "slab size must be less than or equal to 2^%d", - MAX_VDO_SLAB_BITS); + result = VDO_ASSERT(config->slab_size <= (1 << MAX_VDO_SLAB_BITS), + "slab size must be less than or equal to 2^%d", + MAX_VDO_SLAB_BITS); if (result != VDO_SUCCESS) return result; - result = ASSERT(config->slab_journal_blocks >= MINIMUM_VDO_SLAB_JOURNAL_BLOCKS, - "slab journal size meets minimum size"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(config->slab_journal_blocks >= MINIMUM_VDO_SLAB_JOURNAL_BLOCKS, + "slab journal size meets minimum size"); + if (result != VDO_SUCCESS) return result; - result = ASSERT(config->slab_journal_blocks <= config->slab_size, - "slab journal size is within expected bound"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(config->slab_journal_blocks <= config->slab_size, + "slab journal size is within expected bound"); + if (result != VDO_SUCCESS) return result; result = vdo_configure_slab(config->slab_size, config->slab_journal_blocks,
@@ -1238,20 +1238,20 @@ int vdo_validate_config(const struct vdo_config *config, if (result != VDO_SUCCESS) return result; - result = ASSERT((slab_config.data_blocks >= 1), - "slab must be able to hold at least one block"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT((slab_config.data_blocks >= 1), + "slab must be able to hold at least one block"); + if (result != VDO_SUCCESS) return result; - result = ASSERT(config->physical_blocks > 0, "physical blocks unspecified"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(config->physical_blocks > 0, "physical blocks unspecified"); + if (result != VDO_SUCCESS) return result; - result = ASSERT(config->physical_blocks <= MAXIMUM_VDO_PHYSICAL_BLOCKS, - "physical block count %llu exceeds maximum %llu", - (unsigned long long) config->physical_blocks, - (unsigned long long) MAXIMUM_VDO_PHYSICAL_BLOCKS); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(config->physical_blocks <= MAXIMUM_VDO_PHYSICAL_BLOCKS, + "physical block count %llu exceeds maximum %llu", + (unsigned long long) config->physical_blocks, + (unsigned long long) MAXIMUM_VDO_PHYSICAL_BLOCKS); + if (result != VDO_SUCCESS) return VDO_OUT_OF_RANGE; if (physical_block_count != config->physical_blocks) {
@@ -1262,9 +1262,9 @@ int vdo_validate_config(const struct vdo_config *config, } if (logical_block_count > 0) { - result = ASSERT((config->logical_blocks > 0), - "logical blocks unspecified"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT((config->logical_blocks > 0), + "logical blocks unspecified"); + if (result != VDO_SUCCESS) return result; if (logical_block_count != config->logical_blocks) {
@@ -1275,19 +1275,19 @@ int vdo_validate_config(const struct vdo_config *config, } } - result = ASSERT(config->logical_blocks <= MAXIMUM_VDO_LOGICAL_BLOCKS, - "logical blocks too large"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(config->logical_blocks <= MAXIMUM_VDO_LOGICAL_BLOCKS, + "logical blocks too large"); + if (result != VDO_SUCCESS) return result; - result = ASSERT(config->recovery_journal_size > 0, - "recovery journal size unspecified"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(config->recovery_journal_size > 0, + "recovery journal size unspecified"); + if (result != VDO_SUCCESS) return result; - result = ASSERT(is_power_of_2(config->recovery_journal_size), - "recovery journal size must be a power of two"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(is_power_of_2(config->recovery_journal_size), + "recovery journal size must be a power of two"); + if (result != VDO_SUCCESS) return result; return result;
@@ -1341,8 +1341,8 @@ static int __must_check decode_components(u8 *buffer, size_t *offset, if (result != VDO_SUCCESS) return result; - ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE, - "All decoded component data was used"); + VDO_ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE, + "All decoded component data was used"); return VDO_SUCCESS; }
@@ -1416,8 +1416,8 @@ static void vdo_encode_component_states(u8 *buffer, size_t *offset, encode_slab_depot_state_2_0(buffer, offset, states->slab_depot); encode_block_map_state_2_0(buffer, offset, states->block_map); - ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE, - "All super block component data was encoded"); + VDO_ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE, + "All super block component data was encoded"); } /**
@@ -1440,8 +1440,8 @@ void vdo_encode_super_block(u8 *buffer, struct vdo_component_states *states) * Even though the buffer is a full block, to avoid the potential corruption from a torn * write, the entire encoding must fit in the first sector. */ - ASSERT_LOG_ONLY(offset <= VDO_SECTOR_SIZE, - "entire superblock must fit in one sector"); + VDO_ASSERT_LOG_ONLY(offset <= VDO_SECTOR_SIZE, + "entire superblock must fit in one sector"); } /**
@@ -1476,8 +1476,8 @@ int vdo_decode_super_block(u8 *buffer) checksum = vdo_crc32(buffer, offset); decode_u32_le(buffer, &offset, &saved_checksum); - result = ASSERT(offset == VDO_SUPER_BLOCK_FIXED_SIZE + VDO_COMPONENT_DATA_SIZE, - "must have decoded entire superblock payload"); + result = VDO_ASSERT(offset == VDO_SUPER_BLOCK_FIXED_SIZE + VDO_COMPONENT_DATA_SIZE, + "must have decoded entire superblock payload"); if (result != VDO_SUCCESS) return result;
diff --git a/drivers/md/dm-vdo/errors.c b/drivers/md/dm-vdo/errors.c index df2498553312..3b5fddad8ddf 100644 --- a/drivers/md/dm-vdo/errors.c +++ b/drivers/md/dm-vdo/errors.c
@@ -281,8 +281,9 @@ int uds_register_error_block(const char *block_name, int first_error, .infos = infos, }; - result = ASSERT(first_error < next_free_error, "well-defined error block range"); - if (result != UDS_SUCCESS) + result = VDO_ASSERT(first_error < next_free_error, + "well-defined error block range"); + if (result != VDO_SUCCESS) return result; if (registered_errors.count == registered_errors.allocated) {
diff --git a/drivers/md/dm-vdo/flush.c b/drivers/md/dm-vdo/flush.c index 8d8d9cf4a24c..e03679e4d1ba 100644 --- a/drivers/md/dm-vdo/flush.c +++ b/drivers/md/dm-vdo/flush.c
@@ -59,8 +59,8 @@ struct flusher { */ static inline void assert_on_flusher_thread(struct flusher *flusher, const char *caller) { - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == flusher->thread_id), - "%s() called from flusher thread", caller); + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == flusher->thread_id), + "%s() called from flusher thread", caller); } /**
@@ -272,8 +272,8 @@ static void flush_vdo(struct vdo_completion *completion) int result; assert_on_flusher_thread(flusher, __func__); - result = ASSERT(vdo_is_state_normal(&flusher->state), - "flusher is in normal operation"); + result = VDO_ASSERT(vdo_is_state_normal(&flusher->state), + "flusher is in normal operation"); if (result != VDO_SUCCESS) {
vdo_enter_read_only_mode(flusher->vdo, result); vdo_complete_flush(flush);
@@ -330,11 +330,11 @@ void vdo_complete_flushes(struct flusher *flusher) if (flush->flush_generation >= oldest_active_generation) return; - ASSERT_LOG_ONLY((flush->flush_generation == - flusher->first_unacknowledged_generation), - "acknowledged next expected flush, %llu, was: %llu", - (unsigned long long) flusher->first_unacknowledged_generation, - (unsigned long long) flush->flush_generation); + VDO_ASSERT_LOG_ONLY((flush->flush_generation == + flusher->first_unacknowledged_generation), + "acknowledged next expected flush, %llu, was: %llu", + (unsigned long long) flusher->first_unacknowledged_generation, + (unsigned long long) flush->flush_generation); vdo_waitq_dequeue_waiter(&flusher->pending_flushes); vdo_complete_flush(flush); flusher->first_unacknowledged_generation++;
@@ -400,8 +400,8 @@ void vdo_launch_flush(struct vdo *vdo, struct bio *bio) struct flusher *flusher = vdo->flusher; const struct admin_state_code *code = vdo_get_admin_state_code(&flusher->state); - ASSERT_LOG_ONLY(!code->quiescent, "Flushing not allowed in state %s", - code->name); + VDO_ASSERT_LOG_ONLY(!code->quiescent, "Flushing not allowed in state %s", + code->name); spin_lock(&flusher->lock);
diff --git a/drivers/md/dm-vdo/funnel-workqueue.c b/drivers/md/dm-vdo/funnel-workqueue.c index a923432f0a37..03296e7fec12 100644 --- a/drivers/md/dm-vdo/funnel-workqueue.c +++ b/drivers/md/dm-vdo/funnel-workqueue.c
@@ -110,14 +110,14 @@ static struct vdo_completion *poll_for_completion(struct simple_work_queue *queu static void enqueue_work_queue_completion(struct simple_work_queue *queue, struct vdo_completion *completion) { - ASSERT_LOG_ONLY(completion->my_queue == NULL, - "completion %px (fn %px) to enqueue (%px) is not already queued (%px)", - completion, completion->callback, queue, completion->my_queue); + VDO_ASSERT_LOG_ONLY(completion->my_queue == NULL, + "completion %px (fn %px) to enqueue (%px) is not already queued (%px)", + completion, completion->callback, queue, completion->my_queue); if (completion->priority == VDO_WORK_Q_DEFAULT_PRIORITY) completion->priority = queue->common.type->default_priority; - if (ASSERT(completion->priority <= queue->common.type->max_priority, - "priority is in range for queue") != VDO_SUCCESS) + if (VDO_ASSERT(completion->priority <= queue->common.type->max_priority, + "priority is in range for queue") != VDO_SUCCESS) completion->priority = 0; completion->my_queue = &queue->common;
@@ -222,9 +222,9 @@ static struct vdo_completion *wait_for_next_completion(struct simple_work_queue static void process_completion(struct simple_work_queue *queue, struct vdo_completion *completion) { - if (ASSERT(completion->my_queue == &queue->common, - "completion %px from queue %px marked as being in this queue (%px)", - completion, queue, completion->my_queue) == UDS_SUCCESS) + if (VDO_ASSERT(completion->my_queue == &queue->common, + "completion %px from queue %px marked as being in this queue (%px)", + completion, queue, completion->my_queue) == VDO_SUCCESS) completion->my_queue = NULL; vdo_run_completion(completion);
@@ -319,9 +319,9 @@ static int make_simple_work_queue(const char *thread_name_prefix, const char *na struct task_struct *thread = NULL; int result; - ASSERT_LOG_ONLY((type->max_priority <= VDO_WORK_Q_MAX_PRIORITY), - "queue priority count %u within limit %u", type->max_priority, - VDO_WORK_Q_MAX_PRIORITY); + VDO_ASSERT_LOG_ONLY((type->max_priority <= VDO_WORK_Q_MAX_PRIORITY), + "queue priority count %u within limit %u", type->max_priority, + VDO_WORK_Q_MAX_PRIORITY); result = vdo_allocate(1, struct simple_work_queue, "simple work queue", &queue); if (result != VDO_SUCCESS)
diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c index e82b4a8c6fc4..61bb48068c3a 100644 --- a/drivers/md/dm-vdo/io-submitter.c +++ b/drivers/md/dm-vdo/io-submitter.c
@@ -94,7 +94,7 @@ static void count_all_bios(struct vio *vio, struct bio *bio) */ static void assert_in_bio_zone(struct vio *vio) { - ASSERT_LOG_ONLY(!in_interrupt(), "not in interrupt context"); + VDO_ASSERT_LOG_ONLY(!in_interrupt(), "not in interrupt context"); assert_vio_in_bio_zone(vio); }
@@ -300,7 +300,7 @@ static bool try_bio_map_merge(struct vio *vio) mutex_unlock(&bio_queue_data->lock); /* We don't care about failure of int_map_put in this case. */ - ASSERT_LOG_ONLY(result == VDO_SUCCESS, "bio map insertion succeeds"); + VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "bio map insertion succeeds"); return merged; }
@@ -345,8 +345,8 @@ void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical, const struct admin_state_code *code = vdo_get_admin_state(completion->vdo); - ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name); - ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio"); + VDO_ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name); + VDO_ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio"); vdo_reset_completion(completion); completion->error_handler = error_handler;
diff --git a/drivers/md/dm-vdo/logical-zone.c b/drivers/md/dm-vdo/logical-zone.c index ca5bc3be7978..300f9d2d2d5c 100644 --- a/drivers/md/dm-vdo/logical-zone.c +++ b/drivers/md/dm-vdo/logical-zone.c
@@ -142,8 +142,8 @@ void vdo_free_logical_zones(struct logical_zones *zones) static inline void assert_on_zone_thread(struct logical_zone *zone, const char *what) { - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id), - "%s() called on correct thread", what); + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id), + "%s() called on correct thread", what); } /**
@@ -247,10 +247,10 @@ void vdo_increment_logical_zone_flush_generation(struct logical_zone *zone, sequence_number_t expected_generation) { assert_on_zone_thread(zone, __func__); - ASSERT_LOG_ONLY((zone->flush_generation == expected_generation), - "logical zone %u flush generation %llu should be %llu before increment", - zone->zone_number, (unsigned long long) zone->flush_generation, - (unsigned long long) expected_generation); + VDO_ASSERT_LOG_ONLY((zone->flush_generation == expected_generation), + "logical zone %u flush generation %llu should be %llu before increment", + zone->zone_number, (unsigned long long) zone->flush_generation, + (unsigned long long) expected_generation); zone->flush_generation++; zone->ios_in_flush_generation = 0;
@@ -267,7 +267,7 @@ void vdo_acquire_flush_generation_lock(struct data_vio *data_vio) struct logical_zone *zone = data_vio->logical.zone; assert_on_zone_thread(zone, __func__); - ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal"); + VDO_ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal"); data_vio->flush_generation = zone->flush_generation; list_add_tail(&data_vio->write_entry, &zone->write_vios);
@@ -332,10 +332,10 @@ void vdo_release_flush_generation_lock(struct data_vio *data_vio) return; list_del_init(&data_vio->write_entry); - ASSERT_LOG_ONLY((zone->oldest_active_generation <= data_vio->flush_generation), - "data_vio releasing lock on generation %llu is not older than oldest active generation %llu", - (unsigned long long) data_vio->flush_generation, - (unsigned long long) zone->oldest_active_generation); + VDO_ASSERT_LOG_ONLY((zone->oldest_active_generation <= data_vio->flush_generation), + "data_vio releasing lock on generation %llu is not older than oldest active generation %llu", + (unsigned long long) data_vio->flush_generation, + (unsigned long long) zone->oldest_active_generation); if (!update_oldest_active_generation(zone) || zone->notifying) return;
diff --git a/drivers/md/dm-vdo/memory-alloc.c b/drivers/md/dm-vdo/memory-alloc.c index dd5acc582fb3..62bb717c4c50 100644 --- a/drivers/md/dm-vdo/memory-alloc.c +++ b/drivers/md/dm-vdo/memory-alloc.c
@@ -385,12 +385,12 @@ void vdo_memory_init(void) void vdo_memory_exit(void) { - ASSERT_LOG_ONLY(memory_stats.kmalloc_bytes == 0, - "kmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel", - memory_stats.kmalloc_bytes, memory_stats.kmalloc_blocks); - ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0, - "vmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel", - memory_stats.vmalloc_bytes, memory_stats.vmalloc_blocks); + VDO_ASSERT_LOG_ONLY(memory_stats.kmalloc_bytes == 0, + "kmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel", + memory_stats.kmalloc_bytes, memory_stats.kmalloc_blocks); + VDO_ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0, + "vmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel", + memory_stats.vmalloc_bytes, memory_stats.vmalloc_blocks); uds_log_debug("peak usage %zd bytes", memory_stats.peak_bytes); }
diff --git a/drivers/md/dm-vdo/packer.c b/drivers/md/dm-vdo/packer.c index 5774d8fd5c5a..4d45243161a6 100644 --- a/drivers/md/dm-vdo/packer.c +++ b/drivers/md/dm-vdo/packer.c
@@ -86,8 +86,8 @@ int vdo_get_compressed_block_fragment(enum block_mapping_state mapping_state, */ static inline void assert_on_packer_thread(struct packer *packer, const char *caller) { - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id), - "%s() called from packer thread", caller); + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id), + "%s() called from packer thread", caller); } /**
@@ -569,9 +569,9 @@ void vdo_attempt_packing(struct data_vio *data_vio) assert_on_packer_thread(packer, __func__); - result = ASSERT((status.stage == DATA_VIO_COMPRESSING), - "attempt to pack data_vio not ready for packing, stage: %u", - status.stage); + result = VDO_ASSERT((status.stage == DATA_VIO_COMPRESSING), + "attempt to pack data_vio not ready for packing, stage: %u", + status.stage); if (result != VDO_SUCCESS) return;
@@ -671,7 +671,7 @@ void vdo_remove_lock_holder_from_packer(struct vdo_completion *completion) lock_holder = vdo_forget(data_vio->compression.lock_holder); bin = lock_holder->compression.bin; - ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin"); + VDO_ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin"); slot = lock_holder->compression.slot; bin->slots_used--;
diff --git a/drivers/md/dm-vdo/permassert.h b/drivers/md/dm-vdo/permassert.h index ee978bc115ec..8774dde7927a 100644 --- a/drivers/md/dm-vdo/permassert.h +++ b/drivers/md/dm-vdo/permassert.h
@@ -13,7 +13,6 @@ /* Utilities for asserting that certain conditions are met */ #define STRINGIFY(X) #X -#define STRINGIFY_VALUE(X) STRINGIFY(X) /* * A hack to apply the "warn if unused" attribute to an integral expression.
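The renamed macros keep the old calling convention: __VDO_ASSERT() evaluates to VDO_SUCCESS when the condition holds and to the result of uds_assertion_failed() otherwise, so VDO_ASSERT() suits call sites that propagate the failure while VDO_ASSERT_LOG_ONLY() suits checks whose failure is merely logged. A minimal sketch of the two idioms this patch converts to; validate_example() and struct example_config are hypothetical, invented here purely for illustration:

	#include <linux/types.h>

	#include "permassert.h"
	#include "status-codes.h"

	/* Hypothetical structure, for illustration only. */
	struct example_config {
		u64 block_count;
		u32 flags;
	};

	static int validate_example(const struct example_config *config)
	{
		int result;

		/* Logged on failure, and the error code is returned to the caller. */
		result = VDO_ASSERT(config->block_count > 0, "block count specified");
		if (result != VDO_SUCCESS)
			return result;

		/* Logged on failure, but execution continues regardless. */
		VDO_ASSERT_LOG_ONLY(config->flags == 0, "no unknown flags set");
		return VDO_SUCCESS;
	}

Note that the conversion also replaces UDS_SUCCESS with VDO_SUCCESS in the result checks, which is why many call sites above change both the macro name and the constant they compare against.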
@@ -23,19 +22,23 @@ * expression. With optimization enabled, this function contributes no additional instructions, but * the warn_unused_result attribute still applies to the code calling it. */ -static inline int __must_check uds_must_use(int value) +static inline int __must_check vdo_must_use(int value) { return value; } /* Assert that an expression is true and return an error if it is not. */ -#define ASSERT(expr, ...) uds_must_use(__UDS_ASSERT(expr, __VA_ARGS__)) +#define VDO_ASSERT(expr, ...) vdo_must_use(__VDO_ASSERT(expr, __VA_ARGS__)) /* Log a message if the expression is not true. */ -#define ASSERT_LOG_ONLY(expr, ...) __UDS_ASSERT(expr, __VA_ARGS__) +#define VDO_ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__) -#define __UDS_ASSERT(expr, ...) \ - (likely(expr) ? UDS_SUCCESS \ +/* For use by UDS */ +#define ASSERT(expr, ...) VDO_ASSERT(expr, __VA_ARGS__) +#define ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__) + +#define __VDO_ASSERT(expr, ...) \ + (likely(expr) ? VDO_SUCCESS \ : uds_assertion_failed(STRINGIFY(expr), __FILE__, __LINE__, __VA_ARGS__)) /* Log an assertion failure message. */ diff --git a/drivers/md/dm-vdo/physical-zone.c b/drivers/md/dm-vdo/physical-zone.c index fadcea23288e..6678f472fb44 100644 --- a/drivers/md/dm-vdo/physical-zone.c +++ b/drivers/md/dm-vdo/physical-zone.c @@ -80,13 +80,13 @@ static inline void set_pbn_lock_type(struct pbn_lock *lock, enum pbn_lock_type t */ void vdo_downgrade_pbn_write_lock(struct pbn_lock *lock, bool compressed_write) { - ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock), - "PBN lock must not already have been downgraded"); - ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK), - "must not downgrade block map write locks"); - ASSERT_LOG_ONLY(lock->holder_count == 1, - "PBN write lock should have one holder but has %u", - lock->holder_count); + VDO_ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock), + "PBN lock must not already have been downgraded"); + VDO_ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK), + "must not downgrade block map write locks"); + VDO_ASSERT_LOG_ONLY(lock->holder_count == 1, + "PBN write lock should have one holder but has %u", + lock->holder_count); /* * data_vio write locks are downgraded in place--the writer retains the hold on the lock. 
* If this was a compressed write, the holder has not yet journaled its own inc ref, @@ -128,8 +128,8 @@ bool vdo_claim_pbn_lock_increment(struct pbn_lock *lock) */ void vdo_assign_pbn_lock_provisional_reference(struct pbn_lock *lock) { - ASSERT_LOG_ONLY(!lock->has_provisional_reference, - "lock does not have a provisional reference"); + VDO_ASSERT_LOG_ONLY(!lock->has_provisional_reference, + "lock does not have a provisional reference"); lock->has_provisional_reference = true; } @@ -221,7 +221,7 @@ static void return_pbn_lock_to_pool(struct pbn_lock_pool *pool, struct pbn_lock INIT_LIST_HEAD(&idle->entry); list_add_tail(&idle->entry, &pool->idle_list); - ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed"); + VDO_ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed"); pool->borrowed -= 1; } @@ -267,9 +267,9 @@ static void free_pbn_lock_pool(struct pbn_lock_pool *pool) if (pool == NULL) return; - ASSERT_LOG_ONLY(pool->borrowed == 0, - "All PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan", - pool->borrowed); + VDO_ASSERT_LOG_ONLY(pool->borrowed == 0, + "All PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan", + pool->borrowed); vdo_free(pool); } @@ -298,8 +298,8 @@ static int __must_check borrow_pbn_lock_from_pool(struct pbn_lock_pool *pool, "no free PBN locks left to borrow"); pool->borrowed += 1; - result = ASSERT(!list_empty(&pool->idle_list), - "idle list should not be empty if pool not at capacity"); + result = VDO_ASSERT(!list_empty(&pool->idle_list), + "idle list should not be empty if pool not at capacity"); if (result != VDO_SUCCESS) return result; @@ -447,7 +447,7 @@ int vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone, result = borrow_pbn_lock_from_pool(zone->lock_pool, type, &new_lock); if (result != VDO_SUCCESS) { - ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock"); + VDO_ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock"); return result; } @@ -461,8 +461,8 @@ int vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone, if (lock != NULL) { /* The lock is already held, so we don't need the borrowed one. 
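The pool hunks above pair every borrow_pbn_lock_from_pool() with a return_pbn_lock_to_pool(), including the race where a lock for the PBN already exists. A toy model of that idle-list bookkeeping, with invented names and the real locking and typing elided:

#include <linux/list.h>

struct toy_pool {
	struct list_head idle_list;
	size_t borrowed;
};

struct toy_lock {
	struct list_head entry;
};

static struct toy_lock *toy_borrow(struct toy_pool *pool)
{
	struct toy_lock *lock;

	if (list_empty(&pool->idle_list))
		return NULL;

	pool->borrowed++;
	lock = list_first_entry(&pool->idle_list, struct toy_lock, entry);
	list_del_init(&lock->entry);
	return lock;
}

static void toy_return(struct toy_pool *pool, struct toy_lock *lock)
{
	/* The counterpart of the assertion in return_pbn_lock_to_pool(). */
	VDO_ASSERT_LOG_ONLY(pool->borrowed > 0,
			    "shouldn't return more than borrowed");
	pool->borrowed--;
	list_add_tail(&lock->entry, &pool->idle_list);
}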
*/ return_pbn_lock_to_pool(zone->lock_pool, vdo_forget(new_lock)); - result = ASSERT(lock->holder_count > 0, "physical block %llu lock held", - (unsigned long long) pbn); + result = VDO_ASSERT(lock->holder_count > 0, "physical block %llu lock held", + (unsigned long long) pbn); if (result != VDO_SUCCESS) return result; *lock_ptr = lock; @@ -485,8 +485,8 @@ static int allocate_and_lock_block(struct allocation *allocation) int result; struct pbn_lock *lock; - ASSERT_LOG_ONLY(allocation->lock == NULL, - "must not allocate a block while already holding a lock on one"); + VDO_ASSERT_LOG_ONLY(allocation->lock == NULL, + "must not allocate a block while already holding a lock on one"); result = vdo_allocate_block(allocation->zone->allocator, &allocation->pbn); if (result != VDO_SUCCESS) @@ -617,8 +617,8 @@ void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone, if (lock == NULL) return; - ASSERT_LOG_ONLY(lock->holder_count > 0, - "should not be releasing a lock that is not held"); + VDO_ASSERT_LOG_ONLY(lock->holder_count > 0, + "should not be releasing a lock that is not held"); lock->holder_count -= 1; if (lock->holder_count > 0) { @@ -627,8 +627,8 @@ void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone, } holder = vdo_int_map_remove(zone->pbn_operations, locked_pbn); - ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu", - (unsigned long long) locked_pbn); + VDO_ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu", + (unsigned long long) locked_pbn); release_pbn_lock_provisional_reference(lock, locked_pbn, zone->allocator); return_pbn_lock_to_pool(zone->lock_pool, lock); diff --git a/drivers/md/dm-vdo/priority-table.c b/drivers/md/dm-vdo/priority-table.c index fc99268d2437..42d3d8d0e4b5 100644 --- a/drivers/md/dm-vdo/priority-table.c +++ b/drivers/md/dm-vdo/priority-table.c @@ -127,8 +127,8 @@ void vdo_reset_priority_table(struct priority_table *table) void vdo_priority_table_enqueue(struct priority_table *table, unsigned int priority, struct list_head *entry) { - ASSERT_LOG_ONLY((priority <= table->max_priority), - "entry priority must be valid for the table"); + VDO_ASSERT_LOG_ONLY((priority <= table->max_priority), + "entry priority must be valid for the table"); /* Append the entry to the queue in the specified bucket. 
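vdo_priority_table_enqueue() above is a plain bucketed FIFO: one list per priority level, with the assertion guarding the array bound. Roughly, under field names guessed from the hunk:

/* Assumed shape of the table's buckets. */
struct toy_bucket {
	struct list_head queue;
};

static void toy_enqueue(struct toy_bucket *buckets, unsigned int max_priority,
			unsigned int priority, struct list_head *entry)
{
	VDO_ASSERT_LOG_ONLY(priority <= max_priority,
			    "entry priority must be valid for the table");
	/* list_move_tail() also unlinks the entry from any list it is
	 * currently on, so re-queueing an entry is safe.
	 */
	list_move_tail(entry, &buckets[priority].queue);
}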
*/ list_move_tail(entry, &table->buckets[priority].queue); diff --git a/drivers/md/dm-vdo/recovery-journal.c b/drivers/md/dm-vdo/recovery-journal.c index 615755697e60..6df373b88042 100644 --- a/drivers/md/dm-vdo/recovery-journal.c +++ b/drivers/md/dm-vdo/recovery-journal.c @@ -119,8 +119,8 @@ static bool is_journal_zone_locked(struct recovery_journal *journal, /* Pairs with barrier in vdo_release_journal_entry_lock() */ smp_rmb(); - ASSERT_LOG_ONLY((decrements <= journal_value), - "journal zone lock counter must not underflow"); + VDO_ASSERT_LOG_ONLY((decrements <= journal_value), + "journal zone lock counter must not underflow"); return (journal_value != decrements); } @@ -150,8 +150,8 @@ void vdo_release_recovery_journal_block_reference(struct recovery_journal *journ lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number); current_value = get_counter(journal, lock_number, zone_type, zone_id); - ASSERT_LOG_ONLY((*current_value >= 1), - "decrement of lock counter must not underflow"); + VDO_ASSERT_LOG_ONLY((*current_value >= 1), + "decrement of lock counter must not underflow"); *current_value -= 1; if (zone_type == VDO_ZONE_TYPE_JOURNAL) { @@ -254,8 +254,8 @@ static inline bool __must_check is_block_full(const struct recovery_journal_bloc static void assert_on_journal_thread(struct recovery_journal *journal, const char *function_name) { - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == journal->thread_id), - "%s() called on journal thread", function_name); + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == journal->thread_id), + "%s() called on journal thread", function_name); } /** @@ -353,14 +353,14 @@ static void check_for_drain_complete(struct recovery_journal *journal) if (vdo_is_state_saving(&journal->state)) { if (journal->active_block != NULL) { - ASSERT_LOG_ONLY(((result == VDO_READ_ONLY) || - !is_block_dirty(journal->active_block)), - "journal being saved has clean active block"); + VDO_ASSERT_LOG_ONLY(((result == VDO_READ_ONLY) || + !is_block_dirty(journal->active_block)), + "journal being saved has clean active block"); recycle_journal_block(journal->active_block); } - ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks), - "all blocks in a journal being saved must be inactive"); + VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks), + "all blocks in a journal being saved must be inactive"); } vdo_finish_draining_with_result(&journal->state, result); @@ -800,8 +800,8 @@ void vdo_free_recovery_journal(struct recovery_journal *journal) * requires opening before use. 
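is_journal_zone_locked() above compares a plain per-zone increment count against an atomic decrement count; the read barrier keeps the decrement read from being reordered against the release path's store. A rough restatement, with the counter plumbing simplified and the exact ordering requirements deferred to the real code:

static bool toy_zone_locked(u16 journal_value, atomic_t *decrement_counter)
{
	u16 decrements = (u16) atomic_read(decrement_counter);

	/* Pairs with the write barrier in the release path, as in the
	 * hunk above.
	 */
	smp_rmb();

	VDO_ASSERT_LOG_ONLY(decrements <= journal_value,
			    "journal zone lock counter must not underflow");
	return journal_value != decrements;
}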
*/ if (!vdo_is_state_quiescent(&journal->state)) { - ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks), - "journal being freed has no active tail blocks"); + VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks), + "journal being freed has no active tail blocks"); } else if (!vdo_is_state_saved(&journal->state) && !list_empty(&journal->active_tail_blocks)) { uds_log_warning("journal being freed has uncommitted entries"); @@ -989,8 +989,8 @@ static void initialize_lock_count(struct recovery_journal *journal) atomic_t *decrement_counter = get_decrement_counter(journal, lock_number); journal_value = get_counter(journal, lock_number, VDO_ZONE_TYPE_JOURNAL, 0); - ASSERT_LOG_ONLY((*journal_value == atomic_read(decrement_counter)), - "count to be initialized not in use"); + VDO_ASSERT_LOG_ONLY((*journal_value == atomic_read(decrement_counter)), + "count to be initialized not in use"); *journal_value = journal->entries_per_block + 1; atomic_set(decrement_counter, 0); } @@ -1175,13 +1175,13 @@ static void continue_committed_waiter(struct vdo_waiter *waiter, void *context) int result = (is_read_only(journal) ? VDO_READ_ONLY : VDO_SUCCESS); bool has_decrement; - ASSERT_LOG_ONLY(vdo_before_journal_point(&journal->commit_point, - &data_vio->recovery_journal_point), - "DataVIOs released from recovery journal in order. Recovery journal point is (%llu, %u), but commit waiter point is (%llu, %u)", - (unsigned long long) journal->commit_point.sequence_number, - journal->commit_point.entry_count, - (unsigned long long) data_vio->recovery_journal_point.sequence_number, - data_vio->recovery_journal_point.entry_count); + VDO_ASSERT_LOG_ONLY(vdo_before_journal_point(&journal->commit_point, + &data_vio->recovery_journal_point), + "DataVIOs released from recovery journal in order. 
Recovery journal point is (%llu, %u), but commit waiter point is (%llu, %u)", + (unsigned long long) journal->commit_point.sequence_number, + journal->commit_point.entry_count, + (unsigned long long) data_vio->recovery_journal_point.sequence_number, + data_vio->recovery_journal_point.entry_count); journal->commit_point = data_vio->recovery_journal_point; data_vio->last_async_operation = VIO_ASYNC_OP_UPDATE_REFERENCE_COUNTS; @@ -1281,8 +1281,8 @@ static void complete_write(struct vdo_completion *completion) journal->last_write_acknowledged = block->sequence_number; last_active_block = get_journal_block(&journal->active_tail_blocks); - ASSERT_LOG_ONLY((block->sequence_number >= last_active_block->sequence_number), - "completed journal write is still active"); + VDO_ASSERT_LOG_ONLY((block->sequence_number >= last_active_block->sequence_number), + "completed journal write is still active"); notify_commit_waiters(journal); @@ -1456,8 +1456,8 @@ void vdo_add_recovery_journal_entry(struct recovery_journal *journal, return; } - ASSERT_LOG_ONLY(data_vio->recovery_sequence_number == 0, - "journal lock not held for new entry"); + VDO_ASSERT_LOG_ONLY(data_vio->recovery_sequence_number == 0, + "journal lock not held for new entry"); vdo_advance_journal_point(&journal->append_point, journal->entries_per_block); vdo_waitq_enqueue_waiter(&journal->entry_waiters, &data_vio->waiter); @@ -1564,13 +1564,13 @@ void vdo_acquire_recovery_journal_block_reference(struct recovery_journal *journ if (sequence_number == 0) return; - ASSERT_LOG_ONLY((zone_type != VDO_ZONE_TYPE_JOURNAL), - "invalid lock count increment from journal zone"); + VDO_ASSERT_LOG_ONLY((zone_type != VDO_ZONE_TYPE_JOURNAL), + "invalid lock count increment from journal zone"); lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number); current_value = get_counter(journal, lock_number, zone_type, zone_id); - ASSERT_LOG_ONLY(*current_value < U16_MAX, - "increment of lock counter must not overflow"); + VDO_ASSERT_LOG_ONLY(*current_value < U16_MAX, + "increment of lock counter must not overflow"); if (*current_value == 0) { /* diff --git a/drivers/md/dm-vdo/repair.c b/drivers/md/dm-vdo/repair.c index bfcdedeedb86..c7abb8078336 100644 --- a/drivers/md/dm-vdo/repair.c +++ b/drivers/md/dm-vdo/repair.c @@ -976,8 +976,8 @@ find_entry_starting_next_page(struct repair_completion *repair, if (needs_sort) { struct numbered_block_mapping *just_sorted_entry = sort_next_heap_element(repair); - ASSERT_LOG_ONLY(just_sorted_entry < current_entry, - "heap is returning elements in an unexpected order"); + VDO_ASSERT_LOG_ONLY(just_sorted_entry < current_entry, + "heap is returning elements in an unexpected order"); } current_entry--; @@ -1129,8 +1129,8 @@ static void recover_block_map(struct vdo_completion *completion) repair->current_entry = &repair->entries[repair->block_map_entry_count - 1]; first_sorted_entry = sort_next_heap_element(repair); - ASSERT_LOG_ONLY(first_sorted_entry == repair->current_entry, - "heap is returning elements in an unexpected order"); + VDO_ASSERT_LOG_ONLY(first_sorted_entry == repair->current_entry, + "heap is returning elements in an unexpected order"); /* Prevent any page from being processed until all pages have been launched. 
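continue_committed_waiter() above depends on journal points advancing strictly in order. A journal point is a (sequence_number, entry_count) pair, and the before-relation the assertion relies on is presumably the lexicographic one:

/* Sketch of the ordering assumed by the monotonicity assertions. */
static bool toy_before_journal_point(const struct journal_point *first,
				     const struct journal_point *second)
{
	return (first->sequence_number < second->sequence_number) ||
	       ((first->sequence_number == second->sequence_number) &&
		(first->entry_count < second->entry_count));
}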
*/ repair->launching = true; @@ -1489,8 +1489,8 @@ static int extract_new_mappings(struct repair_completion *repair) repair->block_map_entry_count++; } - result = ASSERT((repair->block_map_entry_count <= repair->entry_count), - "approximate entry count is an upper bound"); + result = VDO_ASSERT((repair->block_map_entry_count <= repair->entry_count), + "approximate entry count is an upper bound"); if (result != VDO_SUCCESS) vdo_enter_read_only_mode(vdo, result); diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c index 97208c9e0062..00746de09c12 100644 --- a/drivers/md/dm-vdo/slab-depot.c +++ b/drivers/md/dm-vdo/slab-depot.c @@ -149,7 +149,7 @@ static void mark_slab_journal_dirty(struct slab_journal *journal, sequence_numbe struct slab_journal *dirty_journal; struct list_head *dirty_list = &journal->slab->allocator->dirty_slab_journals; - ASSERT_LOG_ONLY(journal->recovery_lock == 0, "slab journal was clean"); + VDO_ASSERT_LOG_ONLY(journal->recovery_lock == 0, "slab journal was clean"); journal->recovery_lock = lock; list_for_each_entry_reverse(dirty_journal, dirty_list, dirty_entry) { @@ -216,7 +216,7 @@ static u8 __must_check compute_fullness_hint(struct slab_depot *depot, { block_count_t hint; - ASSERT_LOG_ONLY((free_blocks < (1 << 23)), "free blocks must be less than 2^23"); + VDO_ASSERT_LOG_ONLY((free_blocks < (1 << 23)), "free blocks must be less than 2^23"); if (free_blocks == 0) return 0; @@ -532,13 +532,13 @@ static void adjust_slab_journal_block_reference(struct slab_journal *journal, return; } - ASSERT_LOG_ONLY((adjustment != 0), "adjustment must be non-zero"); + VDO_ASSERT_LOG_ONLY((adjustment != 0), "adjustment must be non-zero"); lock = get_lock(journal, sequence_number); if (adjustment < 0) { - ASSERT_LOG_ONLY((-adjustment <= lock->count), - "adjustment %d of lock count %u for slab journal block %llu must not underflow", - adjustment, lock->count, - (unsigned long long) sequence_number); + VDO_ASSERT_LOG_ONLY((-adjustment <= lock->count), + "adjustment %d of lock count %u for slab journal block %llu must not underflow", + adjustment, lock->count, + (unsigned long long) sequence_number); } lock->count += adjustment; @@ -661,16 +661,16 @@ static void reopen_slab_journal(struct vdo_slab *slab) struct slab_journal *journal = &slab->journal; sequence_number_t block; - ASSERT_LOG_ONLY(journal->tail_header.entry_count == 0, - "vdo_slab journal's active block empty before reopening"); + VDO_ASSERT_LOG_ONLY(journal->tail_header.entry_count == 0, + "vdo_slab journal's active block empty before reopening"); journal->head = journal->tail; initialize_journal_state(journal); /* Ensure no locks are spuriously held on an empty journal. */ for (block = 1; block <= journal->size; block++) { - ASSERT_LOG_ONLY((get_lock(journal, block)->count == 0), - "Scrubbed journal's block %llu is not locked", - (unsigned long long) block); + VDO_ASSERT_LOG_ONLY((get_lock(journal, block)->count == 0), + "Scrubbed journal's block %llu is not locked", + (unsigned long long) block); } add_entries(journal); @@ -757,7 +757,7 @@ static void write_slab_journal_block(struct vdo_waiter *waiter, void *context) /* Copy the tail block into the vio. 
*/ memcpy(pooled->vio.data, journal->block, VDO_BLOCK_SIZE); - ASSERT_LOG_ONLY(unused_entries >= 0, "vdo_slab journal block is not overfull"); + VDO_ASSERT_LOG_ONLY(unused_entries >= 0, "vdo_slab journal block is not overfull"); if (unused_entries > 0) { /* * Release the per-entry locks for any unused entries in the block we are about to @@ -907,22 +907,22 @@ static void add_entry(struct slab_journal *journal, physical_block_number_t pbn, struct packed_slab_journal_block *block = journal->block; int result; - result = ASSERT(vdo_before_journal_point(&journal->tail_header.recovery_point, - &recovery_point), - "recovery journal point is monotonically increasing, recovery point: %llu.%u, block recovery point: %llu.%u", - (unsigned long long) recovery_point.sequence_number, - recovery_point.entry_count, - (unsigned long long) journal->tail_header.recovery_point.sequence_number, - journal->tail_header.recovery_point.entry_count); + result = VDO_ASSERT(vdo_before_journal_point(&journal->tail_header.recovery_point, + &recovery_point), + "recovery journal point is monotonically increasing, recovery point: %llu.%u, block recovery point: %llu.%u", + (unsigned long long) recovery_point.sequence_number, + recovery_point.entry_count, + (unsigned long long) journal->tail_header.recovery_point.sequence_number, + journal->tail_header.recovery_point.entry_count); if (result != VDO_SUCCESS) { vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result); return; } if (operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) { - result = ASSERT((journal->tail_header.entry_count < - journal->full_entries_per_block), - "block has room for full entries"); + result = VDO_ASSERT((journal->tail_header.entry_count < + journal->full_entries_per_block), + "block has room for full entries"); if (result != VDO_SUCCESS) { vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result); @@ -1371,8 +1371,8 @@ static unsigned int calculate_slab_priority(struct vdo_slab *slab) */ static void prioritize_slab(struct vdo_slab *slab) { - ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry), - "a slab must not already be on a ring when prioritizing"); + VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry), + "a slab must not already be on a ring when prioritizing"); slab->priority = calculate_slab_priority(slab); vdo_priority_table_enqueue(slab->allocator->prioritized_slabs, slab->priority, &slab->allocq_entry); @@ -1655,8 +1655,8 @@ static int __must_check adjust_reference_count(struct vdo_slab *slab, * the last time it was clean. We must release the per-entry slab journal lock for * the entry associated with the update we are now doing. */ - result = ASSERT(is_valid_journal_point(slab_journal_point), - "Reference count adjustments need slab journal points."); + result = VDO_ASSERT(is_valid_journal_point(slab_journal_point), + "Reference count adjustments need slab journal points."); if (result != VDO_SUCCESS) return result; @@ -1825,16 +1825,16 @@ static void add_entries(struct slab_journal *journal) * scrubbing thresholds, this should never happen. */ if (lock->count > 0) { - ASSERT_LOG_ONLY((journal->head + journal->size) == journal->tail, - "New block has locks, but journal is not full"); + VDO_ASSERT_LOG_ONLY((journal->head + journal->size) == journal->tail, + "New block has locks, but journal is not full"); /* * The blocking threshold must let the journal fill up if the new * block has locks; if the blocking threshold is smaller than the * journal size, the new block cannot possibly have locks already. 
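The add_entries() assertions above encode the slab journal's ring arithmetic: sequence numbers grow without reuse, blocks [head, tail) are live, and a new tail block can only collide with a still-locked block when the ring is completely full. Stated as a predicate, with names simplified:

/* Invariant behind "New block has locks, but journal is not full". */
static bool toy_journal_full(sequence_number_t head, sequence_number_t tail,
			     block_count_t size)
{
	/* The new tail block reuses the slot of the current head block
	 * exactly when size blocks are already live.
	 */
	return (head + size) == tail;
}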
*/ - ASSERT_LOG_ONLY((journal->blocking_threshold >= journal->size), - "New block can have locks already iff blocking threshold is at the end of the journal"); + VDO_ASSERT_LOG_ONLY((journal->blocking_threshold >= journal->size), + "New block can have locks already iff blocking threshold is at the end of the journal"); WRITE_ONCE(journal->events->disk_full_count, journal->events->disk_full_count + 1); @@ -2361,9 +2361,9 @@ static int allocate_slab_counters(struct vdo_slab *slab) int result; size_t index, bytes; - result = ASSERT(slab->reference_blocks == NULL, - "vdo_slab %u doesn't allocate refcounts twice", - slab->slab_number); + result = VDO_ASSERT(slab->reference_blocks == NULL, + "vdo_slab %u doesn't allocate refcounts twice", + slab->slab_number); if (result != VDO_SUCCESS) return result; @@ -2503,9 +2503,9 @@ static void load_slab_journal(struct vdo_slab *slab) * 1. This is impossible, due to the scrubbing threshold, on a real system, so * don't bother reading the (bogus) data off disk. */ - ASSERT_LOG_ONLY(((journal->size < 16) || - (journal->scrubbing_threshold < (journal->size - 1))), - "Scrubbing threshold protects against reads of unwritten slab journal blocks"); + VDO_ASSERT_LOG_ONLY(((journal->size < 16) || + (journal->scrubbing_threshold < (journal->size - 1))), + "Scrubbing threshold protects against reads of unwritten slab journal blocks"); vdo_finish_loading_with_result(&slab->state, allocate_counters_if_clean(slab)); return; @@ -2519,8 +2519,8 @@ static void register_slab_for_scrubbing(struct vdo_slab *slab, bool high_priorit { struct slab_scrubber *scrubber = &slab->allocator->scrubber; - ASSERT_LOG_ONLY((slab->status != VDO_SLAB_REBUILT), - "slab to be scrubbed is unrecovered"); + VDO_ASSERT_LOG_ONLY((slab->status != VDO_SLAB_REBUILT), + "slab to be scrubbed is unrecovered"); if (slab->status != VDO_SLAB_REQUIRES_SCRUBBING) return; @@ -2547,17 +2547,17 @@ static void queue_slab(struct vdo_slab *slab) block_count_t free_blocks; int result; - ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry), + VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry), "a requeued slab must not already be on a ring"); if (vdo_is_read_only(allocator->depot->vdo)) return; free_blocks = slab->free_blocks; - result = ASSERT((free_blocks <= allocator->depot->slab_config.data_blocks), - "rebuilt slab %u must have a valid free block count (has %llu, expected maximum %llu)", - slab->slab_number, (unsigned long long) free_blocks, - (unsigned long long) allocator->depot->slab_config.data_blocks); + result = VDO_ASSERT((free_blocks <= allocator->depot->slab_config.data_blocks), + "rebuilt slab %u must have a valid free block count (has %llu, expected maximum %llu)", + slab->slab_number, (unsigned long long) free_blocks, + (unsigned long long) allocator->depot->slab_config.data_blocks); if (result != VDO_SUCCESS) { vdo_enter_read_only_mode(allocator->depot->vdo, result); return; @@ -2880,9 +2880,9 @@ static void apply_journal_entries(struct vdo_completion *completion) * At the end of rebuild, the reference counters should be accurate to the end of the * journal we just applied. 
*/ - result = ASSERT(!vdo_before_journal_point(&last_entry_applied, - &ref_counts_point), - "Refcounts are not more accurate than the slab journal"); + result = VDO_ASSERT(!vdo_before_journal_point(&last_entry_applied, + &ref_counts_point), + "Refcounts are not more accurate than the slab journal"); if (result != VDO_SUCCESS) { abort_scrubbing(scrubber, result); return; @@ -2993,8 +2993,8 @@ static void scrub_slabs(struct block_allocator *allocator, struct vdo_completion static inline void assert_on_allocator_thread(thread_id_t thread_id, const char *function_name) { - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == thread_id), - "%s called on correct thread", function_name); + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == thread_id), + "%s called on correct thread", function_name); } static void register_slab_with_allocator(struct block_allocator *allocator, @@ -3142,8 +3142,8 @@ static int __must_check allocate_slab_block(struct vdo_slab *slab, if (!search_reference_blocks(slab, &free_index)) return VDO_NO_SPACE; - ASSERT_LOG_ONLY((slab->counters[free_index] == EMPTY_REFERENCE_COUNT), - "free block must have ref count of zero"); + VDO_ASSERT_LOG_ONLY((slab->counters[free_index] == EMPTY_REFERENCE_COUNT), + "free block must have ref count of zero"); make_provisional_reference(slab, free_index); adjust_free_block_count(slab, false); @@ -3850,8 +3850,8 @@ static bool __must_check release_recovery_journal_lock(struct slab_journal *jour sequence_number_t recovery_lock) { if (recovery_lock > journal->recovery_lock) { - ASSERT_LOG_ONLY((recovery_lock < journal->recovery_lock), - "slab journal recovery lock is not older than the recovery journal head"); + VDO_ASSERT_LOG_ONLY((recovery_lock < journal->recovery_lock), + "slab journal recovery lock is not older than the recovery journal head"); return false; } @@ -4665,8 +4665,8 @@ int vdo_prepare_to_grow_slab_depot(struct slab_depot *depot, return VDO_INCREMENT_TOO_SMALL; /* Generate the depot configuration for the new block count. 
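A recurring shape in these slab-depot hunks: checked assertions feed vdo_enter_read_only_mode(), so a broken invariant stops further writes rather than crashing the kernel. In outline (toy_step() and its arguments are illustrative only):

static void toy_step(struct vdo *vdo, u64 observed, u64 expected)
{
	int result;

	result = VDO_ASSERT(observed <= expected,
			    "observed value %llu within expected bound %llu",
			    (unsigned long long) observed,
			    (unsigned long long) expected);
	if (result != VDO_SUCCESS) {
		/* Degrade to read-only instead of continuing with a
		 * corrupt invariant.
		 */
		vdo_enter_read_only_mode(vdo, result);
		return;
	}

	/* ... normal processing continues here ... */
}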
*/ - ASSERT_LOG_ONLY(depot->first_block == partition->offset, - "New slab depot partition doesn't change origin"); + VDO_ASSERT_LOG_ONLY(depot->first_block == partition->offset, + "New slab depot partition doesn't change origin"); result = vdo_configure_slab_depot(partition, depot->slab_config, depot->zone_count, &new_state); if (result != VDO_SUCCESS) @@ -4740,7 +4740,7 @@ static void register_new_slabs(void *context, zone_count_t zone_number, */ void vdo_use_new_slabs(struct slab_depot *depot, struct vdo_completion *parent) { - ASSERT_LOG_ONLY(depot->new_slabs != NULL, "Must have new slabs to use"); + VDO_ASSERT_LOG_ONLY(depot->new_slabs != NULL, "Must have new slabs to use"); vdo_schedule_operation(depot->action_manager, VDO_ADMIN_STATE_SUSPENDED_OPERATION, NULL, register_new_slabs, @@ -4796,8 +4796,8 @@ static void do_drain_step(struct vdo_completion *completion) return; case VDO_DRAIN_ALLOCATOR_STEP_FINISHED: - ASSERT_LOG_ONLY(!is_vio_pool_busy(allocator->vio_pool), - "vio pool not busy"); + VDO_ASSERT_LOG_ONLY(!is_vio_pool_busy(allocator->vio_pool), + "vio pool not busy"); vdo_finish_draining_with_result(&allocator->state, completion->result); return; diff --git a/drivers/md/dm-vdo/thread-registry.c b/drivers/md/dm-vdo/thread-registry.c index 03e2f45e8e78..d4a077d58c60 100644 --- a/drivers/md/dm-vdo/thread-registry.c +++ b/drivers/md/dm-vdo/thread-registry.c @@ -44,7 +44,7 @@ void vdo_register_thread(struct thread_registry *registry, list_add_tail_rcu(&new_thread->links, ®istry->links); spin_unlock(®istry->lock); - ASSERT_LOG_ONLY(!found_it, "new thread not already in registry"); + VDO_ASSERT_LOG_ONLY(!found_it, "new thread not already in registry"); if (found_it) { /* Ensure no RCU iterators see it before re-initializing. */ synchronize_rcu(); @@ -67,7 +67,7 @@ void vdo_unregister_thread(struct thread_registry *registry) } spin_unlock(®istry->lock); - ASSERT_LOG_ONLY(found_it, "thread found in registry"); + VDO_ASSERT_LOG_ONLY(found_it, "thread found in registry"); if (found_it) { /* Ensure no RCU iterators see it before re-initializing. 
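The thread-registry hunks above keep the existing discipline: writers take the spinlock and use the RCU list primitives, and teardown calls synchronize_rcu() before an entry may be reused. The reader side, which motivates that wait, looks roughly like this (the real registry entries carry more state):

#include <linux/rculist.h>
#include <linux/sched.h>

struct toy_entry {
	struct list_head links;
	struct task_struct *task;
};

static struct toy_entry *toy_lookup(struct list_head *head)
{
	struct toy_entry *entry, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, head, links) {
		if (entry->task == current) {
			found = entry;
			break;
		}
	}
	rcu_read_unlock();

	/* Safe only because unregister waits in synchronize_rcu()
	 * before the entry can be recycled.
	 */
	return found;
}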
*/ synchronize_rcu(); diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c index b4dd0634a5cb..11be2ab17e29 100644 --- a/drivers/md/dm-vdo/vdo.c +++ b/drivers/md/dm-vdo/vdo.c @@ -425,9 +425,9 @@ int vdo_make_thread(struct vdo *vdo, thread_id_t thread_id, type = &default_queue_type; if (thread->queue != NULL) { - return ASSERT(vdo_work_queue_type_is(thread->queue, type), - "already constructed vdo thread %u is of the correct type", - thread_id); + return VDO_ASSERT(vdo_work_queue_type_is(thread->queue, type), + "already constructed vdo thread %u is of the correct type", + thread_id); } thread->vdo = vdo; @@ -448,8 +448,8 @@ static int register_vdo(struct vdo *vdo) int result; write_lock(®istry.lock); - result = ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL, - "VDO not already registered"); + result = VDO_ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL, + "VDO not already registered"); if (result == VDO_SUCCESS) { INIT_LIST_HEAD(&vdo->registration); list_add_tail(&vdo->registration, ®istry.links); @@ -1050,8 +1050,8 @@ int vdo_register_read_only_listener(struct vdo *vdo, void *listener, struct read_only_listener *read_only_listener; int result; - result = ASSERT(thread_id != vdo->thread_config.dedupe_thread, - "read only listener not registered on dedupe thread"); + result = VDO_ASSERT(thread_id != vdo->thread_config.dedupe_thread, + "read only listener not registered on dedupe thread"); if (result != VDO_SUCCESS) return result; @@ -1704,8 +1704,8 @@ void vdo_dump_status(const struct vdo *vdo) */ void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name) { - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread), - "%s called on admin thread", name); + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread), + "%s called on admin thread", name); } /** @@ -1718,9 +1718,9 @@ void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name) void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone, const char *name) { - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == - vdo->thread_config.logical_threads[logical_zone]), - "%s called on logical thread", name); + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == + vdo->thread_config.logical_threads[logical_zone]), + "%s called on logical thread", name); } /** @@ -1733,9 +1733,9 @@ void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logic void vdo_assert_on_physical_zone_thread(const struct vdo *vdo, zone_count_t physical_zone, const char *name) { - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == - vdo->thread_config.physical_threads[physical_zone]), - "%s called on physical thread", name); + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == + vdo->thread_config.physical_threads[physical_zone]), + "%s called on physical thread", name); } /** @@ -1773,7 +1773,7 @@ int vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn, /* With the PBN already checked, we should always succeed in finding a slab. 
*/ slab = vdo_get_slab(vdo->depot, pbn); - result = ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs"); + result = VDO_ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs"); if (result != VDO_SUCCESS) return result; diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c index 83c36f7590de..b1e4e604c2c3 100644 --- a/drivers/md/dm-vdo/vio.c +++ b/drivers/md/dm-vdo/vio.c @@ -82,14 +82,14 @@ int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type, struct bio *bio; int result; - result = ASSERT(block_count <= MAX_BLOCKS_PER_VIO, - "block count %u does not exceed maximum %u", block_count, - MAX_BLOCKS_PER_VIO); + result = VDO_ASSERT(block_count <= MAX_BLOCKS_PER_VIO, + "block count %u does not exceed maximum %u", block_count, + MAX_BLOCKS_PER_VIO); if (result != VDO_SUCCESS) return result; - result = ASSERT(((vio_type != VIO_TYPE_UNINITIALIZED) && (vio_type != VIO_TYPE_DATA)), - "%d is a metadata type", vio_type); + result = VDO_ASSERT(((vio_type != VIO_TYPE_UNINITIALIZED) && (vio_type != VIO_TYPE_DATA)), + "%d is a metadata type", vio_type); if (result != VDO_SUCCESS) return result; @@ -363,13 +363,13 @@ void free_vio_pool(struct vio_pool *pool) return; /* Remove all available vios from the object pool. */ - ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting), - "VIO pool must not have any waiters when being freed"); - ASSERT_LOG_ONLY((pool->busy_count == 0), - "VIO pool must not have %zu busy entries when being freed", - pool->busy_count); - ASSERT_LOG_ONLY(list_empty(&pool->busy), - "VIO pool must not have busy entries when being freed"); + VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting), + "VIO pool must not have any waiters when being freed"); + VDO_ASSERT_LOG_ONLY((pool->busy_count == 0), + "VIO pool must not have %zu busy entries when being freed", + pool->busy_count); + VDO_ASSERT_LOG_ONLY(list_empty(&pool->busy), + "VIO pool must not have busy entries when being freed"); list_for_each_entry_safe(pooled, tmp, &pool->available, pool_entry) { list_del(&pooled->pool_entry); @@ -377,8 +377,8 @@ void free_vio_pool(struct vio_pool *pool) pool->size--; } - ASSERT_LOG_ONLY(pool->size == 0, - "VIO pool must not have missing entries when being freed"); + VDO_ASSERT_LOG_ONLY(pool->size == 0, + "VIO pool must not have missing entries when being freed"); vdo_free(vdo_forget(pool->buffer)); vdo_free(pool); @@ -403,8 +403,8 @@ void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter) { struct pooled_vio *pooled; - ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()), - "acquire from active vio_pool called from correct thread"); + VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()), + "acquire from active vio_pool called from correct thread"); if (list_empty(&pool->available)) { vdo_waitq_enqueue_waiter(&pool->waiting, waiter); @@ -424,8 +424,8 @@ void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter) */ void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio) { - ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()), - "vio pool entry returned on same thread as it was acquired"); + VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()), + "vio pool entry returned on same thread as it was acquired"); vio->vio.completion.error_handler = NULL; vio->vio.completion.parent = NULL; @@ -465,8 +465,8 @@ void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio) * shouldn't exist. 
*/ default: - ASSERT_LOG_ONLY(0, "Bio operation %d not a write, read, discard, or empty flush", - bio_op(bio)); + VDO_ASSERT_LOG_ONLY(0, "Bio operation %d not a write, read, discard, or empty flush", + bio_op(bio)); } if ((bio->bi_opf & REQ_PREFLUSH) != 0) diff --git a/drivers/md/dm-vdo/vio.h b/drivers/md/dm-vdo/vio.h index fbfee5e3415d..3490e9f59b04 100644 --- a/drivers/md/dm-vdo/vio.h +++ b/drivers/md/dm-vdo/vio.h @@ -67,10 +67,10 @@ static inline void assert_vio_in_bio_zone(struct vio *vio) thread_id_t expected = get_vio_bio_zone_thread_id(vio); thread_id_t thread_id = vdo_get_callback_thread_id(); - ASSERT_LOG_ONLY((expected == thread_id), - "vio I/O for physical block %llu on thread %u, should be on bio zone thread %u", - (unsigned long long) pbn_from_vio_bio(vio->bio), thread_id, - expected); + VDO_ASSERT_LOG_ONLY((expected == thread_id), + "vio I/O for physical block %llu on thread %u, should be on bio zone thread %u", + (unsigned long long) pbn_from_vio_bio(vio->bio), thread_id, + expected); } int vdo_create_bio(struct bio **bio_ptr); -- cgit v1.2.3 From a9da0fb6d8c61e354611c181d505fd417aad51f5 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Sat, 10 Feb 2024 11:05:15 -0600 Subject: dm vdo: remove all sysfs interfaces Also update target major version number. All info is (or will be) accessible through alternative interfaces (e.g. "dmsetup message", module params, etc). Signed-off-by: Mike Snitzer Signed-off-by: Ken Raeburn Signed-off-by: Matthew Sakai --- drivers/md/dm-vdo/Makefile | 4 - drivers/md/dm-vdo/dedupe.c | 70 +- drivers/md/dm-vdo/dedupe.h | 2 +- drivers/md/dm-vdo/dm-vdo-target.c | 44 +- drivers/md/dm-vdo/logger.c | 5 - drivers/md/dm-vdo/logger.h | 2 - drivers/md/dm-vdo/pool-sysfs-stats.c | 2063 ---------------------------------- drivers/md/dm-vdo/pool-sysfs.c | 198 ---- drivers/md/dm-vdo/pool-sysfs.h | 19 - drivers/md/dm-vdo/status-codes.c | 1 - drivers/md/dm-vdo/status-codes.h | 2 - drivers/md/dm-vdo/sysfs.c | 82 -- drivers/md/dm-vdo/uds-sysfs.c | 187 --- drivers/md/dm-vdo/uds-sysfs.h | 12 - drivers/md/dm-vdo/vdo.c | 54 +- drivers/md/dm-vdo/vdo.h | 13 - 16 files changed, 9 insertions(+), 2749 deletions(-) delete mode 100644 drivers/md/dm-vdo/pool-sysfs-stats.c delete mode 100644 drivers/md/dm-vdo/pool-sysfs.c delete mode 100644 drivers/md/dm-vdo/pool-sysfs.h delete mode 100644 drivers/md/dm-vdo/sysfs.c delete mode 100644 drivers/md/dm-vdo/uds-sysfs.c delete mode 100644 drivers/md/dm-vdo/uds-sysfs.h (limited to 'drivers/md/dm-vdo/vdo.c') diff --git a/drivers/md/dm-vdo/Makefile b/drivers/md/dm-vdo/Makefile index 502a7a0acbdb..33e09abc6acd 100644 --- a/drivers/md/dm-vdo/Makefile +++ b/drivers/md/dm-vdo/Makefile @@ -28,19 +28,15 @@ dm-vdo-objs := \ packer.o \ permassert.o \ physical-zone.o \ - pool-sysfs.o \ - pool-sysfs-stats.o \ priority-table.o \ recovery-journal.o \ repair.o \ slab-depot.o \ status-codes.o \ string-utils.o \ - sysfs.o \ thread-device.o \ thread-registry.o \ thread-utils.o \ - uds-sysfs.o \ vdo.o \ vio.o \ wait-queue.o \ diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c index 52bdf657db64..8550a9a7958b 100644 --- a/drivers/md/dm-vdo/dedupe.c +++ b/drivers/md/dm-vdo/dedupe.c @@ -120,7 +120,6 @@ #include #include #include -#include #include #include #include @@ -279,7 +278,6 @@ struct hash_lock { struct hash_zones { struct action_manager *manager; - struct kobject dedupe_directory; struct uds_parameters parameters; struct uds_index_session *index_session; struct ratelimit_state ratelimiter; @@ -2022,56 +2020,6 @@ void 
vdo_share_compressed_write_lock(struct data_vio *data_vio, VDO_ASSERT_LOG_ONLY(claimed, "impossible to fail to claim an initial increment"); } -static void dedupe_kobj_release(struct kobject *directory) -{ - vdo_free(container_of(directory, struct hash_zones, dedupe_directory)); -} - -static ssize_t dedupe_status_show(struct kobject *directory, struct attribute *attr, - char *buf) -{ - struct uds_attribute *ua = container_of(attr, struct uds_attribute, attr); - struct hash_zones *zones = container_of(directory, struct hash_zones, - dedupe_directory); - - if (ua->show_string != NULL) - return sprintf(buf, "%s\n", ua->show_string(zones)); - else - return -EINVAL; -} - -static ssize_t dedupe_status_store(struct kobject *kobj __always_unused, - struct attribute *attr __always_unused, - const char *buf __always_unused, - size_t length __always_unused) -{ - return -EINVAL; -} - -/*----------------------------------------------------------------------*/ - -static const struct sysfs_ops dedupe_sysfs_ops = { - .show = dedupe_status_show, - .store = dedupe_status_store, -}; - -static struct uds_attribute dedupe_status_attribute = { - .attr = {.name = "status", .mode = 0444, }, - .show_string = vdo_get_dedupe_index_state_name, -}; - -static struct attribute *dedupe_attrs[] = { - &dedupe_status_attribute.attr, - NULL, -}; -ATTRIBUTE_GROUPS(dedupe); - -static const struct kobj_type dedupe_directory_type = { - .release = dedupe_kobj_release, - .sysfs_ops = &dedupe_sysfs_ops, - .default_groups = dedupe_groups, -}; - static void start_uds_queue(void *ptr) { /* @@ -2266,7 +2214,6 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones) vdo_initialize_completion(&zones->completion, vdo, VDO_HASH_ZONES_COMPLETION); vdo_set_completion_callback(&zones->completion, change_dedupe_state, vdo->thread_config.dedupe_thread); - kobject_init(&zones->dedupe_directory, &dedupe_directory_type); return VDO_SUCCESS; } @@ -2537,10 +2484,7 @@ void vdo_free_hash_zones(struct hash_zones *zones) vdo_finish_dedupe_index(zones); ratelimit_state_exit(&zones->ratelimiter); - if (vdo_get_admin_state_code(&zones->state) == VDO_ADMIN_STATE_NEW) - vdo_free(zones); - else - kobject_put(&zones->dedupe_directory); + vdo_free(zones); } static void initiate_suspend_index(struct admin_state *state) @@ -3047,17 +2991,9 @@ int vdo_message_dedupe_index(struct hash_zones *zones, const char *name) return -EINVAL; } -int vdo_add_dedupe_index_sysfs(struct hash_zones *zones) +void vdo_set_dedupe_state_normal(struct hash_zones *zones) { - int result = kobject_add(&zones->dedupe_directory, - &zones->completion.vdo->vdo_directory, "dedupe"); - - if (result == 0) { - vdo_set_admin_state_code(&zones->state, - VDO_ADMIN_STATE_NORMAL_OPERATION); - } - - return result; + vdo_set_admin_state_code(&zones->state, VDO_ADMIN_STATE_NORMAL_OPERATION); } /* If create_flag, create a new index without first attempting to load an existing index. 
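With the dedupe kobject gone, nothing nudges the admin state as a side effect of kobject_add(); vdo_set_dedupe_state_normal() does it explicitly during load, and runtime queries move to the target's message interface ("dmsetup message <device> 0 ...") instead of sysfs reads. A sketch of the kind of message hook this implies; the "dedupe_stats" message name and the ti->private handling are illustrative, not taken from the patch:

static int toy_message(struct dm_target *ti, unsigned int argc, char **argv,
		       char *result, unsigned int maxlen)
{
	struct vdo *vdo = ti->private;	/* vdo lookup simplified */

	if (argc == 1 && strcmp(argv[0], "dedupe_stats") == 0)
		return vdo_message_dedupe_index(vdo->hash_zones, argv[0]);

	return -EINVAL;
}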
*/ diff --git a/drivers/md/dm-vdo/dedupe.h b/drivers/md/dm-vdo/dedupe.h index 1566fc972ea7..9000d6f3eece 100644 --- a/drivers/md/dm-vdo/dedupe.h +++ b/drivers/md/dm-vdo/dedupe.h @@ -97,7 +97,7 @@ u64 vdo_get_dedupe_index_timeout_count(struct hash_zones *zones); int vdo_message_dedupe_index(struct hash_zones *zones, const char *name); -int vdo_add_dedupe_index_sysfs(struct hash_zones *zones); +void vdo_set_dedupe_state_normal(struct hash_zones *zones); void vdo_start_dedupe_index(struct hash_zones *zones, bool create_flag); diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c index 5f607dbb16e9..c6ee8161ba62 100644 --- a/drivers/md/dm-vdo/dm-vdo-target.c +++ b/drivers/md/dm-vdo/dm-vdo-target.c @@ -27,7 +27,6 @@ #include "logger.h" #include "memory-alloc.h" #include "message-stats.h" -#include "pool-sysfs.h" #include "recovery-journal.h" #include "repair.h" #include "slab-depot.h" @@ -36,7 +35,6 @@ #include "thread-device.h" #include "thread-registry.h" #include "types.h" -#include "uds-sysfs.h" #include "vdo.h" #include "vio.h" @@ -54,7 +52,6 @@ enum admin_phases { GROW_PHYSICAL_PHASE_END, GROW_PHYSICAL_PHASE_ERROR, LOAD_PHASE_START, - LOAD_PHASE_STATS, LOAD_PHASE_LOAD_DEPOT, LOAD_PHASE_MAKE_DIRTY, LOAD_PHASE_PREPARE_TO_ALLOCATE, @@ -104,7 +101,6 @@ static const char * const ADMIN_PHASE_NAMES[] = { "GROW_PHYSICAL_PHASE_END", "GROW_PHYSICAL_PHASE_ERROR", "LOAD_PHASE_START", - "LOAD_PHASE_STATS", "LOAD_PHASE_LOAD_DEPOT", "LOAD_PHASE_MAKE_DIRTY", "LOAD_PHASE_PREPARE_TO_ALLOCATE", @@ -947,8 +943,8 @@ static void vdo_io_hints(struct dm_target *ti, struct queue_limits *limits) * blocked task warnings in kernel logs. In order to avoid these warnings, we choose to * use the smallest reasonable value. * - * The value is displayed in sysfs, and also used by dm-thin to determine whether to pass - * down discards. The block layer splits large discards on this boundary when this is set. + * The value is used by dm-thin to determine whether to pass down discards. The block layer + * splits large discards on this boundary when this is set. */ limits->max_discard_sectors = (vdo->device_config->max_discard_blocks * VDO_SECTORS_PER_BLOCK); @@ -2174,32 +2170,6 @@ static enum slab_depot_load_type get_load_type(struct vdo *vdo) return VDO_SLAB_DEPOT_NORMAL_LOAD; } -/** - * vdo_initialize_kobjects() - Initialize the vdo sysfs directory. - * @vdo: The vdo being initialized. - * - * Return: VDO_SUCCESS or an error code. - */ -static int vdo_initialize_kobjects(struct vdo *vdo) -{ - int result; - struct dm_target *target = vdo->device_config->owning_target; - struct mapped_device *md = dm_table_get_md(target->table); - - kobject_init(&vdo->vdo_directory, &vdo_directory_type); - vdo->sysfs_added = true; - result = kobject_add(&vdo->vdo_directory, &disk_to_dev(dm_disk(md))->kobj, - "vdo"); - if (result != 0) - return VDO_CANT_ADD_SYSFS_NODE; - - result = vdo_add_dedupe_index_sysfs(vdo->hash_zones); - if (result != 0) - return VDO_CANT_ADD_SYSFS_NODE; - - return vdo_add_sysfs_stats_dir(vdo); -} - /** * load_callback() - Callback to do the destructive parts of loading a VDO. * @completion: The sub-task completion. 
@@ -2225,11 +2195,8 @@ static void load_callback(struct vdo_completion *completion) vdo_allow_read_only_mode_entry(completion); return; - case LOAD_PHASE_STATS: - vdo_continue_completion(completion, vdo_initialize_kobjects(vdo)); - return; - case LOAD_PHASE_LOAD_DEPOT: + vdo_set_dedupe_state_normal(vdo->hash_zones); if (vdo_is_read_only(vdo)) { /* * In read-only mode we don't use the allocator and it may not even be @@ -2866,7 +2833,7 @@ static void vdo_resume(struct dm_target *ti) static struct target_type vdo_target_bio = { .features = DM_TARGET_SINGLETON, .name = "vdo", - .version = { 8, 2, 0 }, + .version = { 9, 0, 0 }, .module = THIS_MODULE, .ctr = vdo_ctr, .dtr = vdo_dtr, @@ -2905,8 +2872,6 @@ static int __init vdo_init(void) /* Memory tracking must be initialized first for accurate accounting. */ vdo_memory_init(); - uds_init_sysfs(); - vdo_initialize_thread_device_registry(); vdo_initialize_device_registry_once(); uds_log_info("loaded version %s", CURRENT_VERSION); @@ -2933,7 +2898,6 @@ static int __init vdo_init(void) static void __exit vdo_exit(void) { vdo_module_destroy(); - uds_put_sysfs(); /* Memory tracking cleanup must be done last. */ vdo_memory_exit(); } diff --git a/drivers/md/dm-vdo/logger.c b/drivers/md/dm-vdo/logger.c index a50edb0331fc..bac28fff622d 100644 --- a/drivers/md/dm-vdo/logger.c +++ b/drivers/md/dm-vdo/logger.c @@ -56,11 +56,6 @@ int uds_get_log_level(void) return log_level; } -void uds_set_log_level(int new_log_level) -{ - log_level = new_log_level; -} - int uds_log_string_to_priority(const char *string) { int i; diff --git a/drivers/md/dm-vdo/logger.h b/drivers/md/dm-vdo/logger.h index 4e2f18042ba7..ceb07aa3231f 100644 --- a/drivers/md/dm-vdo/logger.h +++ b/drivers/md/dm-vdo/logger.h @@ -37,8 +37,6 @@ int uds_get_log_level(void); -void uds_set_log_level(int new_log_level); - int uds_log_string_to_priority(const char *string); const char *uds_log_priority_to_string(int priority); diff --git a/drivers/md/dm-vdo/pool-sysfs-stats.c b/drivers/md/dm-vdo/pool-sysfs-stats.c deleted file mode 100644 index ae3838894a1c..000000000000 --- a/drivers/md/dm-vdo/pool-sysfs-stats.c +++ /dev/null @@ -1,2063 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright 2023 Red Hat - */ - -#include - -#include "logger.h" -#include "string-utils.h" - -#include "dedupe.h" -#include "pool-sysfs.h" -#include "statistics.h" -#include "vdo.h" - -struct pool_stats_attribute { - struct attribute attr; - ssize_t (*print)(struct vdo_statistics *stats, char *buf); -}; - -static ssize_t pool_stats_attr_show(struct kobject *directory, - struct attribute *attr, - char *buf) -{ - ssize_t size; - struct pool_stats_attribute *pool_stats_attr = - container_of(attr, struct pool_stats_attribute, attr); - struct vdo *vdo = container_of(directory, struct vdo, stats_directory); - - if (pool_stats_attr->print == NULL) - return -EINVAL; - - mutex_lock(&vdo->stats_mutex); - vdo_fetch_statistics(vdo, &vdo->stats_buffer); - size = pool_stats_attr->print(&vdo->stats_buffer, buf); - mutex_unlock(&vdo->stats_mutex); - - return size; -} - -const struct sysfs_ops vdo_pool_stats_sysfs_ops = { - .show = pool_stats_attr_show, - .store = NULL, -}; - -/* Number of blocks used for data */ -static ssize_t -pool_stats_print_data_blocks_used(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->data_blocks_used); -} - -static struct pool_stats_attribute pool_stats_attr_data_blocks_used = { - .attr = { .name = "data_blocks_used", .mode = 0444, }, - .print = 
pool_stats_print_data_blocks_used, -}; - -/* Number of blocks used for VDO metadata */ -static ssize_t -pool_stats_print_overhead_blocks_used(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->overhead_blocks_used); -} - -static struct pool_stats_attribute pool_stats_attr_overhead_blocks_used = { - .attr = { .name = "overhead_blocks_used", .mode = 0444, }, - .print = pool_stats_print_overhead_blocks_used, -}; - -/* Number of logical blocks that are currently mapped to physical blocks */ -static ssize_t -pool_stats_print_logical_blocks_used(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->logical_blocks_used); -} - -static struct pool_stats_attribute pool_stats_attr_logical_blocks_used = { - .attr = { .name = "logical_blocks_used", .mode = 0444, }, - .print = pool_stats_print_logical_blocks_used, -}; - -/* number of physical blocks */ -static ssize_t -pool_stats_print_physical_blocks(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->physical_blocks); -} - -static struct pool_stats_attribute pool_stats_attr_physical_blocks = { - .attr = { .name = "physical_blocks", .mode = 0444, }, - .print = pool_stats_print_physical_blocks, -}; - -/* number of logical blocks */ -static ssize_t -pool_stats_print_logical_blocks(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->logical_blocks); -} - -static struct pool_stats_attribute pool_stats_attr_logical_blocks = { - .attr = { .name = "logical_blocks", .mode = 0444, }, - .print = pool_stats_print_logical_blocks, -}; - -/* Size of the block map page cache, in bytes */ -static ssize_t -pool_stats_print_block_map_cache_size(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map_cache_size); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_cache_size = { - .attr = { .name = "block_map_cache_size", .mode = 0444, }, - .print = pool_stats_print_block_map_cache_size, -}; - -/* The physical block size */ -static ssize_t -pool_stats_print_block_size(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_size); -} - -static struct pool_stats_attribute pool_stats_attr_block_size = { - .attr = { .name = "block_size", .mode = 0444, }, - .print = pool_stats_print_block_size, -}; - -/* Number of times the VDO has successfully recovered */ -static ssize_t -pool_stats_print_complete_recoveries(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->complete_recoveries); -} - -static struct pool_stats_attribute pool_stats_attr_complete_recoveries = { - .attr = { .name = "complete_recoveries", .mode = 0444, }, - .print = pool_stats_print_complete_recoveries, -}; - -/* Number of times the VDO has recovered from read-only mode */ -static ssize_t -pool_stats_print_read_only_recoveries(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->read_only_recoveries); -} - -static struct pool_stats_attribute pool_stats_attr_read_only_recoveries = { - .attr = { .name = "read_only_recoveries", .mode = 0444, }, - .print = pool_stats_print_read_only_recoveries, -}; - -/* String describing the operating mode of the VDO */ -static ssize_t -pool_stats_print_mode(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%s\n", stats->mode); -} - -static struct pool_stats_attribute pool_stats_attr_mode = { - .attr = { .name = "mode", .mode = 0444, }, - .print = pool_stats_print_mode, -}; - -/* Whether the VDO is 
in recovery mode */ -static ssize_t -pool_stats_print_in_recovery_mode(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%d\n", stats->in_recovery_mode); -} - -static struct pool_stats_attribute pool_stats_attr_in_recovery_mode = { - .attr = { .name = "in_recovery_mode", .mode = 0444, }, - .print = pool_stats_print_in_recovery_mode, -}; - -/* What percentage of recovery mode work has been completed */ -static ssize_t -pool_stats_print_recovery_percentage(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%u\n", stats->recovery_percentage); -} - -static struct pool_stats_attribute pool_stats_attr_recovery_percentage = { - .attr = { .name = "recovery_percentage", .mode = 0444, }, - .print = pool_stats_print_recovery_percentage, -}; - -/* Number of compressed data items written since startup */ -static ssize_t -pool_stats_print_packer_compressed_fragments_written(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->packer.compressed_fragments_written); -} - -static struct pool_stats_attribute pool_stats_attr_packer_compressed_fragments_written = { - .attr = { .name = "packer_compressed_fragments_written", .mode = 0444, }, - .print = pool_stats_print_packer_compressed_fragments_written, -}; - -/* Number of blocks containing compressed items written since startup */ -static ssize_t -pool_stats_print_packer_compressed_blocks_written(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->packer.compressed_blocks_written); -} - -static struct pool_stats_attribute pool_stats_attr_packer_compressed_blocks_written = { - .attr = { .name = "packer_compressed_blocks_written", .mode = 0444, }, - .print = pool_stats_print_packer_compressed_blocks_written, -}; - -/* Number of VIOs that are pending in the packer */ -static ssize_t -pool_stats_print_packer_compressed_fragments_in_packer(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->packer.compressed_fragments_in_packer); -} - -static struct pool_stats_attribute pool_stats_attr_packer_compressed_fragments_in_packer = { - .attr = { .name = "packer_compressed_fragments_in_packer", .mode = 0444, }, - .print = pool_stats_print_packer_compressed_fragments_in_packer, -}; - -/* The total number of slabs from which blocks may be allocated */ -static ssize_t -pool_stats_print_allocator_slab_count(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->allocator.slab_count); -} - -static struct pool_stats_attribute pool_stats_attr_allocator_slab_count = { - .attr = { .name = "allocator_slab_count", .mode = 0444, }, - .print = pool_stats_print_allocator_slab_count, -}; - -/* The total number of slabs from which blocks have ever been allocated */ -static ssize_t -pool_stats_print_allocator_slabs_opened(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->allocator.slabs_opened); -} - -static struct pool_stats_attribute pool_stats_attr_allocator_slabs_opened = { - .attr = { .name = "allocator_slabs_opened", .mode = 0444, }, - .print = pool_stats_print_allocator_slabs_opened, -}; - -/* The number of times since loading that a slab has been re-opened */ -static ssize_t -pool_stats_print_allocator_slabs_reopened(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->allocator.slabs_reopened); -} - -static struct pool_stats_attribute pool_stats_attr_allocator_slabs_reopened = { - .attr = { .name = "allocator_slabs_reopened", .mode = 0444, }, - .print = 
pool_stats_print_allocator_slabs_reopened, -}; - -/* Number of times the on-disk journal was full */ -static ssize_t -pool_stats_print_journal_disk_full(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->journal.disk_full); -} - -static struct pool_stats_attribute pool_stats_attr_journal_disk_full = { - .attr = { .name = "journal_disk_full", .mode = 0444, }, - .print = pool_stats_print_journal_disk_full, -}; - -/* Number of times the recovery journal requested slab journal commits. */ -static ssize_t -pool_stats_print_journal_slab_journal_commits_requested(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->journal.slab_journal_commits_requested); -} - -static struct pool_stats_attribute pool_stats_attr_journal_slab_journal_commits_requested = { - .attr = { .name = "journal_slab_journal_commits_requested", .mode = 0444, }, - .print = pool_stats_print_journal_slab_journal_commits_requested, -}; - -/* The total number of items on which processing has started */ -static ssize_t -pool_stats_print_journal_entries_started(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->journal.entries.started); -} - -static struct pool_stats_attribute pool_stats_attr_journal_entries_started = { - .attr = { .name = "journal_entries_started", .mode = 0444, }, - .print = pool_stats_print_journal_entries_started, -}; - -/* The total number of items for which a write operation has been issued */ -static ssize_t -pool_stats_print_journal_entries_written(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->journal.entries.written); -} - -static struct pool_stats_attribute pool_stats_attr_journal_entries_written = { - .attr = { .name = "journal_entries_written", .mode = 0444, }, - .print = pool_stats_print_journal_entries_written, -}; - -/* The total number of items for which a write operation has completed */ -static ssize_t -pool_stats_print_journal_entries_committed(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->journal.entries.committed); -} - -static struct pool_stats_attribute pool_stats_attr_journal_entries_committed = { - .attr = { .name = "journal_entries_committed", .mode = 0444, }, - .print = pool_stats_print_journal_entries_committed, -}; - -/* The total number of items on which processing has started */ -static ssize_t -pool_stats_print_journal_blocks_started(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->journal.blocks.started); -} - -static struct pool_stats_attribute pool_stats_attr_journal_blocks_started = { - .attr = { .name = "journal_blocks_started", .mode = 0444, }, - .print = pool_stats_print_journal_blocks_started, -}; - -/* The total number of items for which a write operation has been issued */ -static ssize_t -pool_stats_print_journal_blocks_written(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->journal.blocks.written); -} - -static struct pool_stats_attribute pool_stats_attr_journal_blocks_written = { - .attr = { .name = "journal_blocks_written", .mode = 0444, }, - .print = pool_stats_print_journal_blocks_written, -}; - -/* The total number of items for which a write operation has completed */ -static ssize_t -pool_stats_print_journal_blocks_committed(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->journal.blocks.committed); -} - -static struct pool_stats_attribute pool_stats_attr_journal_blocks_committed = { - .attr = { .name 
= "journal_blocks_committed", .mode = 0444, }, - .print = pool_stats_print_journal_blocks_committed, -}; - -/* Number of times the on-disk journal was full */ -static ssize_t -pool_stats_print_slab_journal_disk_full_count(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->slab_journal.disk_full_count); -} - -static struct pool_stats_attribute pool_stats_attr_slab_journal_disk_full_count = { - .attr = { .name = "slab_journal_disk_full_count", .mode = 0444, }, - .print = pool_stats_print_slab_journal_disk_full_count, -}; - -/* Number of times an entry was added over the flush threshold */ -static ssize_t -pool_stats_print_slab_journal_flush_count(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->slab_journal.flush_count); -} - -static struct pool_stats_attribute pool_stats_attr_slab_journal_flush_count = { - .attr = { .name = "slab_journal_flush_count", .mode = 0444, }, - .print = pool_stats_print_slab_journal_flush_count, -}; - -/* Number of times an entry was added over the block threshold */ -static ssize_t -pool_stats_print_slab_journal_blocked_count(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->slab_journal.blocked_count); -} - -static struct pool_stats_attribute pool_stats_attr_slab_journal_blocked_count = { - .attr = { .name = "slab_journal_blocked_count", .mode = 0444, }, - .print = pool_stats_print_slab_journal_blocked_count, -}; - -/* Number of times a tail block was written */ -static ssize_t -pool_stats_print_slab_journal_blocks_written(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->slab_journal.blocks_written); -} - -static struct pool_stats_attribute pool_stats_attr_slab_journal_blocks_written = { - .attr = { .name = "slab_journal_blocks_written", .mode = 0444, }, - .print = pool_stats_print_slab_journal_blocks_written, -}; - -/* Number of times we had to wait for the tail to write */ -static ssize_t -pool_stats_print_slab_journal_tail_busy_count(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->slab_journal.tail_busy_count); -} - -static struct pool_stats_attribute pool_stats_attr_slab_journal_tail_busy_count = { - .attr = { .name = "slab_journal_tail_busy_count", .mode = 0444, }, - .print = pool_stats_print_slab_journal_tail_busy_count, -}; - -/* Number of blocks written */ -static ssize_t -pool_stats_print_slab_summary_blocks_written(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->slab_summary.blocks_written); -} - -static struct pool_stats_attribute pool_stats_attr_slab_summary_blocks_written = { - .attr = { .name = "slab_summary_blocks_written", .mode = 0444, }, - .print = pool_stats_print_slab_summary_blocks_written, -}; - -/* Number of reference blocks written */ -static ssize_t -pool_stats_print_ref_counts_blocks_written(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->ref_counts.blocks_written); -} - -static struct pool_stats_attribute pool_stats_attr_ref_counts_blocks_written = { - .attr = { .name = "ref_counts_blocks_written", .mode = 0444, }, - .print = pool_stats_print_ref_counts_blocks_written, -}; - -/* number of dirty (resident) pages */ -static ssize_t -pool_stats_print_block_map_dirty_pages(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%u\n", stats->block_map.dirty_pages); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_dirty_pages = { - .attr = { .name = "block_map_dirty_pages", .mode 
= 0444, }, - .print = pool_stats_print_block_map_dirty_pages, -}; - -/* number of clean (resident) pages */ -static ssize_t -pool_stats_print_block_map_clean_pages(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%u\n", stats->block_map.clean_pages); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_clean_pages = { - .attr = { .name = "block_map_clean_pages", .mode = 0444, }, - .print = pool_stats_print_block_map_clean_pages, -}; - -/* number of free pages */ -static ssize_t -pool_stats_print_block_map_free_pages(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%u\n", stats->block_map.free_pages); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_free_pages = { - .attr = { .name = "block_map_free_pages", .mode = 0444, }, - .print = pool_stats_print_block_map_free_pages, -}; - -/* number of pages in failed state */ -static ssize_t -pool_stats_print_block_map_failed_pages(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%u\n", stats->block_map.failed_pages); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_failed_pages = { - .attr = { .name = "block_map_failed_pages", .mode = 0444, }, - .print = pool_stats_print_block_map_failed_pages, -}; - -/* number of pages incoming */ -static ssize_t -pool_stats_print_block_map_incoming_pages(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%u\n", stats->block_map.incoming_pages); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_incoming_pages = { - .attr = { .name = "block_map_incoming_pages", .mode = 0444, }, - .print = pool_stats_print_block_map_incoming_pages, -}; - -/* number of pages outgoing */ -static ssize_t -pool_stats_print_block_map_outgoing_pages(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%u\n", stats->block_map.outgoing_pages); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_outgoing_pages = { - .attr = { .name = "block_map_outgoing_pages", .mode = 0444, }, - .print = pool_stats_print_block_map_outgoing_pages, -}; - -/* how many times free page not avail */ -static ssize_t -pool_stats_print_block_map_cache_pressure(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%u\n", stats->block_map.cache_pressure); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_cache_pressure = { - .attr = { .name = "block_map_cache_pressure", .mode = 0444, }, - .print = pool_stats_print_block_map_cache_pressure, -}; - -/* number of get_vdo_page() calls for read */ -static ssize_t -pool_stats_print_block_map_read_count(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map.read_count); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_read_count = { - .attr = { .name = "block_map_read_count", .mode = 0444, }, - .print = pool_stats_print_block_map_read_count, -}; - -/* number of get_vdo_page() calls for write */ -static ssize_t -pool_stats_print_block_map_write_count(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map.write_count); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_write_count = { - .attr = { .name = "block_map_write_count", .mode = 0444, }, - .print = pool_stats_print_block_map_write_count, -}; - -/* number of times pages failed to read */ -static ssize_t -pool_stats_print_block_map_failed_reads(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map.failed_reads); -} - -static struct 
pool_stats_attribute pool_stats_attr_block_map_failed_reads = { - .attr = { .name = "block_map_failed_reads", .mode = 0444, }, - .print = pool_stats_print_block_map_failed_reads, -}; - -/* number of times pages failed to write */ -static ssize_t -pool_stats_print_block_map_failed_writes(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map.failed_writes); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_failed_writes = { - .attr = { .name = "block_map_failed_writes", .mode = 0444, }, - .print = pool_stats_print_block_map_failed_writes, -}; - -/* number of gets that are reclaimed */ -static ssize_t -pool_stats_print_block_map_reclaimed(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map.reclaimed); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_reclaimed = { - .attr = { .name = "block_map_reclaimed", .mode = 0444, }, - .print = pool_stats_print_block_map_reclaimed, -}; - -/* number of gets for outgoing pages */ -static ssize_t -pool_stats_print_block_map_read_outgoing(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map.read_outgoing); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_read_outgoing = { - .attr = { .name = "block_map_read_outgoing", .mode = 0444, }, - .print = pool_stats_print_block_map_read_outgoing, -}; - -/* number of gets that were already there */ -static ssize_t -pool_stats_print_block_map_found_in_cache(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map.found_in_cache); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_found_in_cache = { - .attr = { .name = "block_map_found_in_cache", .mode = 0444, }, - .print = pool_stats_print_block_map_found_in_cache, -}; - -/* number of gets requiring discard */ -static ssize_t -pool_stats_print_block_map_discard_required(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map.discard_required); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_discard_required = { - .attr = { .name = "block_map_discard_required", .mode = 0444, }, - .print = pool_stats_print_block_map_discard_required, -}; - -/* number of gets enqueued for their page */ -static ssize_t -pool_stats_print_block_map_wait_for_page(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map.wait_for_page); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_wait_for_page = { - .attr = { .name = "block_map_wait_for_page", .mode = 0444, }, - .print = pool_stats_print_block_map_wait_for_page, -}; - -/* number of gets that have to fetch */ -static ssize_t -pool_stats_print_block_map_fetch_required(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map.fetch_required); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_fetch_required = { - .attr = { .name = "block_map_fetch_required", .mode = 0444, }, - .print = pool_stats_print_block_map_fetch_required, -}; - -/* number of page fetches */ -static ssize_t -pool_stats_print_block_map_pages_loaded(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map.pages_loaded); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_pages_loaded = { - .attr = { .name = "block_map_pages_loaded", .mode = 0444, }, - .print = pool_stats_print_block_map_pages_loaded, -}; - -/* number of page saves */ -static 
ssize_t -pool_stats_print_block_map_pages_saved(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map.pages_saved); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_pages_saved = { - .attr = { .name = "block_map_pages_saved", .mode = 0444, }, - .print = pool_stats_print_block_map_pages_saved, -}; - -/* the number of flushes issued */ -static ssize_t -pool_stats_print_block_map_flush_count(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->block_map.flush_count); -} - -static struct pool_stats_attribute pool_stats_attr_block_map_flush_count = { - .attr = { .name = "block_map_flush_count", .mode = 0444, }, - .print = pool_stats_print_block_map_flush_count, -}; - -/* Number of times the UDS advice proved correct */ -static ssize_t -pool_stats_print_hash_lock_dedupe_advice_valid(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->hash_lock.dedupe_advice_valid); -} - -static struct pool_stats_attribute pool_stats_attr_hash_lock_dedupe_advice_valid = { - .attr = { .name = "hash_lock_dedupe_advice_valid", .mode = 0444, }, - .print = pool_stats_print_hash_lock_dedupe_advice_valid, -}; - -/* Number of times the UDS advice proved incorrect */ -static ssize_t -pool_stats_print_hash_lock_dedupe_advice_stale(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->hash_lock.dedupe_advice_stale); -} - -static struct pool_stats_attribute pool_stats_attr_hash_lock_dedupe_advice_stale = { - .attr = { .name = "hash_lock_dedupe_advice_stale", .mode = 0444, }, - .print = pool_stats_print_hash_lock_dedupe_advice_stale, -}; - -/* Number of writes with the same data as another in-flight write */ -static ssize_t -pool_stats_print_hash_lock_concurrent_data_matches(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->hash_lock.concurrent_data_matches); -} - -static struct pool_stats_attribute pool_stats_attr_hash_lock_concurrent_data_matches = { - .attr = { .name = "hash_lock_concurrent_data_matches", .mode = 0444, }, - .print = pool_stats_print_hash_lock_concurrent_data_matches, -}; - -/* Number of writes whose hash collided with an in-flight write */ -static ssize_t -pool_stats_print_hash_lock_concurrent_hash_collisions(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->hash_lock.concurrent_hash_collisions); -} - -static struct pool_stats_attribute pool_stats_attr_hash_lock_concurrent_hash_collisions = { - .attr = { .name = "hash_lock_concurrent_hash_collisions", .mode = 0444, }, - .print = pool_stats_print_hash_lock_concurrent_hash_collisions, -}; - -/* Current number of dedupe queries that are in flight */ -static ssize_t -pool_stats_print_hash_lock_curr_dedupe_queries(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%u\n", stats->hash_lock.curr_dedupe_queries); -} - -static struct pool_stats_attribute pool_stats_attr_hash_lock_curr_dedupe_queries = { - .attr = { .name = "hash_lock_curr_dedupe_queries", .mode = 0444, }, - .print = pool_stats_print_hash_lock_curr_dedupe_queries, -}; - -/* number of times VDO got an invalid dedupe advice PBN from UDS */ -static ssize_t -pool_stats_print_errors_invalid_advice_pbn_count(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->errors.invalid_advice_pbn_count); -} - -static struct pool_stats_attribute pool_stats_attr_errors_invalid_advice_pbn_count = { - .attr = { .name = "errors_invalid_advice_pbn_count", .mode = 0444, 
-	.print = pool_stats_print_errors_invalid_advice_pbn_count,
-};
-
-/* number of times a VIO completed with a VDO_NO_SPACE error */
-static ssize_t
-pool_stats_print_errors_no_space_error_count(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->errors.no_space_error_count);
-}
-
-static struct pool_stats_attribute pool_stats_attr_errors_no_space_error_count = {
-	.attr = { .name = "errors_no_space_error_count", .mode = 0444, },
-	.print = pool_stats_print_errors_no_space_error_count,
-};
-
-/* number of times a VIO completed with a VDO_READ_ONLY error */
-static ssize_t
-pool_stats_print_errors_read_only_error_count(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->errors.read_only_error_count);
-}
-
-static struct pool_stats_attribute pool_stats_attr_errors_read_only_error_count = {
-	.attr = { .name = "errors_read_only_error_count", .mode = 0444, },
-	.print = pool_stats_print_errors_read_only_error_count,
-};
-
-/* The VDO instance */
-static ssize_t
-pool_stats_print_instance(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%u\n", stats->instance);
-}
-
-static struct pool_stats_attribute pool_stats_attr_instance = {
-	.attr = { .name = "instance", .mode = 0444, },
-	.print = pool_stats_print_instance,
-};
-
-/* Current number of active VIOs */
-static ssize_t
-pool_stats_print_current_vios_in_progress(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%u\n", stats->current_vios_in_progress);
-}
-
-static struct pool_stats_attribute pool_stats_attr_current_vios_in_progress = {
-	.attr = { .name = "current_vios_in_progress", .mode = 0444, },
-	.print = pool_stats_print_current_vios_in_progress,
-};
-
-/* Maximum number of active VIOs */
-static ssize_t
-pool_stats_print_max_vios(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%u\n", stats->max_vios);
-}
-
-static struct pool_stats_attribute pool_stats_attr_max_vios = {
-	.attr = { .name = "max_vios", .mode = 0444, },
-	.print = pool_stats_print_max_vios,
-};
-
-/* Number of times the UDS index was too slow in responding */
-static ssize_t
-pool_stats_print_dedupe_advice_timeouts(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->dedupe_advice_timeouts);
-}
-
-static struct pool_stats_attribute pool_stats_attr_dedupe_advice_timeouts = {
-	.attr = { .name = "dedupe_advice_timeouts", .mode = 0444, },
-	.print = pool_stats_print_dedupe_advice_timeouts,
-};
-
-/* Number of flush requests submitted to the storage device */
-static ssize_t
-pool_stats_print_flush_out(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->flush_out);
-}
-
-static struct pool_stats_attribute pool_stats_attr_flush_out = {
-	.attr = { .name = "flush_out", .mode = 0444, },
-	.print = pool_stats_print_flush_out,
-};
-
-/* Logical block size */
-static ssize_t
-pool_stats_print_logical_block_size(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->logical_block_size);
-}
-
-static struct pool_stats_attribute pool_stats_attr_logical_block_size = {
-	.attr = { .name = "logical_block_size", .mode = 0444, },
-	.print = pool_stats_print_logical_block_size,
-};
-
-/* Number of REQ_OP_READ bios */
-static ssize_t
-pool_stats_print_bios_in_read(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_in.read);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_in_read = {
-	.attr = { .name = "bios_in_read", .mode = 0444, },
-	.print = pool_stats_print_bios_in_read,
-};
-
-/* Number of REQ_OP_WRITE bios with data */
-static ssize_t
-pool_stats_print_bios_in_write(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_in.write);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_in_write = {
-	.attr = { .name = "bios_in_write", .mode = 0444, },
-	.print = pool_stats_print_bios_in_write,
-};
-
-/* Number of bios tagged with REQ_PREFLUSH and containing no data */
-static ssize_t
-pool_stats_print_bios_in_empty_flush(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_in.empty_flush);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_in_empty_flush = {
-	.attr = { .name = "bios_in_empty_flush", .mode = 0444, },
-	.print = pool_stats_print_bios_in_empty_flush,
-};
-
-/* Number of REQ_OP_DISCARD bios */
-static ssize_t
-pool_stats_print_bios_in_discard(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_in.discard);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_in_discard = {
-	.attr = { .name = "bios_in_discard", .mode = 0444, },
-	.print = pool_stats_print_bios_in_discard,
-};
-
-/* Number of bios tagged with REQ_PREFLUSH */
-static ssize_t
-pool_stats_print_bios_in_flush(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_in.flush);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_in_flush = {
-	.attr = { .name = "bios_in_flush", .mode = 0444, },
-	.print = pool_stats_print_bios_in_flush,
-};
-
-/* Number of bios tagged with REQ_FUA */
-static ssize_t
-pool_stats_print_bios_in_fua(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_in.fua);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_in_fua = {
-	.attr = { .name = "bios_in_fua", .mode = 0444, },
-	.print = pool_stats_print_bios_in_fua,
-};
-
-/* Number of REQ_OP_READ bios */
-static ssize_t
-pool_stats_print_bios_in_partial_read(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_in_partial.read);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_in_partial_read = {
-	.attr = { .name = "bios_in_partial_read", .mode = 0444, },
-	.print = pool_stats_print_bios_in_partial_read,
-};
-
-/* Number of REQ_OP_WRITE bios with data */
-static ssize_t
-pool_stats_print_bios_in_partial_write(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_in_partial.write);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_in_partial_write = {
-	.attr = { .name = "bios_in_partial_write", .mode = 0444, },
-	.print = pool_stats_print_bios_in_partial_write,
-};
-
-/* Number of bios tagged with REQ_PREFLUSH and containing no data */
-static ssize_t
-pool_stats_print_bios_in_partial_empty_flush(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_in_partial.empty_flush);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_in_partial_empty_flush = {
-	.attr = { .name = "bios_in_partial_empty_flush", .mode = 0444, },
-	.print = pool_stats_print_bios_in_partial_empty_flush,
-};
-
-/* Number of REQ_OP_DISCARD bios */
-static ssize_t
-pool_stats_print_bios_in_partial_discard(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_in_partial.discard);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_in_partial_discard = {
-	.attr = { .name = "bios_in_partial_discard", .mode = 0444, },
-	.print = pool_stats_print_bios_in_partial_discard,
-};
-
-/* Number of bios tagged with REQ_PREFLUSH */
-static ssize_t
-pool_stats_print_bios_in_partial_flush(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_in_partial.flush);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_in_partial_flush = {
-	.attr = { .name = "bios_in_partial_flush", .mode = 0444, },
-	.print = pool_stats_print_bios_in_partial_flush,
-};
-
-/* Number of bios tagged with REQ_FUA */
-static ssize_t
-pool_stats_print_bios_in_partial_fua(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_in_partial.fua);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_in_partial_fua = {
-	.attr = { .name = "bios_in_partial_fua", .mode = 0444, },
-	.print = pool_stats_print_bios_in_partial_fua,
-};
-
-/* Number of REQ_OP_READ bios */
-static ssize_t
-pool_stats_print_bios_out_read(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_out.read);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_out_read = {
-	.attr = { .name = "bios_out_read", .mode = 0444, },
-	.print = pool_stats_print_bios_out_read,
-};
-
-/* Number of REQ_OP_WRITE bios with data */
-static ssize_t
-pool_stats_print_bios_out_write(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_out.write);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_out_write = {
-	.attr = { .name = "bios_out_write", .mode = 0444, },
-	.print = pool_stats_print_bios_out_write,
-};
-
-/* Number of bios tagged with REQ_PREFLUSH and containing no data */
-static ssize_t
-pool_stats_print_bios_out_empty_flush(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_out.empty_flush);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_out_empty_flush = {
-	.attr = { .name = "bios_out_empty_flush", .mode = 0444, },
-	.print = pool_stats_print_bios_out_empty_flush,
-};
-
-/* Number of REQ_OP_DISCARD bios */
-static ssize_t
-pool_stats_print_bios_out_discard(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_out.discard);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_out_discard = {
-	.attr = { .name = "bios_out_discard", .mode = 0444, },
-	.print = pool_stats_print_bios_out_discard,
-};
-
-/* Number of bios tagged with REQ_PREFLUSH */
-static ssize_t
-pool_stats_print_bios_out_flush(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_out.flush);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_out_flush = {
-	.attr = { .name = "bios_out_flush", .mode = 0444, },
-	.print = pool_stats_print_bios_out_flush,
-};
-
-/* Number of bios tagged with REQ_FUA */
-static ssize_t
-pool_stats_print_bios_out_fua(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_out.fua);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_out_fua = {
-	.attr = { .name = "bios_out_fua", .mode = 0444, },
-	.print = pool_stats_print_bios_out_fua,
-};
-
-/* Number of REQ_OP_READ bios */
-static ssize_t
-pool_stats_print_bios_meta_read(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_meta.read);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_meta_read = {
-	.attr = { .name = "bios_meta_read", .mode = 0444, },
-	.print = pool_stats_print_bios_meta_read,
-};
-
-/* Number of REQ_OP_WRITE bios with data */
-static ssize_t
-pool_stats_print_bios_meta_write(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_meta.write);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_meta_write = {
-	.attr = { .name = "bios_meta_write", .mode = 0444, },
-	.print = pool_stats_print_bios_meta_write,
-};
-
-/* Number of bios tagged with REQ_PREFLUSH and containing no data */
-static ssize_t
-pool_stats_print_bios_meta_empty_flush(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_meta.empty_flush);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_meta_empty_flush = {
-	.attr = { .name = "bios_meta_empty_flush", .mode = 0444, },
-	.print = pool_stats_print_bios_meta_empty_flush,
-};
-
-/* Number of REQ_OP_DISCARD bios */
-static ssize_t
-pool_stats_print_bios_meta_discard(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_meta.discard);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_meta_discard = {
-	.attr = { .name = "bios_meta_discard", .mode = 0444, },
-	.print = pool_stats_print_bios_meta_discard,
-};
-
-/* Number of bios tagged with REQ_PREFLUSH */
-static ssize_t
-pool_stats_print_bios_meta_flush(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_meta.flush);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_meta_flush = {
-	.attr = { .name = "bios_meta_flush", .mode = 0444, },
-	.print = pool_stats_print_bios_meta_flush,
-};
-
-/* Number of bios tagged with REQ_FUA */
-static ssize_t
-pool_stats_print_bios_meta_fua(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_meta.fua);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_meta_fua = {
-	.attr = { .name = "bios_meta_fua", .mode = 0444, },
-	.print = pool_stats_print_bios_meta_fua,
-};
-
-/* Number of REQ_OP_READ bios */
-static ssize_t
-pool_stats_print_bios_journal_read(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_journal.read);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_journal_read = {
-	.attr = { .name = "bios_journal_read", .mode = 0444, },
-	.print = pool_stats_print_bios_journal_read,
-};
-
-/* Number of REQ_OP_WRITE bios with data */
-static ssize_t
-pool_stats_print_bios_journal_write(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_journal.write);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_journal_write = {
-	.attr = { .name = "bios_journal_write", .mode = 0444, },
-	.print = pool_stats_print_bios_journal_write,
-};
-
-/* Number of bios tagged with REQ_PREFLUSH and containing no data */
-static ssize_t
-pool_stats_print_bios_journal_empty_flush(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_journal.empty_flush);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_journal_empty_flush = {
-	.attr = { .name = "bios_journal_empty_flush", .mode = 0444, },
-	.print = pool_stats_print_bios_journal_empty_flush,
-};
-
-/* Number of REQ_OP_DISCARD bios */
-static ssize_t
-pool_stats_print_bios_journal_discard(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_journal.discard);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_journal_discard = {
-	.attr = { .name = "bios_journal_discard", .mode = 0444, },
-	.print = pool_stats_print_bios_journal_discard,
-};
-
-/* Number of bios tagged with REQ_PREFLUSH */
-static ssize_t
-pool_stats_print_bios_journal_flush(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_journal.flush);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_journal_flush = {
-	.attr = { .name = "bios_journal_flush", .mode = 0444, },
-	.print = pool_stats_print_bios_journal_flush,
-};
-
-/* Number of bios tagged with REQ_FUA */
-static ssize_t
-pool_stats_print_bios_journal_fua(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_journal.fua);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_journal_fua = {
-	.attr = { .name = "bios_journal_fua", .mode = 0444, },
-	.print = pool_stats_print_bios_journal_fua,
-};
-
-/* Number of REQ_OP_READ bios */
-static ssize_t
-pool_stats_print_bios_page_cache_read(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_page_cache.read);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_page_cache_read = {
-	.attr = { .name = "bios_page_cache_read", .mode = 0444, },
-	.print = pool_stats_print_bios_page_cache_read,
-};
-
-/* Number of REQ_OP_WRITE bios with data */
-static ssize_t
-pool_stats_print_bios_page_cache_write(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_page_cache.write);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_page_cache_write = {
-	.attr = { .name = "bios_page_cache_write", .mode = 0444, },
-	.print = pool_stats_print_bios_page_cache_write,
-};
-
-/* Number of bios tagged with REQ_PREFLUSH and containing no data */
-static ssize_t
-pool_stats_print_bios_page_cache_empty_flush(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_page_cache.empty_flush);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_page_cache_empty_flush = {
-	.attr = { .name = "bios_page_cache_empty_flush", .mode = 0444, },
-	.print = pool_stats_print_bios_page_cache_empty_flush,
-};
-
-/* Number of REQ_OP_DISCARD bios */
-static ssize_t
-pool_stats_print_bios_page_cache_discard(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_page_cache.discard);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_page_cache_discard = {
-	.attr = { .name = "bios_page_cache_discard", .mode = 0444, },
-	.print = pool_stats_print_bios_page_cache_discard,
-};
-
-/* Number of bios tagged with REQ_PREFLUSH */
-static ssize_t
-pool_stats_print_bios_page_cache_flush(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_page_cache.flush);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_page_cache_flush = {
-	.attr = { .name = "bios_page_cache_flush", .mode = 0444, },
-	.print = pool_stats_print_bios_page_cache_flush,
-};
-
-/* Number of bios tagged with REQ_FUA */
-static ssize_t
-pool_stats_print_bios_page_cache_fua(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_page_cache.fua);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_page_cache_fua = {
-	.attr = { .name = "bios_page_cache_fua", .mode = 0444, },
-	.print = pool_stats_print_bios_page_cache_fua,
-};
-
-/* Number of REQ_OP_READ bios */
-static ssize_t
-pool_stats_print_bios_out_completed_read(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->bios_out_completed.read);
-}
-
-static struct pool_stats_attribute pool_stats_attr_bios_out_completed_read = {
-	.attr = { .name = "bios_out_completed_read", .mode = 0444, },
= "bios_out_completed_read", .mode = 0444, }, - .print = pool_stats_print_bios_out_completed_read, -}; - -/* Number of REQ_OP_WRITE bios with data */ -static ssize_t -pool_stats_print_bios_out_completed_write(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_out_completed.write); -} - -static struct pool_stats_attribute pool_stats_attr_bios_out_completed_write = { - .attr = { .name = "bios_out_completed_write", .mode = 0444, }, - .print = pool_stats_print_bios_out_completed_write, -}; - -/* Number of bios tagged with REQ_PREFLUSH and containing no data */ -static ssize_t -pool_stats_print_bios_out_completed_empty_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_out_completed.empty_flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_out_completed_empty_flush = { - .attr = { .name = "bios_out_completed_empty_flush", .mode = 0444, }, - .print = pool_stats_print_bios_out_completed_empty_flush, -}; - -/* Number of REQ_OP_DISCARD bios */ -static ssize_t -pool_stats_print_bios_out_completed_discard(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_out_completed.discard); -} - -static struct pool_stats_attribute pool_stats_attr_bios_out_completed_discard = { - .attr = { .name = "bios_out_completed_discard", .mode = 0444, }, - .print = pool_stats_print_bios_out_completed_discard, -}; - -/* Number of bios tagged with REQ_PREFLUSH */ -static ssize_t -pool_stats_print_bios_out_completed_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_out_completed.flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_out_completed_flush = { - .attr = { .name = "bios_out_completed_flush", .mode = 0444, }, - .print = pool_stats_print_bios_out_completed_flush, -}; - -/* Number of bios tagged with REQ_FUA */ -static ssize_t -pool_stats_print_bios_out_completed_fua(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_out_completed.fua); -} - -static struct pool_stats_attribute pool_stats_attr_bios_out_completed_fua = { - .attr = { .name = "bios_out_completed_fua", .mode = 0444, }, - .print = pool_stats_print_bios_out_completed_fua, -}; - -/* Number of REQ_OP_READ bios */ -static ssize_t -pool_stats_print_bios_meta_completed_read(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_meta_completed.read); -} - -static struct pool_stats_attribute pool_stats_attr_bios_meta_completed_read = { - .attr = { .name = "bios_meta_completed_read", .mode = 0444, }, - .print = pool_stats_print_bios_meta_completed_read, -}; - -/* Number of REQ_OP_WRITE bios with data */ -static ssize_t -pool_stats_print_bios_meta_completed_write(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_meta_completed.write); -} - -static struct pool_stats_attribute pool_stats_attr_bios_meta_completed_write = { - .attr = { .name = "bios_meta_completed_write", .mode = 0444, }, - .print = pool_stats_print_bios_meta_completed_write, -}; - -/* Number of bios tagged with REQ_PREFLUSH and containing no data */ -static ssize_t -pool_stats_print_bios_meta_completed_empty_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_meta_completed.empty_flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_meta_completed_empty_flush = { - .attr = { .name = "bios_meta_completed_empty_flush", .mode = 0444, }, - .print = 
pool_stats_print_bios_meta_completed_empty_flush, -}; - -/* Number of REQ_OP_DISCARD bios */ -static ssize_t -pool_stats_print_bios_meta_completed_discard(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_meta_completed.discard); -} - -static struct pool_stats_attribute pool_stats_attr_bios_meta_completed_discard = { - .attr = { .name = "bios_meta_completed_discard", .mode = 0444, }, - .print = pool_stats_print_bios_meta_completed_discard, -}; - -/* Number of bios tagged with REQ_PREFLUSH */ -static ssize_t -pool_stats_print_bios_meta_completed_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_meta_completed.flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_meta_completed_flush = { - .attr = { .name = "bios_meta_completed_flush", .mode = 0444, }, - .print = pool_stats_print_bios_meta_completed_flush, -}; - -/* Number of bios tagged with REQ_FUA */ -static ssize_t -pool_stats_print_bios_meta_completed_fua(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_meta_completed.fua); -} - -static struct pool_stats_attribute pool_stats_attr_bios_meta_completed_fua = { - .attr = { .name = "bios_meta_completed_fua", .mode = 0444, }, - .print = pool_stats_print_bios_meta_completed_fua, -}; - -/* Number of REQ_OP_READ bios */ -static ssize_t -pool_stats_print_bios_journal_completed_read(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_journal_completed.read); -} - -static struct pool_stats_attribute pool_stats_attr_bios_journal_completed_read = { - .attr = { .name = "bios_journal_completed_read", .mode = 0444, }, - .print = pool_stats_print_bios_journal_completed_read, -}; - -/* Number of REQ_OP_WRITE bios with data */ -static ssize_t -pool_stats_print_bios_journal_completed_write(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_journal_completed.write); -} - -static struct pool_stats_attribute pool_stats_attr_bios_journal_completed_write = { - .attr = { .name = "bios_journal_completed_write", .mode = 0444, }, - .print = pool_stats_print_bios_journal_completed_write, -}; - -/* Number of bios tagged with REQ_PREFLUSH and containing no data */ -static ssize_t -pool_stats_print_bios_journal_completed_empty_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_journal_completed.empty_flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_journal_completed_empty_flush = { - .attr = { .name = "bios_journal_completed_empty_flush", .mode = 0444, }, - .print = pool_stats_print_bios_journal_completed_empty_flush, -}; - -/* Number of REQ_OP_DISCARD bios */ -static ssize_t -pool_stats_print_bios_journal_completed_discard(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_journal_completed.discard); -} - -static struct pool_stats_attribute pool_stats_attr_bios_journal_completed_discard = { - .attr = { .name = "bios_journal_completed_discard", .mode = 0444, }, - .print = pool_stats_print_bios_journal_completed_discard, -}; - -/* Number of bios tagged with REQ_PREFLUSH */ -static ssize_t -pool_stats_print_bios_journal_completed_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_journal_completed.flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_journal_completed_flush = { - .attr = { .name = "bios_journal_completed_flush", .mode = 0444, }, - .print = 
pool_stats_print_bios_journal_completed_flush, -}; - -/* Number of bios tagged with REQ_FUA */ -static ssize_t -pool_stats_print_bios_journal_completed_fua(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_journal_completed.fua); -} - -static struct pool_stats_attribute pool_stats_attr_bios_journal_completed_fua = { - .attr = { .name = "bios_journal_completed_fua", .mode = 0444, }, - .print = pool_stats_print_bios_journal_completed_fua, -}; - -/* Number of REQ_OP_READ bios */ -static ssize_t -pool_stats_print_bios_page_cache_completed_read(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_page_cache_completed.read); -} - -static struct pool_stats_attribute pool_stats_attr_bios_page_cache_completed_read = { - .attr = { .name = "bios_page_cache_completed_read", .mode = 0444, }, - .print = pool_stats_print_bios_page_cache_completed_read, -}; - -/* Number of REQ_OP_WRITE bios with data */ -static ssize_t -pool_stats_print_bios_page_cache_completed_write(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_page_cache_completed.write); -} - -static struct pool_stats_attribute pool_stats_attr_bios_page_cache_completed_write = { - .attr = { .name = "bios_page_cache_completed_write", .mode = 0444, }, - .print = pool_stats_print_bios_page_cache_completed_write, -}; - -/* Number of bios tagged with REQ_PREFLUSH and containing no data */ -static ssize_t -pool_stats_print_bios_page_cache_completed_empty_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_page_cache_completed.empty_flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_page_cache_completed_empty_flush = { - .attr = { .name = "bios_page_cache_completed_empty_flush", .mode = 0444, }, - .print = pool_stats_print_bios_page_cache_completed_empty_flush, -}; - -/* Number of REQ_OP_DISCARD bios */ -static ssize_t -pool_stats_print_bios_page_cache_completed_discard(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_page_cache_completed.discard); -} - -static struct pool_stats_attribute pool_stats_attr_bios_page_cache_completed_discard = { - .attr = { .name = "bios_page_cache_completed_discard", .mode = 0444, }, - .print = pool_stats_print_bios_page_cache_completed_discard, -}; - -/* Number of bios tagged with REQ_PREFLUSH */ -static ssize_t -pool_stats_print_bios_page_cache_completed_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_page_cache_completed.flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_page_cache_completed_flush = { - .attr = { .name = "bios_page_cache_completed_flush", .mode = 0444, }, - .print = pool_stats_print_bios_page_cache_completed_flush, -}; - -/* Number of bios tagged with REQ_FUA */ -static ssize_t -pool_stats_print_bios_page_cache_completed_fua(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_page_cache_completed.fua); -} - -static struct pool_stats_attribute pool_stats_attr_bios_page_cache_completed_fua = { - .attr = { .name = "bios_page_cache_completed_fua", .mode = 0444, }, - .print = pool_stats_print_bios_page_cache_completed_fua, -}; - -/* Number of REQ_OP_READ bios */ -static ssize_t -pool_stats_print_bios_acknowledged_read(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_acknowledged.read); -} - -static struct pool_stats_attribute pool_stats_attr_bios_acknowledged_read 
= { - .attr = { .name = "bios_acknowledged_read", .mode = 0444, }, - .print = pool_stats_print_bios_acknowledged_read, -}; - -/* Number of REQ_OP_WRITE bios with data */ -static ssize_t -pool_stats_print_bios_acknowledged_write(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_acknowledged.write); -} - -static struct pool_stats_attribute pool_stats_attr_bios_acknowledged_write = { - .attr = { .name = "bios_acknowledged_write", .mode = 0444, }, - .print = pool_stats_print_bios_acknowledged_write, -}; - -/* Number of bios tagged with REQ_PREFLUSH and containing no data */ -static ssize_t -pool_stats_print_bios_acknowledged_empty_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_acknowledged.empty_flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_acknowledged_empty_flush = { - .attr = { .name = "bios_acknowledged_empty_flush", .mode = 0444, }, - .print = pool_stats_print_bios_acknowledged_empty_flush, -}; - -/* Number of REQ_OP_DISCARD bios */ -static ssize_t -pool_stats_print_bios_acknowledged_discard(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_acknowledged.discard); -} - -static struct pool_stats_attribute pool_stats_attr_bios_acknowledged_discard = { - .attr = { .name = "bios_acknowledged_discard", .mode = 0444, }, - .print = pool_stats_print_bios_acknowledged_discard, -}; - -/* Number of bios tagged with REQ_PREFLUSH */ -static ssize_t -pool_stats_print_bios_acknowledged_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_acknowledged.flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_acknowledged_flush = { - .attr = { .name = "bios_acknowledged_flush", .mode = 0444, }, - .print = pool_stats_print_bios_acknowledged_flush, -}; - -/* Number of bios tagged with REQ_FUA */ -static ssize_t -pool_stats_print_bios_acknowledged_fua(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_acknowledged.fua); -} - -static struct pool_stats_attribute pool_stats_attr_bios_acknowledged_fua = { - .attr = { .name = "bios_acknowledged_fua", .mode = 0444, }, - .print = pool_stats_print_bios_acknowledged_fua, -}; - -/* Number of REQ_OP_READ bios */ -static ssize_t -pool_stats_print_bios_acknowledged_partial_read(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_acknowledged_partial.read); -} - -static struct pool_stats_attribute pool_stats_attr_bios_acknowledged_partial_read = { - .attr = { .name = "bios_acknowledged_partial_read", .mode = 0444, }, - .print = pool_stats_print_bios_acknowledged_partial_read, -}; - -/* Number of REQ_OP_WRITE bios with data */ -static ssize_t -pool_stats_print_bios_acknowledged_partial_write(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_acknowledged_partial.write); -} - -static struct pool_stats_attribute pool_stats_attr_bios_acknowledged_partial_write = { - .attr = { .name = "bios_acknowledged_partial_write", .mode = 0444, }, - .print = pool_stats_print_bios_acknowledged_partial_write, -}; - -/* Number of bios tagged with REQ_PREFLUSH and containing no data */ -static ssize_t -pool_stats_print_bios_acknowledged_partial_empty_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_acknowledged_partial.empty_flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_acknowledged_partial_empty_flush = { - .attr = { 
.name = "bios_acknowledged_partial_empty_flush", .mode = 0444, }, - .print = pool_stats_print_bios_acknowledged_partial_empty_flush, -}; - -/* Number of REQ_OP_DISCARD bios */ -static ssize_t -pool_stats_print_bios_acknowledged_partial_discard(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_acknowledged_partial.discard); -} - -static struct pool_stats_attribute pool_stats_attr_bios_acknowledged_partial_discard = { - .attr = { .name = "bios_acknowledged_partial_discard", .mode = 0444, }, - .print = pool_stats_print_bios_acknowledged_partial_discard, -}; - -/* Number of bios tagged with REQ_PREFLUSH */ -static ssize_t -pool_stats_print_bios_acknowledged_partial_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_acknowledged_partial.flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_acknowledged_partial_flush = { - .attr = { .name = "bios_acknowledged_partial_flush", .mode = 0444, }, - .print = pool_stats_print_bios_acknowledged_partial_flush, -}; - -/* Number of bios tagged with REQ_FUA */ -static ssize_t -pool_stats_print_bios_acknowledged_partial_fua(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_acknowledged_partial.fua); -} - -static struct pool_stats_attribute pool_stats_attr_bios_acknowledged_partial_fua = { - .attr = { .name = "bios_acknowledged_partial_fua", .mode = 0444, }, - .print = pool_stats_print_bios_acknowledged_partial_fua, -}; - -/* Number of REQ_OP_READ bios */ -static ssize_t -pool_stats_print_bios_in_progress_read(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_in_progress.read); -} - -static struct pool_stats_attribute pool_stats_attr_bios_in_progress_read = { - .attr = { .name = "bios_in_progress_read", .mode = 0444, }, - .print = pool_stats_print_bios_in_progress_read, -}; - -/* Number of REQ_OP_WRITE bios with data */ -static ssize_t -pool_stats_print_bios_in_progress_write(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_in_progress.write); -} - -static struct pool_stats_attribute pool_stats_attr_bios_in_progress_write = { - .attr = { .name = "bios_in_progress_write", .mode = 0444, }, - .print = pool_stats_print_bios_in_progress_write, -}; - -/* Number of bios tagged with REQ_PREFLUSH and containing no data */ -static ssize_t -pool_stats_print_bios_in_progress_empty_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_in_progress.empty_flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_in_progress_empty_flush = { - .attr = { .name = "bios_in_progress_empty_flush", .mode = 0444, }, - .print = pool_stats_print_bios_in_progress_empty_flush, -}; - -/* Number of REQ_OP_DISCARD bios */ -static ssize_t -pool_stats_print_bios_in_progress_discard(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_in_progress.discard); -} - -static struct pool_stats_attribute pool_stats_attr_bios_in_progress_discard = { - .attr = { .name = "bios_in_progress_discard", .mode = 0444, }, - .print = pool_stats_print_bios_in_progress_discard, -}; - -/* Number of bios tagged with REQ_PREFLUSH */ -static ssize_t -pool_stats_print_bios_in_progress_flush(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_in_progress.flush); -} - -static struct pool_stats_attribute pool_stats_attr_bios_in_progress_flush = { - .attr = { .name = "bios_in_progress_flush", 
.mode = 0444, }, - .print = pool_stats_print_bios_in_progress_flush, -}; - -/* Number of bios tagged with REQ_FUA */ -static ssize_t -pool_stats_print_bios_in_progress_fua(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->bios_in_progress.fua); -} - -static struct pool_stats_attribute pool_stats_attr_bios_in_progress_fua = { - .attr = { .name = "bios_in_progress_fua", .mode = 0444, }, - .print = pool_stats_print_bios_in_progress_fua, -}; - -/* Tracked bytes currently allocated. */ -static ssize_t -pool_stats_print_memory_usage_bytes_used(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->memory_usage.bytes_used); -} - -static struct pool_stats_attribute pool_stats_attr_memory_usage_bytes_used = { - .attr = { .name = "memory_usage_bytes_used", .mode = 0444, }, - .print = pool_stats_print_memory_usage_bytes_used, -}; - -/* Maximum tracked bytes allocated. */ -static ssize_t -pool_stats_print_memory_usage_peak_bytes_used(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->memory_usage.peak_bytes_used); -} - -static struct pool_stats_attribute pool_stats_attr_memory_usage_peak_bytes_used = { - .attr = { .name = "memory_usage_peak_bytes_used", .mode = 0444, }, - .print = pool_stats_print_memory_usage_peak_bytes_used, -}; - -/* Number of records stored in the index */ -static ssize_t -pool_stats_print_index_entries_indexed(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->index.entries_indexed); -} - -static struct pool_stats_attribute pool_stats_attr_index_entries_indexed = { - .attr = { .name = "index_entries_indexed", .mode = 0444, }, - .print = pool_stats_print_index_entries_indexed, -}; - -/* Number of post calls that found an existing entry */ -static ssize_t -pool_stats_print_index_posts_found(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->index.posts_found); -} - -static struct pool_stats_attribute pool_stats_attr_index_posts_found = { - .attr = { .name = "index_posts_found", .mode = 0444, }, - .print = pool_stats_print_index_posts_found, -}; - -/* Number of post calls that added a new entry */ -static ssize_t -pool_stats_print_index_posts_not_found(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->index.posts_not_found); -} - -static struct pool_stats_attribute pool_stats_attr_index_posts_not_found = { - .attr = { .name = "index_posts_not_found", .mode = 0444, }, - .print = pool_stats_print_index_posts_not_found, -}; - -/* Number of query calls that found an existing entry */ -static ssize_t -pool_stats_print_index_queries_found(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->index.queries_found); -} - -static struct pool_stats_attribute pool_stats_attr_index_queries_found = { - .attr = { .name = "index_queries_found", .mode = 0444, }, - .print = pool_stats_print_index_queries_found, -}; - -/* Number of query calls that added a new entry */ -static ssize_t -pool_stats_print_index_queries_not_found(struct vdo_statistics *stats, char *buf) -{ - return sprintf(buf, "%llu\n", stats->index.queries_not_found); -} - -static struct pool_stats_attribute pool_stats_attr_index_queries_not_found = { - .attr = { .name = "index_queries_not_found", .mode = 0444, }, - .print = pool_stats_print_index_queries_not_found, -}; - -/* Number of update calls that found an existing entry */ -static ssize_t -pool_stats_print_index_updates_found(struct vdo_statistics *stats, char 
-{
-	return sprintf(buf, "%llu\n", stats->index.updates_found);
-}
-
-static struct pool_stats_attribute pool_stats_attr_index_updates_found = {
-	.attr = { .name = "index_updates_found", .mode = 0444, },
-	.print = pool_stats_print_index_updates_found,
-};
-
-/* Number of update calls that added a new entry */
-static ssize_t
-pool_stats_print_index_updates_not_found(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->index.updates_not_found);
-}
-
-static struct pool_stats_attribute pool_stats_attr_index_updates_not_found = {
-	.attr = { .name = "index_updates_not_found", .mode = 0444, },
-	.print = pool_stats_print_index_updates_not_found,
-};
-
-/* Number of entries discarded */
-static ssize_t
-pool_stats_print_index_entries_discarded(struct vdo_statistics *stats, char *buf)
-{
-	return sprintf(buf, "%llu\n", stats->index.entries_discarded);
-}
-
-static struct pool_stats_attribute pool_stats_attr_index_entries_discarded = {
-	.attr = { .name = "index_entries_discarded", .mode = 0444, },
-	.print = pool_stats_print_index_entries_discarded,
-};
-
-struct attribute *vdo_pool_stats_attrs[] = {
-	&pool_stats_attr_data_blocks_used.attr,
-	&pool_stats_attr_overhead_blocks_used.attr,
-	&pool_stats_attr_logical_blocks_used.attr,
-	&pool_stats_attr_physical_blocks.attr,
-	&pool_stats_attr_logical_blocks.attr,
-	&pool_stats_attr_block_map_cache_size.attr,
-	&pool_stats_attr_block_size.attr,
-	&pool_stats_attr_complete_recoveries.attr,
-	&pool_stats_attr_read_only_recoveries.attr,
-	&pool_stats_attr_mode.attr,
-	&pool_stats_attr_in_recovery_mode.attr,
-	&pool_stats_attr_recovery_percentage.attr,
-	&pool_stats_attr_packer_compressed_fragments_written.attr,
-	&pool_stats_attr_packer_compressed_blocks_written.attr,
-	&pool_stats_attr_packer_compressed_fragments_in_packer.attr,
-	&pool_stats_attr_allocator_slab_count.attr,
-	&pool_stats_attr_allocator_slabs_opened.attr,
-	&pool_stats_attr_allocator_slabs_reopened.attr,
-	&pool_stats_attr_journal_disk_full.attr,
-	&pool_stats_attr_journal_slab_journal_commits_requested.attr,
-	&pool_stats_attr_journal_entries_started.attr,
-	&pool_stats_attr_journal_entries_written.attr,
-	&pool_stats_attr_journal_entries_committed.attr,
-	&pool_stats_attr_journal_blocks_started.attr,
-	&pool_stats_attr_journal_blocks_written.attr,
-	&pool_stats_attr_journal_blocks_committed.attr,
-	&pool_stats_attr_slab_journal_disk_full_count.attr,
-	&pool_stats_attr_slab_journal_flush_count.attr,
-	&pool_stats_attr_slab_journal_blocked_count.attr,
-	&pool_stats_attr_slab_journal_blocks_written.attr,
-	&pool_stats_attr_slab_journal_tail_busy_count.attr,
-	&pool_stats_attr_slab_summary_blocks_written.attr,
-	&pool_stats_attr_ref_counts_blocks_written.attr,
-	&pool_stats_attr_block_map_dirty_pages.attr,
-	&pool_stats_attr_block_map_clean_pages.attr,
-	&pool_stats_attr_block_map_free_pages.attr,
-	&pool_stats_attr_block_map_failed_pages.attr,
-	&pool_stats_attr_block_map_incoming_pages.attr,
-	&pool_stats_attr_block_map_outgoing_pages.attr,
-	&pool_stats_attr_block_map_cache_pressure.attr,
-	&pool_stats_attr_block_map_read_count.attr,
-	&pool_stats_attr_block_map_write_count.attr,
-	&pool_stats_attr_block_map_failed_reads.attr,
-	&pool_stats_attr_block_map_failed_writes.attr,
-	&pool_stats_attr_block_map_reclaimed.attr,
-	&pool_stats_attr_block_map_read_outgoing.attr,
-	&pool_stats_attr_block_map_found_in_cache.attr,
-	&pool_stats_attr_block_map_discard_required.attr,
-	&pool_stats_attr_block_map_wait_for_page.attr,
-	&pool_stats_attr_block_map_fetch_required.attr,
-	&pool_stats_attr_block_map_pages_loaded.attr,
-	&pool_stats_attr_block_map_pages_saved.attr,
-	&pool_stats_attr_block_map_flush_count.attr,
-	&pool_stats_attr_hash_lock_dedupe_advice_valid.attr,
-	&pool_stats_attr_hash_lock_dedupe_advice_stale.attr,
-	&pool_stats_attr_hash_lock_concurrent_data_matches.attr,
-	&pool_stats_attr_hash_lock_concurrent_hash_collisions.attr,
-	&pool_stats_attr_hash_lock_curr_dedupe_queries.attr,
-	&pool_stats_attr_errors_invalid_advice_pbn_count.attr,
-	&pool_stats_attr_errors_no_space_error_count.attr,
-	&pool_stats_attr_errors_read_only_error_count.attr,
-	&pool_stats_attr_instance.attr,
-	&pool_stats_attr_current_vios_in_progress.attr,
-	&pool_stats_attr_max_vios.attr,
-	&pool_stats_attr_dedupe_advice_timeouts.attr,
-	&pool_stats_attr_flush_out.attr,
-	&pool_stats_attr_logical_block_size.attr,
-	&pool_stats_attr_bios_in_read.attr,
-	&pool_stats_attr_bios_in_write.attr,
-	&pool_stats_attr_bios_in_empty_flush.attr,
-	&pool_stats_attr_bios_in_discard.attr,
-	&pool_stats_attr_bios_in_flush.attr,
-	&pool_stats_attr_bios_in_fua.attr,
-	&pool_stats_attr_bios_in_partial_read.attr,
-	&pool_stats_attr_bios_in_partial_write.attr,
-	&pool_stats_attr_bios_in_partial_empty_flush.attr,
-	&pool_stats_attr_bios_in_partial_discard.attr,
-	&pool_stats_attr_bios_in_partial_flush.attr,
-	&pool_stats_attr_bios_in_partial_fua.attr,
-	&pool_stats_attr_bios_out_read.attr,
-	&pool_stats_attr_bios_out_write.attr,
-	&pool_stats_attr_bios_out_empty_flush.attr,
-	&pool_stats_attr_bios_out_discard.attr,
-	&pool_stats_attr_bios_out_flush.attr,
-	&pool_stats_attr_bios_out_fua.attr,
-	&pool_stats_attr_bios_meta_read.attr,
-	&pool_stats_attr_bios_meta_write.attr,
-	&pool_stats_attr_bios_meta_empty_flush.attr,
-	&pool_stats_attr_bios_meta_discard.attr,
-	&pool_stats_attr_bios_meta_flush.attr,
-	&pool_stats_attr_bios_meta_fua.attr,
-	&pool_stats_attr_bios_journal_read.attr,
-	&pool_stats_attr_bios_journal_write.attr,
-	&pool_stats_attr_bios_journal_empty_flush.attr,
-	&pool_stats_attr_bios_journal_discard.attr,
-	&pool_stats_attr_bios_journal_flush.attr,
-	&pool_stats_attr_bios_journal_fua.attr,
-	&pool_stats_attr_bios_page_cache_read.attr,
-	&pool_stats_attr_bios_page_cache_write.attr,
-	&pool_stats_attr_bios_page_cache_empty_flush.attr,
-	&pool_stats_attr_bios_page_cache_discard.attr,
-	&pool_stats_attr_bios_page_cache_flush.attr,
-	&pool_stats_attr_bios_page_cache_fua.attr,
-	&pool_stats_attr_bios_out_completed_read.attr,
-	&pool_stats_attr_bios_out_completed_write.attr,
-	&pool_stats_attr_bios_out_completed_empty_flush.attr,
-	&pool_stats_attr_bios_out_completed_discard.attr,
-	&pool_stats_attr_bios_out_completed_flush.attr,
-	&pool_stats_attr_bios_out_completed_fua.attr,
-	&pool_stats_attr_bios_meta_completed_read.attr,
-	&pool_stats_attr_bios_meta_completed_write.attr,
-	&pool_stats_attr_bios_meta_completed_empty_flush.attr,
-	&pool_stats_attr_bios_meta_completed_discard.attr,
-	&pool_stats_attr_bios_meta_completed_flush.attr,
-	&pool_stats_attr_bios_meta_completed_fua.attr,
-	&pool_stats_attr_bios_journal_completed_read.attr,
-	&pool_stats_attr_bios_journal_completed_write.attr,
-	&pool_stats_attr_bios_journal_completed_empty_flush.attr,
-	&pool_stats_attr_bios_journal_completed_discard.attr,
-	&pool_stats_attr_bios_journal_completed_flush.attr,
-	&pool_stats_attr_bios_journal_completed_fua.attr,
-	&pool_stats_attr_bios_page_cache_completed_read.attr,
-	&pool_stats_attr_bios_page_cache_completed_write.attr,
-	&pool_stats_attr_bios_page_cache_completed_empty_flush.attr,
-	&pool_stats_attr_bios_page_cache_completed_discard.attr,
-	&pool_stats_attr_bios_page_cache_completed_flush.attr,
-	&pool_stats_attr_bios_page_cache_completed_fua.attr,
-	&pool_stats_attr_bios_acknowledged_read.attr,
-	&pool_stats_attr_bios_acknowledged_write.attr,
-	&pool_stats_attr_bios_acknowledged_empty_flush.attr,
-	&pool_stats_attr_bios_acknowledged_discard.attr,
-	&pool_stats_attr_bios_acknowledged_flush.attr,
-	&pool_stats_attr_bios_acknowledged_fua.attr,
-	&pool_stats_attr_bios_acknowledged_partial_read.attr,
-	&pool_stats_attr_bios_acknowledged_partial_write.attr,
-	&pool_stats_attr_bios_acknowledged_partial_empty_flush.attr,
-	&pool_stats_attr_bios_acknowledged_partial_discard.attr,
-	&pool_stats_attr_bios_acknowledged_partial_flush.attr,
-	&pool_stats_attr_bios_acknowledged_partial_fua.attr,
-	&pool_stats_attr_bios_in_progress_read.attr,
-	&pool_stats_attr_bios_in_progress_write.attr,
-	&pool_stats_attr_bios_in_progress_empty_flush.attr,
-	&pool_stats_attr_bios_in_progress_discard.attr,
-	&pool_stats_attr_bios_in_progress_flush.attr,
-	&pool_stats_attr_bios_in_progress_fua.attr,
-	&pool_stats_attr_memory_usage_bytes_used.attr,
-	&pool_stats_attr_memory_usage_peak_bytes_used.attr,
-	&pool_stats_attr_index_entries_indexed.attr,
-	&pool_stats_attr_index_posts_found.attr,
-	&pool_stats_attr_index_posts_not_found.attr,
-	&pool_stats_attr_index_queries_found.attr,
-	&pool_stats_attr_index_queries_not_found.attr,
-	&pool_stats_attr_index_updates_found.attr,
-	&pool_stats_attr_index_updates_not_found.attr,
-	&pool_stats_attr_index_entries_discarded.attr,
-	NULL,
-};
diff --git a/drivers/md/dm-vdo/pool-sysfs.c b/drivers/md/dm-vdo/pool-sysfs.c
deleted file mode 100644
index 6769c5711cbc..000000000000
--- a/drivers/md/dm-vdo/pool-sysfs.c
+++ /dev/null
@@ -1,198 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2023 Red Hat
- */
-
-#include "pool-sysfs.h"
-
-#include
-
-#include "memory-alloc.h"
-#include "string-utils.h"
-
-#include "data-vio.h"
-#include "dedupe.h"
-#include "vdo.h"
-
-struct pool_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct vdo *vdo, char *buf);
-	ssize_t (*store)(struct vdo *vdo, const char *value, size_t count);
-};
-
-static ssize_t vdo_pool_attr_show(struct kobject *directory, struct attribute *attr,
-				  char *buf)
-{
-	struct pool_attribute *pool_attr = container_of(attr, struct pool_attribute,
-							attr);
-	struct vdo *vdo = container_of(directory, struct vdo, vdo_directory);
-
-	if (pool_attr->show == NULL)
-		return -EINVAL;
-	return pool_attr->show(vdo, buf);
-}
-
-static ssize_t vdo_pool_attr_store(struct kobject *directory, struct attribute *attr,
-				   const char *buf, size_t length)
-{
-	struct pool_attribute *pool_attr = container_of(attr, struct pool_attribute,
-							attr);
-	struct vdo *vdo = container_of(directory, struct vdo, vdo_directory);
-
-	if (pool_attr->store == NULL)
-		return -EINVAL;
-	return pool_attr->store(vdo, buf, length);
-}
-
-static const struct sysfs_ops vdo_pool_sysfs_ops = {
-	.show = vdo_pool_attr_show,
-	.store = vdo_pool_attr_store,
-};
-
-static ssize_t pool_compressing_show(struct vdo *vdo, char *buf)
-{
-	return sprintf(buf, "%s\n", (vdo_get_compressing(vdo) ? "1" : "0"));
"1" : "0")); -} - -static ssize_t pool_discards_active_show(struct vdo *vdo, char *buf) -{ - return sprintf(buf, "%u\n", - get_data_vio_pool_active_discards(vdo->data_vio_pool)); -} - -static ssize_t pool_discards_limit_show(struct vdo *vdo, char *buf) -{ - return sprintf(buf, "%u\n", get_data_vio_pool_discard_limit(vdo->data_vio_pool)); -} - -static ssize_t pool_discards_limit_store(struct vdo *vdo, const char *buf, size_t length) -{ - unsigned int value; - int result; - - if ((length > 12) || (kstrtouint(buf, 10, &value) < 0) || (value < 1)) - return -EINVAL; - - result = set_data_vio_pool_discard_limit(vdo->data_vio_pool, value); - if (result != VDO_SUCCESS) - return -EINVAL; - - return length; -} - -static ssize_t pool_discards_maximum_show(struct vdo *vdo, char *buf) -{ - return sprintf(buf, "%u\n", - get_data_vio_pool_maximum_discards(vdo->data_vio_pool)); -} - -static ssize_t pool_instance_show(struct vdo *vdo, char *buf) -{ - return sprintf(buf, "%u\n", vdo->instance); -} - -static ssize_t pool_requests_active_show(struct vdo *vdo, char *buf) -{ - return sprintf(buf, "%u\n", - get_data_vio_pool_active_requests(vdo->data_vio_pool)); -} - -static ssize_t pool_requests_limit_show(struct vdo *vdo, char *buf) -{ - return sprintf(buf, "%u\n", get_data_vio_pool_request_limit(vdo->data_vio_pool)); -} - -static ssize_t pool_requests_maximum_show(struct vdo *vdo, char *buf) -{ - return sprintf(buf, "%u\n", - get_data_vio_pool_maximum_requests(vdo->data_vio_pool)); -} - -static void vdo_pool_release(struct kobject *directory) -{ - vdo_free(container_of(directory, struct vdo, vdo_directory)); -} - -static struct pool_attribute vdo_pool_compressing_attr = { - .attr = { - .name = "compressing", - .mode = 0444, - }, - .show = pool_compressing_show, -}; - -static struct pool_attribute vdo_pool_discards_active_attr = { - .attr = { - .name = "discards_active", - .mode = 0444, - }, - .show = pool_discards_active_show, -}; - -static struct pool_attribute vdo_pool_discards_limit_attr = { - .attr = { - .name = "discards_limit", - .mode = 0644, - }, - .show = pool_discards_limit_show, - .store = pool_discards_limit_store, -}; - -static struct pool_attribute vdo_pool_discards_maximum_attr = { - .attr = { - .name = "discards_maximum", - .mode = 0444, - }, - .show = pool_discards_maximum_show, -}; - -static struct pool_attribute vdo_pool_instance_attr = { - .attr = { - .name = "instance", - .mode = 0444, - }, - .show = pool_instance_show, -}; - -static struct pool_attribute vdo_pool_requests_active_attr = { - .attr = { - .name = "requests_active", - .mode = 0444, - }, - .show = pool_requests_active_show, -}; - -static struct pool_attribute vdo_pool_requests_limit_attr = { - .attr = { - .name = "requests_limit", - .mode = 0444, - }, - .show = pool_requests_limit_show, -}; - -static struct pool_attribute vdo_pool_requests_maximum_attr = { - .attr = { - .name = "requests_maximum", - .mode = 0444, - }, - .show = pool_requests_maximum_show, -}; - -static struct attribute *pool_attrs[] = { - &vdo_pool_compressing_attr.attr, - &vdo_pool_discards_active_attr.attr, - &vdo_pool_discards_limit_attr.attr, - &vdo_pool_discards_maximum_attr.attr, - &vdo_pool_instance_attr.attr, - &vdo_pool_requests_active_attr.attr, - &vdo_pool_requests_limit_attr.attr, - &vdo_pool_requests_maximum_attr.attr, - NULL, -}; -ATTRIBUTE_GROUPS(pool); - -const struct kobj_type vdo_directory_type = { - .release = vdo_pool_release, - .sysfs_ops = &vdo_pool_sysfs_ops, - .default_groups = pool_groups, -}; diff --git 
diff --git a/drivers/md/dm-vdo/pool-sysfs.h b/drivers/md/dm-vdo/pool-sysfs.h
deleted file mode 100644
index 00e680924dc1..000000000000
--- a/drivers/md/dm-vdo/pool-sysfs.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2023 Red Hat
- */
-
-#ifndef VDO_POOL_SYSFS_H
-#define VDO_POOL_SYSFS_H
-
-#include
-
-/* The kobj_type used for setting up the kernel layer kobject. */
-extern const struct kobj_type vdo_directory_type;
-
-/* The sysfs_ops used for the "statistics" subdirectory. */
-extern const struct sysfs_ops vdo_pool_stats_sysfs_ops;
-/* The attribute used for the "statistics" subdirectory. */
-extern struct attribute *vdo_pool_stats_attrs[];
-
-#endif /* VDO_POOL_SYSFS_H */
diff --git a/drivers/md/dm-vdo/status-codes.c b/drivers/md/dm-vdo/status-codes.c
index 92c42b8bbb8b..42e87b2344bc 100644
--- a/drivers/md/dm-vdo/status-codes.c
+++ b/drivers/md/dm-vdo/status-codes.c
@@ -38,7 +38,6 @@ const struct error_info vdo_status_list[] = {
 	{ "VDO_BAD_NONCE", "Bad nonce" },
 	{ "VDO_JOURNAL_OVERFLOW", "Journal sequence number overflow" },
 	{ "VDO_INVALID_ADMIN_STATE", "Invalid operation for current state" },
-	{ "VDO_CANT_ADD_SYSFS_NODE", "Failed to add sysfs node" },
 };
 
 static atomic_t vdo_status_codes_registered = ATOMIC_INIT(0);
diff --git a/drivers/md/dm-vdo/status-codes.h b/drivers/md/dm-vdo/status-codes.h
index eb847def8eb4..72da04159f88 100644
--- a/drivers/md/dm-vdo/status-codes.h
+++ b/drivers/md/dm-vdo/status-codes.h
@@ -72,8 +72,6 @@ enum vdo_status_codes {
 	VDO_JOURNAL_OVERFLOW,
 	/* the VDO is not in a state to perform an admin operation */
 	VDO_INVALID_ADMIN_STATE,
-	/* failure adding a sysfs node */
-	VDO_CANT_ADD_SYSFS_NODE,
 	/* one more than last error code */
 	VDO_STATUS_CODE_LAST,
 	VDO_STATUS_CODE_BLOCK_END = VDO_ERRORS_BLOCK_END
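[Editor's note: retiring VDO_CANT_ADD_SYSFS_NODE touches two parallel definitions, the enum in status-codes.h and the name table in status-codes.c. Below is a generic C sketch, not VDO code, of a compile-time guard that catches the two drifting apart.]

	#include <stddef.h>

	enum demo_status {
		DEMO_OK,
		DEMO_BAD_NONCE,
		DEMO_INVALID_STATE,
		DEMO_STATUS_LAST, /* one more than the last real code */
	};

	static const char *const demo_status_names[] = {
		"DEMO_OK",
		"DEMO_BAD_NONCE",
		"DEMO_INVALID_STATE",
	};

	/* Fails to compile if a code is added or removed on only one side. */
	_Static_assert(sizeof(demo_status_names) / sizeof(demo_status_names[0]) ==
		       DEMO_STATUS_LAST, "status name table out of sync with enum");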
diff --git a/drivers/md/dm-vdo/sysfs.c b/drivers/md/dm-vdo/sysfs.c
deleted file mode 100644
index 70feffe9d4c4..000000000000
--- a/drivers/md/dm-vdo/sysfs.c
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2023 Red Hat
- */
-
-#include
-
-#include "logger.h"
-
-#include "constants.h"
-#include "dedupe.h"
-#include "vdo.h"
-
-static int vdo_log_level_show(char *buf, const struct kernel_param *kp)
-{
-	return sprintf(buf, "%s\n", uds_log_priority_to_string(uds_get_log_level()));
-}
-
-static int vdo_log_level_store(const char *buf, const struct kernel_param *kp)
-{
-	static char internal_buf[11];
-
-	int n = strlen(buf);
-
-	if (n > 10)
-		return -EINVAL;
-
-	memset(internal_buf, '\000', sizeof(internal_buf));
-	memcpy(internal_buf, buf, n);
-	if (internal_buf[n - 1] == '\n')
-		internal_buf[n - 1] = '\000';
-	uds_set_log_level(uds_log_string_to_priority(internal_buf));
-	return 0;
-}
-
-
-static int vdo_dedupe_timeout_interval_store(const char *buf,
-					     const struct kernel_param *kp)
-{
-	int result = param_set_uint(buf, kp);
-
-	if (result != 0)
-		return result;
-	vdo_set_dedupe_index_timeout_interval(*(uint *)kp->arg);
-	return 0;
-}
-
-static int vdo_min_dedupe_timer_interval_store(const char *buf,
-					       const struct kernel_param *kp)
-{
-	int result = param_set_uint(buf, kp);
-
-	if (result != 0)
-		return result;
-	vdo_set_dedupe_index_min_timer_interval(*(uint *)kp->arg);
-	return 0;
-}
-
-static const struct kernel_param_ops log_level_ops = {
-	.set = vdo_log_level_store,
-	.get = vdo_log_level_show,
-};
-
-
-static const struct kernel_param_ops dedupe_timeout_ops = {
-	.set = vdo_dedupe_timeout_interval_store,
-	.get = param_get_uint,
-};
-
-static const struct kernel_param_ops dedupe_timer_ops = {
-	.set = vdo_min_dedupe_timer_interval_store,
-	.get = param_get_uint,
-};
-
-module_param_cb(log_level, &log_level_ops, NULL, 0644);
-
-
-module_param_cb(deduplication_timeout_interval, &dedupe_timeout_ops,
-		&vdo_dedupe_index_timeout_interval, 0644);
-
-module_param_cb(min_deduplication_timer_interval, &dedupe_timer_ops,
-		&vdo_dedupe_index_min_timer_interval, 0644);
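[Editor's note: the deleted sysfs.c attached side effects to module parameters by pairing module_param_cb() with custom kernel_param_ops, reusing the stock uint parser and then pushing the new value into the subsystem. A minimal sketch of that shape follows; the parameter name and the demo_apply_interval() helper are invented for illustration.]

	#include <linux/moduleparam.h>

	static unsigned int demo_interval = 100;

	/* Stand-in for propagating the value, e.g. retuning a timer. */
	static void demo_apply_interval(unsigned int value)
	{
	}

	static int demo_interval_set(const char *buf, const struct kernel_param *kp)
	{
		/* Reuse the stock uint parser, then apply the new value. */
		int result = param_set_uint(buf, kp);

		if (result != 0)
			return result;
		demo_apply_interval(*(unsigned int *)kp->arg);
		return 0;
	}

	static const struct kernel_param_ops demo_interval_ops = {
		.set = demo_interval_set,
		.get = param_get_uint,
	};

	module_param_cb(demo_interval, &demo_interval_ops, &demo_interval, 0644);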
diff --git a/drivers/md/dm-vdo/uds-sysfs.c b/drivers/md/dm-vdo/uds-sysfs.c
deleted file mode 100644
index 753d81d6f207..000000000000
--- a/drivers/md/dm-vdo/uds-sysfs.c
+++ /dev/null
@@ -1,187 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2023 Red Hat
- */
-
-#include "uds-sysfs.h"
-
-#include
-#include
-#include
-
-#include "logger.h"
-#include "memory-alloc.h"
-#include "string-utils.h"
-
-#include "indexer.h"
-
-#define UDS_SYSFS_NAME "uds"
-
-static struct {
-	/* /sys/uds */
-	struct kobject kobj;
-	/* /sys/uds/parameter */
-	struct kobject parameter_kobj;
-
-	/* These flags are used to ensure a clean shutdown */
-
-	/* /sys/uds flag */
-	bool flag;
-	/* /sys/uds/parameter flag */
-	bool parameter_flag;
-} object_root;
-
-static char *buffer_to_string(const char *buf, size_t length)
-{
-	char *string;
-
-	if (vdo_allocate(length + 1, char, __func__, &string) != VDO_SUCCESS)
-		return NULL;
-
-	memcpy(string, buf, length);
-	string[length] = '\0';
-	if (string[length - 1] == '\n')
-		string[length - 1] = '\0';
-
-	return string;
-}
-
-/*
- * This is the code for any directory in the /sys/ tree that contains no regular files
- * (only subdirectories).
- */
-
-static void empty_release(struct kobject *kobj)
-{
-}
-
-static ssize_t empty_show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
-	return 0;
-}
-
-static ssize_t empty_store(struct kobject *kobj, struct attribute *attr, const char *buf,
-			   size_t length)
-{
-	return length;
-}
-
-static const struct sysfs_ops empty_ops = {
-	.show = empty_show,
-	.store = empty_store,
-};
-
-static struct attribute *empty_attrs[] = {
-	NULL,
-};
-ATTRIBUTE_GROUPS(empty);
-
-static const struct kobj_type empty_object_type = {
-	.release = empty_release,
-	.sysfs_ops = &empty_ops,
-	.default_groups = empty_groups,
-};
-
-/*
- * This is the code for the /sys/<module_name>/parameter directory.
- *	<module_name>/log_level	UDS_LOG_LEVEL
- */
-
-struct parameter_attribute {
-	struct attribute attr;
-	const char *(*show_string)(void);
-	void (*store_string)(const char *string);
-};
-
-static ssize_t parameter_show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
-	struct parameter_attribute *pa;
-
-	pa = container_of(attr, struct parameter_attribute, attr);
-	if (pa->show_string != NULL)
-		return sprintf(buf, "%s\n", pa->show_string());
-	else
-		return -EINVAL;
-}
-
-static ssize_t parameter_store(struct kobject *kobj, struct attribute *attr,
-			       const char *buf, size_t length)
-{
-	char *string;
-	struct parameter_attribute *pa;
-
-	pa = container_of(attr, struct parameter_attribute, attr);
-	if (pa->store_string == NULL)
-		return -EINVAL;
-	string = buffer_to_string(buf, length);
-	if (string == NULL)
-		return -ENOMEM;
-
-	pa->store_string(string);
-	vdo_free(string);
-	return length;
-}
-
-static const char *parameter_show_log_level(void)
-{
-	return uds_log_priority_to_string(uds_get_log_level());
-}
-
-static void parameter_store_log_level(const char *string)
-{
-	uds_set_log_level(uds_log_string_to_priority(string));
-}
-
-static struct parameter_attribute log_level_attr = {
-	.attr = { .name = "log_level", .mode = 0600 },
-	.show_string = parameter_show_log_level,
-	.store_string = parameter_store_log_level,
-};
-
-static struct attribute *parameter_attrs[] = {
-	&log_level_attr.attr,
-	NULL,
-};
-ATTRIBUTE_GROUPS(parameter);
-
-static const struct sysfs_ops parameter_ops = {
-	.show = parameter_show,
-	.store = parameter_store,
-};
-
-static const struct kobj_type parameter_object_type = {
-	.release = empty_release,
-	.sysfs_ops = &parameter_ops,
-	.default_groups = parameter_groups,
-};
-
-int uds_init_sysfs(void)
-{
-	int result;
-
-	memset(&object_root, 0, sizeof(object_root));
-	kobject_init(&object_root.kobj, &empty_object_type);
-	result = kobject_add(&object_root.kobj, NULL, UDS_SYSFS_NAME);
-	if (result == 0) {
-		object_root.flag = true;
-		kobject_init(&object_root.parameter_kobj, &parameter_object_type);
-		result = kobject_add(&object_root.parameter_kobj, &object_root.kobj,
-				     "parameter");
-		if (result == 0)
-			object_root.parameter_flag = true;
-	}
-
-	if (result != 0)
-		uds_put_sysfs();
-
-	return result;
-}
-
-void uds_put_sysfs(void)
-{
-	if (object_root.parameter_flag)
-		kobject_put(&object_root.parameter_kobj);
-
-	if (object_root.flag)
-		kobject_put(&object_root.kobj);
-}
diff --git a/drivers/md/dm-vdo/uds-sysfs.h b/drivers/md/dm-vdo/uds-sysfs.h
deleted file mode 100644
index c3d00a7187bd..000000000000
--- a/drivers/md/dm-vdo/uds-sysfs.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2023 Red Hat
- */
-
-#ifndef UDS_SYSFS_H
-#define UDS_SYSFS_H
-
-int uds_init_sysfs(void);
-void uds_put_sysfs(void);
-
-#endif /* UDS_SYSFS_H */
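[Editor's note: uds_init_sysfs() follows the standard two-phase kobject lifecycle: kobject_init() binds the object to its kobj_type, kobject_add() publishes it under a parent, and each successfully initialized kobject must eventually be released with kobject_put(), including on a failed partial setup. A reduced sketch of that setup and unwind shape follows; the demo_* names and the externally defined demo_type are assumptions for illustration.]

	#include <linux/kobject.h>

	static struct kobject demo_root;
	static struct kobject demo_child;
	static bool demo_root_added;
	static bool demo_child_added;

	/* Assumed kobj_type with an empty release, as in the file above. */
	extern const struct kobj_type demo_type;

	static int demo_sysfs_init(void)
	{
		int result;

		kobject_init(&demo_root, &demo_type);
		result = kobject_add(&demo_root, NULL, "demo");
		if (result == 0) {
			demo_root_added = true;
			kobject_init(&demo_child, &demo_type);
			result = kobject_add(&demo_child, &demo_root, "parameter");
			if (result == 0)
				demo_child_added = true;
		}

		if (result != 0) {
			/* Unwind whatever was published. */
			if (demo_child_added)
				kobject_put(&demo_child);
			if (demo_root_added)
				kobject_put(&demo_root);
		}

		return result;
	}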
diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c
index 11be2ab17e29..28e6352c758e 100644
--- a/drivers/md/dm-vdo/vdo.c
+++ b/drivers/md/dm-vdo/vdo.c
@@ -53,7 +53,6 @@
 #include "logical-zone.h"
 #include "packer.h"
 #include "physical-zone.h"
-#include "pool-sysfs.h"
 #include "recovery-journal.h"
 #include "slab-depot.h"
 #include "statistics.h"
@@ -691,13 +690,6 @@ void vdo_destroy(struct vdo *vdo)
 
 	vdo->allocations_allowed = true;
 
-	/* Stop services that need to gather VDO statistics from the worker threads. */
-	if (vdo->sysfs_added) {
-		init_completion(&vdo->stats_shutdown);
-		kobject_put(&vdo->stats_directory);
-		wait_for_completion(&vdo->stats_shutdown);
-	}
-
 	finish_vdo(vdo);
 	unregister_vdo(vdo);
 	free_data_vio_pool(vdo->data_vio_pool);
@@ -732,15 +724,7 @@ void vdo_destroy(struct vdo *vdo)
 		vdo_free(vdo_forget(vdo->compression_context));
 	}
 
-
-	/*
-	 * The call to kobject_put on the kobj sysfs node will decrement its reference count; when
-	 * the count goes to zero the VDO object will be freed as a side effect.
-	 */
-	if (!vdo->sysfs_added)
-		vdo_free(vdo);
-	else
-		kobject_put(&vdo->vdo_directory);
+	vdo_free(vdo);
 }
 
 static int initialize_super_block(struct vdo *vdo, struct vdo_super_block *super_block)
@@ -817,42 +801,6 @@ void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent)
			       REQ_OP_READ);
 }
 
-/**
- * pool_stats_release() - Signal that sysfs stats have been shut down.
- * @directory: The vdo stats directory.
- */
-static void pool_stats_release(struct kobject *directory)
-{
-	struct vdo *vdo = container_of(directory, struct vdo, stats_directory);
-
-	complete(&vdo->stats_shutdown);
-}
-
-ATTRIBUTE_GROUPS(vdo_pool_stats);
-static const struct kobj_type stats_directory_type = {
-	.release = pool_stats_release,
-	.sysfs_ops = &vdo_pool_stats_sysfs_ops,
-	.default_groups = vdo_pool_stats_groups,
-};
-
-/**
- * vdo_add_sysfs_stats_dir() - Add the stats directory to the vdo sysfs directory.
- * @vdo: The vdo.
- *
- * Return: VDO_SUCCESS or an error.
- */
-int vdo_add_sysfs_stats_dir(struct vdo *vdo)
-{
-	int result;
-
-	kobject_init(&vdo->stats_directory, &stats_directory_type);
-	result = kobject_add(&vdo->stats_directory, &vdo->vdo_directory, "statistics");
-	if (result != 0)
-		return VDO_CANT_ADD_SYSFS_NODE;
-
-	return VDO_SUCCESS;
-}
-
 /**
  * vdo_get_backing_device() - Get the block device object underlying a vdo.
  * @vdo: The vdo.
diff --git a/drivers/md/dm-vdo/vdo.h b/drivers/md/dm-vdo/vdo.h
index 01558fc67f24..483ae873e002 100644
--- a/drivers/md/dm-vdo/vdo.h
+++ b/drivers/md/dm-vdo/vdo.h
@@ -10,7 +10,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 
@@ -248,11 +247,6 @@ struct vdo {
 	struct vdo_statistics stats_buffer;
 	/* Protects the stats_buffer */
 	struct mutex stats_mutex;
-	/* true if sysfs directory is set up */
-	bool sysfs_added;
-	/* Used when shutting down the sysfs statistics */
-	struct completion stats_shutdown;
-
 	/* A list of all device_configs referencing this vdo */
 	struct list_head device_config_list;
 
@@ -264,15 +258,10 @@ struct vdo {
 	u64 starting_sector_offset;
 	struct volume_geometry geometry;
 
-	/* For sysfs */
-	struct kobject vdo_directory;
-	struct kobject stats_directory;
-
 	/* N blobs of context data for LZ4 code, one per CPU thread. */
 	char **compression_context;
 };
 
-
 /**
  * vdo_uses_bio_ack_queue() - Indicate whether the vdo is configured to use a separate work queue
  * for acknowledging received and processed bios.
@@ -315,8 +304,6 @@ void vdo_destroy(struct vdo *vdo);
 
 void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent);
 
-int __must_check vdo_add_sysfs_stats_dir(struct vdo *vdo);
-
 struct block_device * __must_check vdo_get_backing_device(const struct vdo *vdo);
 
 const char * __must_check vdo_get_device_name(const struct dm_target *target);
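[Editor's note: the comment deleted from vdo_destroy() states the rule that motivated the special case being removed: once a kobject embedded in a larger structure has been initialized, the containing object may only be freed from the kobj_type release callback, reached by dropping the last reference with kobject_put(). A generic sketch of that ownership pattern follows; the types are invented and are not VDO code.]

	#include <linux/kobject.h>
	#include <linux/slab.h>

	struct demo_device {
		struct kobject kobj;
		/* ... device state ... */
	};

	static void demo_release(struct kobject *kobj)
	{
		/* Runs only when the last reference is dropped. */
		kfree(container_of(kobj, struct demo_device, kobj));
	}

	static const struct kobj_type demo_ktype = {
		.release = demo_release,
	};

	static void demo_destroy(struct demo_device *dev, bool kobj_initialized)
	{
		/*
		 * After kobject_init(), calling kfree() directly would be a
		 * use-after-free hazard for anyone still holding a reference;
		 * drop our reference instead and let demo_release() free the
		 * container.
		 */
		if (kobj_initialized)
			kobject_put(&dev->kobj);
		else
			kfree(dev);
	}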
-- 
cgit v1.2.3

From 3584240b9ce4adf63c3a985a730eb3f75806c26d Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Wed, 14 Feb 2024 09:22:04 -0600
Subject: dm vdo logger: change from uds_ to vdo_ namespace

Rename all uds_log_* to vdo_log_*.

Signed-off-by: Mike Snitzer
Signed-off-by: Chung Chung
Signed-off-by: Matthew Sakai
---
 drivers/md/dm-vdo/admin-state.c            |   8 +-
 drivers/md/dm-vdo/block-map.c              |  20 ++--
 drivers/md/dm-vdo/data-vio.c               |  18 ++--
 drivers/md/dm-vdo/dedupe.c                 |  36 ++++----
 drivers/md/dm-vdo/dm-vdo-target.c          | 144 ++++++++++++++---------------
 drivers/md/dm-vdo/dump.c                   |  14 +--
 drivers/md/dm-vdo/encodings.c              |  26 +++---
 drivers/md/dm-vdo/errors.c                 |   6 +-
 drivers/md/dm-vdo/errors.h                 |   4 +-
 drivers/md/dm-vdo/flush.c                  |   8 +-
 drivers/md/dm-vdo/funnel-workqueue.c       |   2 +-
 drivers/md/dm-vdo/indexer/chapter-index.c  |   4 +-
 drivers/md/dm-vdo/indexer/config.c         |  48 +++++-----
 drivers/md/dm-vdo/indexer/delta-index.c    |  50 +++++-----
 drivers/md/dm-vdo/indexer/index-layout.c   |  82 ++++++++--------
 drivers/md/dm-vdo/indexer/index-page-map.c |   2 +-
 drivers/md/dm-vdo/indexer/index-session.c  |  44 ++++-----
 drivers/md/dm-vdo/indexer/index.c          |  52 +++++------
 drivers/md/dm-vdo/indexer/io-factory.c     |   2 +-
 drivers/md/dm-vdo/indexer/open-chapter.c   |   6 +-
 drivers/md/dm-vdo/indexer/volume-index.c   |  46 ++++-----
 drivers/md/dm-vdo/indexer/volume.c         |  64 ++++++-------
 drivers/md/dm-vdo/int-map.c                |   2 +-
 drivers/md/dm-vdo/io-submitter.c           |   4 +-
 drivers/md/dm-vdo/logger.c                 |  52 +++++------
 drivers/md/dm-vdo/logger.h                 |  84 ++++++++---------
 drivers/md/dm-vdo/logical-zone.c           |   4 +-
 drivers/md/dm-vdo/memory-alloc.c           |  14 +--
 drivers/md/dm-vdo/message-stats.c          |   2 +-
 drivers/md/dm-vdo/packer.c                 |   6 +-
 drivers/md/dm-vdo/permassert.c             |   4 +-
 drivers/md/dm-vdo/physical-zone.c          |   6 +-
 drivers/md/dm-vdo/recovery-journal.c       |  16 ++--
 drivers/md/dm-vdo/repair.c                 |  46 ++++-----
 drivers/md/dm-vdo/slab-depot.c             |  54 +++++------
 drivers/md/dm-vdo/status-codes.c           |   6 +-
 drivers/md/dm-vdo/thread-utils.c           |   2 +-
 drivers/md/dm-vdo/vdo.c                    |  14 +--
 drivers/md/dm-vdo/vio.c                    |  10 +-
 39 files changed, 506 insertions(+), 506 deletions(-)
(limited to 'drivers/md/dm-vdo/vdo.c')

diff --git a/drivers/md/dm-vdo/admin-state.c b/drivers/md/dm-vdo/admin-state.c
index d695af42d140..3f9dba525154 100644
--- a/drivers/md/dm-vdo/admin-state.c
+++ b/drivers/md/dm-vdo/admin-state.c
@@ -228,12 +228,12 @@ static int __must_check begin_operation(struct admin_state *state,
 	const struct admin_state_code *next_state = get_next_state(state, operation);
 
 	if (next_state == NULL) {
-		result = uds_log_error_strerror(VDO_INVALID_ADMIN_STATE,
+		result = vdo_log_error_strerror(VDO_INVALID_ADMIN_STATE,
						"Can't start %s from %s",
						operation->name,
						vdo_get_admin_state_code(state)->name);
 	} else if (state->waiter != NULL) {
-		result = uds_log_error_strerror(VDO_COMPONENT_BUSY,
+		result = vdo_log_error_strerror(VDO_COMPONENT_BUSY,
						"Can't start %s with extant waiter",
						operation->name);
 	} else {
@@ -291,7 +291,7 @@ static bool check_code(bool valid, const struct admin_state_code *code, const ch
 	if (valid)
 		return true;
 
-	result = uds_log_error_strerror(VDO_INVALID_ADMIN_STATE,
+	result = vdo_log_error_strerror(VDO_INVALID_ADMIN_STATE,
					"%s is not a %s", code->name, what);
 	if (waiter != NULL)
 		vdo_continue_completion(waiter, result);
@@ -334,7 +334,7 @@ bool vdo_start_draining(struct admin_state *state,
 	}
 
 	if (!code->normal) {
-		uds_log_error_strerror(VDO_INVALID_ADMIN_STATE, "can't start %s from %s",
+		vdo_log_error_strerror(VDO_INVALID_ADMIN_STATE, "can't start %s from %s",
				       operation->name, code->name);
 		vdo_continue_completion(waiter, VDO_INVALID_ADMIN_STATE);
 		return false;
diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c
index b70294d8bb61..e79156dc7cc9 100644
--- a/drivers/md/dm-vdo/block-map.c
+++ 
b/drivers/md/dm-vdo/block-map.c @@ -264,7 +264,7 @@ static void report_cache_pressure(struct vdo_page_cache *cache) ADD_ONCE(cache->stats.cache_pressure, 1); if (cache->waiter_count > cache->page_count) { if ((cache->pressure_report % LOG_INTERVAL) == 0) - uds_log_info("page cache pressure %u", cache->stats.cache_pressure); + vdo_log_info("page cache pressure %u", cache->stats.cache_pressure); if (++cache->pressure_report >= DISPLAY_INTERVAL) cache->pressure_report = 0; @@ -483,7 +483,7 @@ static void complete_with_page(struct page_info *info, bool available = vdo_page_comp->writable ? is_present(info) : is_valid(info); if (!available) { - uds_log_error_strerror(VDO_BAD_PAGE, + vdo_log_error_strerror(VDO_BAD_PAGE, "Requested cache page %llu in state %s is not %s", (unsigned long long) info->pbn, get_page_state_name(info->state), @@ -563,7 +563,7 @@ static void set_persistent_error(struct vdo_page_cache *cache, const char *conte struct vdo *vdo = cache->vdo; if ((result != VDO_READ_ONLY) && !vdo_is_read_only(vdo)) { - uds_log_error_strerror(result, "VDO Page Cache persistent error: %s", + vdo_log_error_strerror(result, "VDO Page Cache persistent error: %s", context); vdo_enter_read_only_mode(vdo, result); } @@ -704,7 +704,7 @@ static void page_is_loaded(struct vdo_completion *completion) validity = vdo_validate_block_map_page(page, nonce, info->pbn); if (validity == VDO_BLOCK_MAP_PAGE_BAD) { physical_block_number_t pbn = vdo_get_block_map_page_pbn(page); - int result = uds_log_error_strerror(VDO_BAD_PAGE, + int result = vdo_log_error_strerror(VDO_BAD_PAGE, "Expected page %llu but got page %llu instead", (unsigned long long) info->pbn, (unsigned long long) pbn); @@ -894,7 +894,7 @@ static void allocate_free_page(struct page_info *info) if (!vdo_waitq_has_waiters(&cache->free_waiters)) { if (cache->stats.cache_pressure > 0) { - uds_log_info("page cache pressure relieved"); + vdo_log_info("page cache pressure relieved"); WRITE_ONCE(cache->stats.cache_pressure, 0); } @@ -1012,7 +1012,7 @@ static void handle_page_write_error(struct vdo_completion *completion) /* If we're already read-only, write failures are to be expected. */ if (result != VDO_READ_ONLY) { - uds_log_ratelimit(uds_log_error, + vdo_log_ratelimit(vdo_log_error, "failed to write block map page %llu", (unsigned long long) info->pbn); } @@ -1397,7 +1397,7 @@ bool vdo_copy_valid_page(char *buffer, nonce_t nonce, } if (validity == VDO_BLOCK_MAP_PAGE_BAD) { - uds_log_error_strerror(VDO_BAD_PAGE, + vdo_log_error_strerror(VDO_BAD_PAGE, "Expected page %llu but got page %llu instead", (unsigned long long) pbn, (unsigned long long) vdo_get_block_map_page_pbn(loaded)); @@ -1785,7 +1785,7 @@ static void continue_with_loaded_page(struct data_vio *data_vio, vdo_unpack_block_map_entry(&page->entries[slot.block_map_slot.slot]); if (is_invalid_tree_entry(vdo_from_data_vio(data_vio), &mapping, lock->height)) { - uds_log_error_strerror(VDO_BAD_MAPPING, + vdo_log_error_strerror(VDO_BAD_MAPPING, "Invalid block map tree PBN: %llu with state %u for page index %u at height %u", (unsigned long long) mapping.pbn, mapping.state, lock->tree_slots[lock->height - 1].page_index, @@ -2263,7 +2263,7 @@ void vdo_find_block_map_slot(struct data_vio *data_vio) /* The page at this height has been allocated and loaded. 
*/ mapping = vdo_unpack_block_map_entry(&page->entries[tree_slot.block_map_slot.slot]); if (is_invalid_tree_entry(vdo_from_data_vio(data_vio), &mapping, lock->height)) { - uds_log_error_strerror(VDO_BAD_MAPPING, + vdo_log_error_strerror(VDO_BAD_MAPPING, "Invalid block map tree PBN: %llu with state %u for page index %u at height %u", (unsigned long long) mapping.pbn, mapping.state, lock->tree_slots[lock->height - 1].page_index, @@ -3140,7 +3140,7 @@ static int __must_check set_mapped_location(struct data_vio *data_vio, * Log the corruption even if we wind up ignoring it for write VIOs, converting all cases * to VDO_BAD_MAPPING. */ - uds_log_error_strerror(VDO_BAD_MAPPING, + vdo_log_error_strerror(VDO_BAD_MAPPING, "PBN %llu with state %u read from the block map was invalid", (unsigned long long) mapped.pbn, mapped.state); diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c index 2b0d42c77e05..94f6f1ccfb7d 100644 --- a/drivers/md/dm-vdo/data-vio.c +++ b/drivers/md/dm-vdo/data-vio.c @@ -792,25 +792,25 @@ static int initialize_data_vio(struct data_vio *data_vio, struct vdo *vdo) result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "data_vio data", &data_vio->vio.data); if (result != VDO_SUCCESS) - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "data_vio data allocation failure"); result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "compressed block", &data_vio->compression.block); if (result != VDO_SUCCESS) { - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "data_vio compressed block allocation failure"); } result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "vio scratch", &data_vio->scratch_block); if (result != VDO_SUCCESS) - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "data_vio scratch allocation failure"); result = vdo_create_bio(&bio); if (result != VDO_SUCCESS) - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "data_vio data bio allocation failure"); vdo_initialize_completion(&data_vio->decrement_completion, vdo, @@ -1025,7 +1025,7 @@ void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *com static void dump_limiter(const char *name, struct limiter *limiter) { - uds_log_info("%s: %u of %u busy (max %u), %s", name, limiter->busy, + vdo_log_info("%s: %u of %u busy (max %u), %s", name, limiter->busy, limiter->limit, limiter->max_busy, ((bio_list_empty(&limiter->waiters) && bio_list_empty(&limiter->new_waiters)) ? 
@@ -1323,7 +1323,7 @@ static void perform_cleanup_stage(struct data_vio *data_vio, if ((data_vio->recovery_sequence_number > 0) && (READ_ONCE(vdo->read_only_notifier.read_only_error) == VDO_SUCCESS) && (data_vio->vio.completion.result != VDO_READ_ONLY)) - uds_log_warning("VDO not read-only when cleaning data_vio with RJ lock"); + vdo_log_warning("VDO not read-only when cleaning data_vio with RJ lock"); fallthrough; case VIO_RELEASE_LOGICAL: @@ -1353,7 +1353,7 @@ static void enter_read_only_mode(struct vdo_completion *completion) if (completion->result != VDO_READ_ONLY) { struct data_vio *data_vio = as_data_vio(completion); - uds_log_error_strerror(completion->result, + vdo_log_error_strerror(completion->result, "Preparing to enter read-only mode: data_vio for LBN %llu (becoming mapped to %llu, previously mapped to %llu, allocated %llu) is completing with a fatal error after operation %s", (unsigned long long) data_vio->logical.lbn, (unsigned long long) data_vio->new_mapped.pbn, @@ -1449,14 +1449,14 @@ int uncompress_data_vio(struct data_vio *data_vio, &fragment_offset, &fragment_size); if (result != VDO_SUCCESS) { - uds_log_debug("%s: compressed fragment error %d", __func__, result); + vdo_log_debug("%s: compressed fragment error %d", __func__, result); return result; } size = LZ4_decompress_safe((block->data + fragment_offset), buffer, fragment_size, VDO_BLOCK_SIZE); if (size != VDO_BLOCK_SIZE) { - uds_log_debug("%s: lz4 error", __func__); + vdo_log_debug("%s: lz4 error", __func__); return VDO_INVALID_FRAGMENT; } diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c index c031ab01054d..117266e1b3ae 100644 --- a/drivers/md/dm-vdo/dedupe.c +++ b/drivers/md/dm-vdo/dedupe.c @@ -1287,7 +1287,7 @@ static bool acquire_provisional_reference(struct data_vio *agent, struct pbn_loc if (result == VDO_SUCCESS) return true; - uds_log_warning_strerror(result, + vdo_log_warning_strerror(result, "Error acquiring provisional reference for dedupe candidate; aborting dedupe"); agent->is_duplicate = false; vdo_release_physical_zone_pbn_lock(agent->duplicate.zone, @@ -1614,7 +1614,7 @@ static bool decode_uds_advice(struct dedupe_context *context) version = encoding->data[offset++]; if (version != UDS_ADVICE_VERSION) { - uds_log_error("invalid UDS advice version code %u", version); + vdo_log_error("invalid UDS advice version code %u", version); return false; } @@ -1625,7 +1625,7 @@ static bool decode_uds_advice(struct dedupe_context *context) /* Don't use advice that's clearly meaningless. */ if ((advice->state == VDO_MAPPING_STATE_UNMAPPED) || (advice->pbn == VDO_ZERO_BLOCK)) { - uds_log_debug("Invalid advice from deduplication server: pbn %llu, state %u. Giving up on deduplication of logical block %llu", + vdo_log_debug("Invalid advice from deduplication server: pbn %llu, state %u. 
Giving up on deduplication of logical block %llu", (unsigned long long) advice->pbn, advice->state, (unsigned long long) data_vio->logical.lbn); atomic64_inc(&vdo->stats.invalid_advice_pbn_count); @@ -1634,7 +1634,7 @@ static bool decode_uds_advice(struct dedupe_context *context) result = vdo_get_physical_zone(vdo, advice->pbn, &advice->zone); if ((result != VDO_SUCCESS) || (advice->zone == NULL)) { - uds_log_debug("Invalid physical block number from deduplication server: %llu, giving up on deduplication of logical block %llu", + vdo_log_debug("Invalid physical block number from deduplication server: %llu, giving up on deduplication of logical block %llu", (unsigned long long) advice->pbn, (unsigned long long) data_vio->logical.lbn); atomic64_inc(&vdo->stats.invalid_advice_pbn_count); @@ -2053,7 +2053,7 @@ static void close_index(struct hash_zones *zones) result = uds_close_index(zones->index_session); if (result != UDS_SUCCESS) - uds_log_error_strerror(result, "Error closing index"); + vdo_log_error_strerror(result, "Error closing index"); spin_lock(&zones->lock); zones->index_state = IS_CLOSED; zones->error_flag |= result != UDS_SUCCESS; @@ -2080,7 +2080,7 @@ static void open_index(struct hash_zones *zones) result = uds_open_index(create_flag ? UDS_CREATE : UDS_LOAD, &zones->parameters, zones->index_session); if (result != UDS_SUCCESS) - uds_log_error_strerror(result, "Error opening index"); + vdo_log_error_strerror(result, "Error opening index"); spin_lock(&zones->lock); if (!create_flag) { @@ -2104,7 +2104,7 @@ static void open_index(struct hash_zones *zones) zones->index_target = IS_CLOSED; zones->error_flag = true; spin_unlock(&zones->lock); - uds_log_info("Setting UDS index target state to error"); + vdo_log_info("Setting UDS index target state to error"); spin_lock(&zones->lock); } /* @@ -2160,7 +2160,7 @@ static void report_dedupe_timeouts(struct hash_zones *zones, unsigned int timeou u64 unreported = atomic64_read(&zones->timeouts); unreported -= zones->reported_timeouts; - uds_log_debug("UDS index timeout on %llu requests", + vdo_log_debug("UDS index timeout on %llu requests", (unsigned long long) unreported); zones->reported_timeouts += unreported; } @@ -2207,7 +2207,7 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones) 1, NULL); if (result != VDO_SUCCESS) { uds_destroy_index_session(vdo_forget(zones->index_session)); - uds_log_error("UDS index queue initialization failed (%d)", result); + vdo_log_error("UDS index queue initialization failed (%d)", result); return result; } @@ -2502,7 +2502,7 @@ static void initiate_suspend_index(struct admin_state *state) result = uds_suspend_index_session(zones->index_session, save); if (result != UDS_SUCCESS) - uds_log_error_strerror(result, "Error suspending dedupe index"); + vdo_log_error_strerror(result, "Error suspending dedupe index"); } vdo_finish_draining(state); @@ -2585,7 +2585,7 @@ static void resume_index(void *context, struct vdo_completion *parent) zones->parameters.bdev = config->owned_device->bdev; result = uds_resume_index_session(zones->index_session, zones->parameters.bdev); if (result != UDS_SUCCESS) - uds_log_error_strerror(result, "Error resuming dedupe index"); + vdo_log_error_strerror(result, "Error resuming dedupe index"); spin_lock(&zones->lock); vdo_resume_if_quiescent(&zones->state); @@ -2665,7 +2665,7 @@ static void get_index_statistics(struct hash_zones *zones, result = uds_get_index_session_stats(zones->index_session, &index_stats); if (result != UDS_SUCCESS) { - 
uds_log_error_strerror(result, "Error reading index stats"); + vdo_log_error_strerror(result, "Error reading index stats"); return; } @@ -2750,7 +2750,7 @@ static void dump_hash_lock(const struct hash_lock *lock) * unambiguous. 'U' indicates a lock not registered in the map. */ state = get_hash_lock_state_name(lock->state); - uds_log_info(" hl %px: %3.3s %c%llu/%u rc=%u wc=%zu agt=%px", + vdo_log_info(" hl %px: %3.3s %c%llu/%u rc=%u wc=%zu agt=%px", lock, state, (lock->registered ? 'D' : 'U'), (unsigned long long) lock->duplicate.pbn, lock->duplicate.state, lock->reference_count, @@ -2784,11 +2784,11 @@ static void dump_hash_zone(const struct hash_zone *zone) data_vio_count_t i; if (zone->hash_lock_map == NULL) { - uds_log_info("struct hash_zone %u: NULL map", zone->zone_number); + vdo_log_info("struct hash_zone %u: NULL map", zone->zone_number); return; } - uds_log_info("struct hash_zone %u: mapSize=%zu", + vdo_log_info("struct hash_zone %u: mapSize=%zu", zone->zone_number, vdo_int_map_size(zone->hash_lock_map)); for (i = 0; i < LOCK_POOL_CAPACITY; i++) dump_hash_lock(&zone->lock_array[i]); @@ -2808,9 +2808,9 @@ void vdo_dump_hash_zones(struct hash_zones *zones) target = (zones->changing ? index_state_to_string(zones, zones->index_target) : NULL); spin_unlock(&zones->lock); - uds_log_info("UDS index: state: %s", state); + vdo_log_info("UDS index: state: %s", state); if (target != NULL) - uds_log_info("UDS index: changing to state: %s", target); + vdo_log_info("UDS index: changing to state: %s", target); for (zone = 0; zone < zones->zone_count; zone++) dump_hash_zone(&zones->zones[zone]); @@ -2957,7 +2957,7 @@ static void set_target_state(struct hash_zones *zones, enum index_state target, spin_unlock(&zones->lock); if (old_state != new_state) - uds_log_info("Setting UDS index target state to %s", new_state); + vdo_log_info("Setting UDS index target state to %s", new_state); } const char *vdo_get_dedupe_index_state_name(struct hash_zones *zones) diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c index 288e9b79bf16..4908996f5224 100644 --- a/drivers/md/dm-vdo/dm-vdo-target.c +++ b/drivers/md/dm-vdo/dm-vdo-target.c @@ -232,9 +232,9 @@ static int get_version_number(int argc, char **argv, char **error_ptr, } if (*version_ptr != TABLE_VERSION) { - uds_log_warning("Detected version mismatch between kernel module and tools kernel: %d, tool: %d", + vdo_log_warning("Detected version mismatch between kernel module and tools kernel: %d, tool: %d", TABLE_VERSION, *version_ptr); - uds_log_warning("Please consider upgrading management tools to match kernel."); + vdo_log_warning("Please consider upgrading management tools to match kernel."); } return VDO_SUCCESS; } @@ -399,10 +399,10 @@ static int process_one_thread_config_spec(const char *thread_param_type, /* Handle limited thread parameters */ if (strcmp(thread_param_type, "bioRotationInterval") == 0) { if (count == 0) { - uds_log_error("thread config string error: 'bioRotationInterval' of at least 1 is required"); + vdo_log_error("thread config string error: 'bioRotationInterval' of at least 1 is required"); return -EINVAL; } else if (count > VDO_BIO_ROTATION_INTERVAL_LIMIT) { - uds_log_error("thread config string error: 'bioRotationInterval' cannot be higher than %d", + vdo_log_error("thread config string error: 'bioRotationInterval' cannot be higher than %d", VDO_BIO_ROTATION_INTERVAL_LIMIT); return -EINVAL; } @@ -411,7 +411,7 @@ static int process_one_thread_config_spec(const char *thread_param_type, } if 
(strcmp(thread_param_type, "logical") == 0) { if (count > MAX_VDO_LOGICAL_ZONES) { - uds_log_error("thread config string error: at most %d 'logical' threads are allowed", + vdo_log_error("thread config string error: at most %d 'logical' threads are allowed", MAX_VDO_LOGICAL_ZONES); return -EINVAL; } @@ -420,7 +420,7 @@ static int process_one_thread_config_spec(const char *thread_param_type, } if (strcmp(thread_param_type, "physical") == 0) { if (count > MAX_VDO_PHYSICAL_ZONES) { - uds_log_error("thread config string error: at most %d 'physical' threads are allowed", + vdo_log_error("thread config string error: at most %d 'physical' threads are allowed", MAX_VDO_PHYSICAL_ZONES); return -EINVAL; } @@ -429,7 +429,7 @@ static int process_one_thread_config_spec(const char *thread_param_type, } /* Handle other thread count parameters */ if (count > MAXIMUM_VDO_THREADS) { - uds_log_error("thread config string error: at most %d '%s' threads are allowed", + vdo_log_error("thread config string error: at most %d '%s' threads are allowed", MAXIMUM_VDO_THREADS, thread_param_type); return -EINVAL; } @@ -439,7 +439,7 @@ static int process_one_thread_config_spec(const char *thread_param_type, } if (strcmp(thread_param_type, "cpu") == 0) { if (count == 0) { - uds_log_error("thread config string error: at least one 'cpu' thread required"); + vdo_log_error("thread config string error: at least one 'cpu' thread required"); return -EINVAL; } config->cpu_threads = count; @@ -451,7 +451,7 @@ static int process_one_thread_config_spec(const char *thread_param_type, } if (strcmp(thread_param_type, "bio") == 0) { if (count == 0) { - uds_log_error("thread config string error: at least one 'bio' thread required"); + vdo_log_error("thread config string error: at least one 'bio' thread required"); return -EINVAL; } config->bio_threads = count; @@ -462,7 +462,7 @@ static int process_one_thread_config_spec(const char *thread_param_type, * Don't fail, just log. This will handle version mismatches between user mode tools and * kernel. 
*/ - uds_log_info("unknown thread parameter type \"%s\"", thread_param_type); + vdo_log_info("unknown thread parameter type \"%s\"", thread_param_type); return VDO_SUCCESS; } @@ -484,7 +484,7 @@ static int parse_one_thread_config_spec(const char *spec, return result; if ((fields[0] == NULL) || (fields[1] == NULL) || (fields[2] != NULL)) { - uds_log_error("thread config string error: expected thread parameter assignment, saw \"%s\"", + vdo_log_error("thread config string error: expected thread parameter assignment, saw \"%s\"", spec); free_string_array(fields); return -EINVAL; @@ -492,7 +492,7 @@ static int parse_one_thread_config_spec(const char *spec, result = kstrtouint(fields[1], 10, &count); if (result) { - uds_log_error("thread config string error: integer value needed, found \"%s\"", + vdo_log_error("thread config string error: integer value needed, found \"%s\"", fields[1]); free_string_array(fields); return result; @@ -564,12 +564,12 @@ static int process_one_key_value_pair(const char *key, unsigned int value, /* Non thread optional parameters */ if (strcmp(key, "maxDiscard") == 0) { if (value == 0) { - uds_log_error("optional parameter error: at least one max discard block required"); + vdo_log_error("optional parameter error: at least one max discard block required"); return -EINVAL; } /* Max discard sectors in blkdev_issue_discard is UINT_MAX >> 9 */ if (value > (UINT_MAX / VDO_BLOCK_SIZE)) { - uds_log_error("optional parameter error: at most %d max discard blocks are allowed", + vdo_log_error("optional parameter error: at most %d max discard blocks are allowed", UINT_MAX / VDO_BLOCK_SIZE); return -EINVAL; } @@ -604,7 +604,7 @@ static int parse_one_key_value_pair(const char *key, const char *value, /* The remaining arguments must have integral values. 
*/ result = kstrtouint(value, 10, &count); if (result) { - uds_log_error("optional config string error: integer value needed, found \"%s\"", + vdo_log_error("optional config string error: integer value needed, found \"%s\"", value); return result; } @@ -745,7 +745,7 @@ static int parse_device_config(int argc, char **argv, struct dm_target *ti, return VDO_BAD_CONFIGURATION; } - uds_log_info("table line: %s", config->original_string); + vdo_log_info("table line: %s", config->original_string); config->thread_counts = (struct thread_count_config) { .bio_ack_threads = 1, @@ -872,7 +872,7 @@ static int parse_device_config(int argc, char **argv, struct dm_target *ti, result = dm_get_device(ti, config->parent_device_name, dm_table_get_mode(ti->table), &config->owned_device); if (result != 0) { - uds_log_error("couldn't open device \"%s\": error %d", + vdo_log_error("couldn't open device \"%s\": error %d", config->parent_device_name, result); handle_parse_error(config, error_ptr, "Unable to open storage device"); return VDO_BAD_CONFIGURATION; @@ -1029,12 +1029,12 @@ static int __must_check process_vdo_message_locked(struct vdo *vdo, unsigned int return 0; } - uds_log_warning("invalid argument '%s' to dmsetup compression message", + vdo_log_warning("invalid argument '%s' to dmsetup compression message", argv[1]); return -EINVAL; } - uds_log_warning("unrecognized dmsetup message '%s' received", argv[0]); + vdo_log_warning("unrecognized dmsetup message '%s' received", argv[0]); return -EINVAL; } @@ -1091,7 +1091,7 @@ static int vdo_message(struct dm_target *ti, unsigned int argc, char **argv, int result; if (argc == 0) { - uds_log_warning("unspecified dmsetup message"); + vdo_log_warning("unspecified dmsetup message"); return -EINVAL; } @@ -1211,7 +1211,7 @@ static int perform_admin_operation(struct vdo *vdo, u32 starting_phase, struct vdo_administrator *admin = &vdo->admin; if (atomic_cmpxchg(&admin->busy, 0, 1) != 0) { - return uds_log_error_strerror(VDO_COMPONENT_BUSY, + return vdo_log_error_strerror(VDO_COMPONENT_BUSY, "Can't start %s operation, another operation is already in progress", type); } @@ -1285,7 +1285,7 @@ static int __must_check decode_from_super_block(struct vdo *vdo) * block, just accept it. 
*/ if (vdo->states.vdo.config.logical_blocks < config->logical_blocks) { - uds_log_warning("Growing logical size: a logical size of %llu blocks was specified, but that differs from the %llu blocks configured in the vdo super block", + vdo_log_warning("Growing logical size: a logical size of %llu blocks was specified, but that differs from the %llu blocks configured in the vdo super block", (unsigned long long) config->logical_blocks, (unsigned long long) vdo->states.vdo.config.logical_blocks); vdo->states.vdo.config.logical_blocks = config->logical_blocks; @@ -1328,14 +1328,14 @@ static int __must_check decode_vdo(struct vdo *vdo) journal_length = vdo_get_recovery_journal_length(vdo->states.vdo.config.recovery_journal_size); if (maximum_age > (journal_length / 2)) { - return uds_log_error_strerror(VDO_BAD_CONFIGURATION, + return vdo_log_error_strerror(VDO_BAD_CONFIGURATION, "maximum age: %llu exceeds limit %llu", (unsigned long long) maximum_age, (unsigned long long) (journal_length / 2)); } if (maximum_age == 0) { - return uds_log_error_strerror(VDO_BAD_CONFIGURATION, + return vdo_log_error_strerror(VDO_BAD_CONFIGURATION, "maximum age must be greater than 0"); } @@ -1451,19 +1451,19 @@ static int vdo_initialize(struct dm_target *ti, unsigned int instance, u64 logical_size = to_bytes(ti->len); block_count_t logical_blocks = logical_size / block_size; - uds_log_info("loading device '%s'", vdo_get_device_name(ti)); - uds_log_debug("Logical block size = %llu", (u64) config->logical_block_size); - uds_log_debug("Logical blocks = %llu", logical_blocks); - uds_log_debug("Physical block size = %llu", (u64) block_size); - uds_log_debug("Physical blocks = %llu", config->physical_blocks); - uds_log_debug("Block map cache blocks = %u", config->cache_size); - uds_log_debug("Block map maximum age = %u", config->block_map_maximum_age); - uds_log_debug("Deduplication = %s", (config->deduplication ? "on" : "off")); - uds_log_debug("Compression = %s", (config->compression ? "on" : "off")); + vdo_log_info("loading device '%s'", vdo_get_device_name(ti)); + vdo_log_debug("Logical block size = %llu", (u64) config->logical_block_size); + vdo_log_debug("Logical blocks = %llu", logical_blocks); + vdo_log_debug("Physical block size = %llu", (u64) block_size); + vdo_log_debug("Physical blocks = %llu", config->physical_blocks); + vdo_log_debug("Block map cache blocks = %u", config->cache_size); + vdo_log_debug("Block map maximum age = %u", config->block_map_maximum_age); + vdo_log_debug("Deduplication = %s", (config->deduplication ? "on" : "off")); + vdo_log_debug("Compression = %s", (config->compression ? "on" : "off")); vdo = vdo_find_matching(vdo_uses_device, config); if (vdo != NULL) { - uds_log_error("Existing vdo already uses device %s", + vdo_log_error("Existing vdo already uses device %s", vdo->device_config->parent_device_name); ti->error = "Cannot share storage device with already-running VDO"; return VDO_BAD_CONFIGURATION; @@ -1471,7 +1471,7 @@ static int vdo_initialize(struct dm_target *ti, unsigned int instance, result = vdo_make(instance, config, &ti->error, &vdo); if (result != VDO_SUCCESS) { - uds_log_error("Could not create VDO device. (VDO error %d, message %s)", + vdo_log_error("Could not create VDO device. (VDO error %d, message %s)", result, ti->error); vdo_destroy(vdo); return result; @@ -1483,7 +1483,7 @@ static int vdo_initialize(struct dm_target *ti, unsigned int instance, ti->error = ((result == VDO_INVALID_ADMIN_STATE) ? 
"Pre-load is only valid immediately after initialization" : "Cannot load metadata from device"); - uds_log_error("Could not start VDO device. (VDO error %d, message %s)", + vdo_log_error("Could not start VDO device. (VDO error %d, message %s)", result, ti->error); vdo_destroy(vdo); return result; @@ -1594,7 +1594,7 @@ static int construct_new_vdo_registered(struct dm_target *ti, unsigned int argc, result = parse_device_config(argc, argv, ti, &config); if (result != VDO_SUCCESS) { - uds_log_error_strerror(result, "parsing failed: %s", ti->error); + vdo_log_error_strerror(result, "parsing failed: %s", ti->error); release_instance(instance); return -EINVAL; } @@ -1723,7 +1723,7 @@ static int prepare_to_grow_physical(struct vdo *vdo, block_count_t new_physical_ int result; block_count_t current_physical_blocks = vdo->states.vdo.config.physical_blocks; - uds_log_info("Preparing to resize physical to %llu", + vdo_log_info("Preparing to resize physical to %llu", (unsigned long long) new_physical_blocks); VDO_ASSERT_LOG_ONLY((new_physical_blocks > current_physical_blocks), "New physical size is larger than current physical size"); @@ -1746,7 +1746,7 @@ static int prepare_to_grow_physical(struct vdo *vdo, block_count_t new_physical_ return result; } - uds_log_info("Done preparing to resize physical"); + vdo_log_info("Done preparing to resize physical"); return VDO_SUCCESS; } @@ -1823,7 +1823,7 @@ static int prepare_to_modify(struct dm_target *ti, struct device_config *config, if (config->logical_blocks > vdo->device_config->logical_blocks) { block_count_t logical_blocks = vdo->states.vdo.config.logical_blocks; - uds_log_info("Preparing to resize logical to %llu", + vdo_log_info("Preparing to resize logical to %llu", (unsigned long long) config->logical_blocks); VDO_ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks), "New logical size is larger than current size"); @@ -1835,7 +1835,7 @@ static int prepare_to_modify(struct dm_target *ti, struct device_config *config, return result; } - uds_log_info("Done preparing to resize logical"); + vdo_log_info("Done preparing to resize logical"); } if (config->physical_blocks > vdo->device_config->physical_blocks) { @@ -1861,7 +1861,7 @@ static int prepare_to_modify(struct dm_target *ti, struct device_config *config, if (strcmp(config->parent_device_name, vdo->device_config->parent_device_name) != 0) { const char *device_name = vdo_get_device_name(config->owning_target); - uds_log_info("Updating backing device of %s from %s to %s", device_name, + vdo_log_info("Updating backing device of %s from %s to %s", device_name, vdo->device_config->parent_device_name, config->parent_device_name); } @@ -1879,7 +1879,7 @@ static int update_existing_vdo(const char *device_name, struct dm_target *ti, if (result != VDO_SUCCESS) return -EINVAL; - uds_log_info("preparing to modify device '%s'", device_name); + vdo_log_info("preparing to modify device '%s'", device_name); result = prepare_to_modify(ti, config, vdo); if (result != VDO_SUCCESS) { free_device_config(config); @@ -1929,12 +1929,12 @@ static void vdo_dtr(struct dm_target *ti) vdo_register_allocating_thread(&allocating_thread, NULL); device_name = vdo_get_device_name(ti); - uds_log_info("stopping device '%s'", device_name); + vdo_log_info("stopping device '%s'", device_name); if (vdo->dump_on_shutdown) vdo_dump_all(vdo, "device shutdown"); vdo_destroy(vdo_forget(vdo)); - uds_log_info("device '%s' stopped", device_name); + vdo_log_info("device '%s' stopped", device_name); vdo_unregister_thread_device_id(); 
vdo_unregister_allocating_thread(); release_instance(instance); @@ -2096,7 +2096,7 @@ static void vdo_postsuspend(struct dm_target *ti) vdo_register_thread_device_id(&instance_thread, &vdo->instance); device_name = vdo_get_device_name(vdo->device_config->owning_target); - uds_log_info("suspending device '%s'", device_name); + vdo_log_info("suspending device '%s'", device_name); /* * It's important to note any error here does not actually stop device-mapper from @@ -2110,12 +2110,12 @@ static void vdo_postsuspend(struct dm_target *ti) * Treat VDO_READ_ONLY as a success since a read-only suspension still leaves the * VDO suspended. */ - uds_log_info("device '%s' suspended", device_name); + vdo_log_info("device '%s' suspended", device_name); } else if (result == VDO_INVALID_ADMIN_STATE) { - uds_log_error("Suspend invoked while in unexpected state: %s", + vdo_log_error("Suspend invoked while in unexpected state: %s", vdo_get_admin_state(vdo)->name); } else { - uds_log_error_strerror(result, "Suspend of device '%s' failed", + vdo_log_error_strerror(result, "Suspend of device '%s' failed", device_name); } @@ -2288,13 +2288,13 @@ static void handle_load_error(struct vdo_completion *completion) if (vdo_state_requires_read_only_rebuild(vdo->load_state) && (vdo->admin.phase == LOAD_PHASE_MAKE_DIRTY)) { - uds_log_error_strerror(completion->result, "aborting load"); + vdo_log_error_strerror(completion->result, "aborting load"); vdo->admin.phase = LOAD_PHASE_DRAIN_JOURNAL; load_callback(vdo_forget(completion)); return; } - uds_log_error_strerror(completion->result, + vdo_log_error_strerror(completion->result, "Entering read-only mode due to load error"); vdo->admin.phase = LOAD_PHASE_WAIT_FOR_READ_ONLY; vdo_enter_read_only_mode(vdo, completion->result); @@ -2386,7 +2386,7 @@ static void resume_callback(struct vdo_completion *completion) if (enable != was_enabled) WRITE_ONCE(vdo->compressing, enable); - uds_log_info("compression is %s", (enable ? "enabled" : "disabled")); + vdo_log_info("compression is %s", (enable ? 
"enabled" : "disabled")); vdo_resume_packer(vdo->packer, completion); return; @@ -2426,7 +2426,7 @@ static void grow_logical_callback(struct vdo_completion *completion) switch (advance_phase(vdo)) { case GROW_LOGICAL_PHASE_START: if (vdo_is_read_only(vdo)) { - uds_log_error_strerror(VDO_READ_ONLY, + vdo_log_error_strerror(VDO_READ_ONLY, "Can't grow logical size of a read-only VDO"); vdo_set_completion_result(completion, VDO_READ_ONLY); break; @@ -2505,7 +2505,7 @@ static int perform_grow_logical(struct vdo *vdo, block_count_t new_logical_block return VDO_SUCCESS; } - uds_log_info("Resizing logical to %llu", + vdo_log_info("Resizing logical to %llu", (unsigned long long) new_logical_blocks); if (vdo->block_map->next_entry_count != new_logical_blocks) return VDO_PARAMETER_MISMATCH; @@ -2516,7 +2516,7 @@ static int perform_grow_logical(struct vdo *vdo, block_count_t new_logical_block if (result != VDO_SUCCESS) return result; - uds_log_info("Logical blocks now %llu", (unsigned long long) new_logical_blocks); + vdo_log_info("Logical blocks now %llu", (unsigned long long) new_logical_blocks); return VDO_SUCCESS; } @@ -2576,7 +2576,7 @@ static void grow_physical_callback(struct vdo_completion *completion) switch (advance_phase(vdo)) { case GROW_PHYSICAL_PHASE_START: if (vdo_is_read_only(vdo)) { - uds_log_error_strerror(VDO_READ_ONLY, + vdo_log_error_strerror(VDO_READ_ONLY, "Can't grow physical size of a read-only VDO"); vdo_set_completion_result(completion, VDO_READ_ONLY); break; @@ -2685,7 +2685,7 @@ static int perform_grow_physical(struct vdo *vdo, block_count_t new_physical_blo if (result != VDO_SUCCESS) return result; - uds_log_info("Physical block count was %llu, now %llu", + vdo_log_info("Physical block count was %llu, now %llu", (unsigned long long) old_physical_blocks, (unsigned long long) new_physical_blocks); return VDO_SUCCESS; @@ -2707,13 +2707,13 @@ static int __must_check apply_new_vdo_configuration(struct vdo *vdo, result = perform_grow_logical(vdo, config->logical_blocks); if (result != VDO_SUCCESS) { - uds_log_error("grow logical operation failed, result = %d", result); + vdo_log_error("grow logical operation failed, result = %d", result); return result; } result = perform_grow_physical(vdo, config->physical_blocks); if (result != VDO_SUCCESS) - uds_log_error("resize operation failed, result = %d", result); + vdo_log_error("resize operation failed, result = %d", result); return result; } @@ -2728,14 +2728,14 @@ static int vdo_preresume_registered(struct dm_target *ti, struct vdo *vdo) backing_blocks = get_underlying_device_block_count(vdo); if (backing_blocks < config->physical_blocks) { /* FIXME: can this still happen? */ - uds_log_error("resume of device '%s' failed: backing device has %llu blocks but VDO physical size is %llu blocks", + vdo_log_error("resume of device '%s' failed: backing device has %llu blocks but VDO physical size is %llu blocks", device_name, (unsigned long long) backing_blocks, (unsigned long long) config->physical_blocks); return -EINVAL; } if (vdo_get_admin_state(vdo) == VDO_ADMIN_STATE_PRE_LOADED) { - uds_log_info("starting device '%s'", device_name); + vdo_log_info("starting device '%s'", device_name); result = perform_admin_operation(vdo, LOAD_PHASE_START, load_callback, handle_load_error, "load"); if ((result != VDO_SUCCESS) && (result != VDO_READ_ONLY)) { @@ -2743,7 +2743,7 @@ static int vdo_preresume_registered(struct dm_target *ti, struct vdo *vdo) * Something has gone very wrong. 
Make sure everything has drained and * leave the device in an unresumable state. */ - uds_log_error_strerror(result, + vdo_log_error_strerror(result, "Start failed, could not load VDO metadata"); vdo->suspend_type = VDO_ADMIN_STATE_STOPPING; perform_admin_operation(vdo, SUSPEND_PHASE_START, @@ -2753,10 +2753,10 @@ static int vdo_preresume_registered(struct dm_target *ti, struct vdo *vdo) } /* Even if the VDO is read-only, it is now able to handle read requests. */ - uds_log_info("device '%s' started", device_name); + vdo_log_info("device '%s' started", device_name); } - uds_log_info("resuming device '%s'", device_name); + vdo_log_info("resuming device '%s'", device_name); /* If this fails, the VDO was not in a state to be resumed. This should never happen. */ result = apply_new_vdo_configuration(vdo, config); @@ -2774,7 +2774,7 @@ static int vdo_preresume_registered(struct dm_target *ti, struct vdo *vdo) * written to disk. */ if (result != VDO_SUCCESS) { - uds_log_error_strerror(result, + vdo_log_error_strerror(result, "Commit of modifications to device '%s' failed", device_name); vdo_enter_read_only_mode(vdo, result); @@ -2795,7 +2795,7 @@ static int vdo_preresume_registered(struct dm_target *ti, struct vdo *vdo) } if (result != VDO_SUCCESS) - uds_log_error("resume of device '%s' failed with error: %d", device_name, + vdo_log_error("resume of device '%s' failed with error: %d", device_name, result); return result; @@ -2821,7 +2821,7 @@ static void vdo_resume(struct dm_target *ti) vdo_register_thread_device_id(&instance_thread, &get_vdo_for_target(ti)->instance); - uds_log_info("device '%s' resumed", vdo_get_device_name(ti)); + vdo_log_info("device '%s' resumed", vdo_get_device_name(ti)); vdo_unregister_thread_device_id(); } @@ -2852,7 +2852,7 @@ static bool dm_registered; static void vdo_module_destroy(void) { - uds_log_debug("unloading"); + vdo_log_debug("unloading"); if (dm_registered) dm_unregister_target(&vdo_target_bio); @@ -2863,7 +2863,7 @@ static void vdo_module_destroy(void) vdo_free(instances.words); memset(&instances, 0, sizeof(struct instance_tracker)); - uds_log_info("unloaded version %s", CURRENT_VERSION); + vdo_log_info("unloaded version %s", CURRENT_VERSION); } static int __init vdo_init(void) @@ -2874,19 +2874,19 @@ static int __init vdo_init(void) vdo_memory_init(); vdo_initialize_thread_device_registry(); vdo_initialize_device_registry_once(); - uds_log_info("loaded version %s", CURRENT_VERSION); + vdo_log_info("loaded version %s", CURRENT_VERSION); /* Add VDO errors to the set of errors registered by the indexer. 
*/ result = vdo_register_status_codes(); if (result != VDO_SUCCESS) { - uds_log_error("vdo_register_status_codes failed %d", result); + vdo_log_error("vdo_register_status_codes failed %d", result); vdo_module_destroy(); return result; } result = dm_register_target(&vdo_target_bio); if (result < 0) { - uds_log_error("dm_register_target failed %d", result); + vdo_log_error("dm_register_target failed %d", result); vdo_module_destroy(); return result; } diff --git a/drivers/md/dm-vdo/dump.c b/drivers/md/dm-vdo/dump.c index 52ee9a72781c..00e575d7d773 100644 --- a/drivers/md/dm-vdo/dump.c +++ b/drivers/md/dm-vdo/dump.c @@ -58,12 +58,12 @@ static void do_dump(struct vdo *vdo, unsigned int dump_options_requested, u32 active, maximum; s64 outstanding; - uds_log_info("%s dump triggered via %s", UDS_LOGGING_MODULE_NAME, why); + vdo_log_info("%s dump triggered via %s", VDO_LOGGING_MODULE_NAME, why); active = get_data_vio_pool_active_requests(vdo->data_vio_pool); maximum = get_data_vio_pool_maximum_requests(vdo->data_vio_pool); outstanding = (atomic64_read(&vdo->stats.bios_submitted) - atomic64_read(&vdo->stats.bios_completed)); - uds_log_info("%u device requests outstanding (max %u), %lld bio requests outstanding, device '%s'", + vdo_log_info("%u device requests outstanding (max %u), %lld bio requests outstanding, device '%s'", active, maximum, outstanding, vdo_get_device_name(vdo->device_config->owning_target)); if (((dump_options_requested & FLAG_SHOW_QUEUES) != 0) && (vdo->threads != NULL)) { @@ -80,7 +80,7 @@ static void do_dump(struct vdo *vdo, unsigned int dump_options_requested, vdo_dump_status(vdo); vdo_report_memory_usage(); - uds_log_info("end of %s dump", UDS_LOGGING_MODULE_NAME); + vdo_log_info("end of %s dump", VDO_LOGGING_MODULE_NAME); } static int parse_dump_options(unsigned int argc, char *const *argv, @@ -114,7 +114,7 @@ static int parse_dump_options(unsigned int argc, char *const *argv, } } if (j == ARRAY_SIZE(option_names)) { - uds_log_warning("dump option name '%s' unknown", argv[i]); + vdo_log_warning("dump option name '%s' unknown", argv[i]); options_okay = false; } } @@ -159,13 +159,13 @@ static void dump_vio_waiters(struct vdo_wait_queue *waitq, char *wait_on) data_vio = vdo_waiter_as_data_vio(first); - uds_log_info(" %s is locked. Waited on by: vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s", + vdo_log_info(" %s is locked. Waited on by: vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s", wait_on, data_vio, data_vio->allocation.pbn, data_vio->logical.lbn, data_vio->duplicate.pbn, get_data_vio_operation_name(data_vio)); for (waiter = first->next_waiter; waiter != first; waiter = waiter->next_waiter) { data_vio = vdo_waiter_as_data_vio(waiter); - uds_log_info(" ... and : vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s", + vdo_log_info(" ... 
and : vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s", data_vio, data_vio->allocation.pbn, data_vio->logical.lbn, data_vio->duplicate.pbn, get_data_vio_operation_name(data_vio)); @@ -258,7 +258,7 @@ void dump_data_vio(void *data) encode_vio_dump_flags(data_vio, flags_dump_buffer); - uds_log_info(" vio %px %s%s %s %s%s", data_vio, + vdo_log_info(" vio %px %s%s %s %s%s", data_vio, vio_block_number_dump_buffer, vio_flush_generation_buffer, get_data_vio_operation_name(data_vio), diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c index ebb0a4edd109..a34ea0229d53 100644 --- a/drivers/md/dm-vdo/encodings.c +++ b/drivers/md/dm-vdo/encodings.c @@ -146,7 +146,7 @@ static int __must_check validate_version(struct version_number expected_version, const char *component_name) { if (!vdo_are_same_version(expected_version, actual_version)) { - return uds_log_error_strerror(VDO_UNSUPPORTED_VERSION, + return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION, "%s version mismatch, expected %d.%d, got %d.%d", component_name, expected_version.major_version, @@ -179,7 +179,7 @@ int vdo_validate_header(const struct header *expected_header, int result; if (expected_header->id != actual_header->id) { - return uds_log_error_strerror(VDO_INCORRECT_COMPONENT, + return vdo_log_error_strerror(VDO_INCORRECT_COMPONENT, "%s ID mismatch, expected %d, got %d", name, expected_header->id, actual_header->id); @@ -192,7 +192,7 @@ int vdo_validate_header(const struct header *expected_header, if ((expected_header->size > actual_header->size) || (exact_size && (expected_header->size < actual_header->size))) { - return uds_log_error_strerror(VDO_UNSUPPORTED_VERSION, + return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION, "%s size mismatch, expected %zu, got %zu", name, expected_header->size, actual_header->size); @@ -653,7 +653,7 @@ int vdo_configure_slab_depot(const struct partition *partition, physical_block_number_t last_block; block_count_t slab_size = slab_config.slab_blocks; - uds_log_debug("slabDepot %s(block_count=%llu, first_block=%llu, slab_size=%llu, zone_count=%u)", + vdo_log_debug("slabDepot %s(block_count=%llu, first_block=%llu, slab_size=%llu, zone_count=%u)", __func__, (unsigned long long) partition->count, (unsigned long long) partition->offset, (unsigned long long) slab_size, zone_count); @@ -677,7 +677,7 @@ int vdo_configure_slab_depot(const struct partition *partition, .zone_count = zone_count, }; - uds_log_debug("slab_depot last_block=%llu, total_data_blocks=%llu, slab_count=%zu, left_over=%llu", + vdo_log_debug("slab_depot last_block=%llu, total_data_blocks=%llu, slab_count=%zu, left_over=%llu", (unsigned long long) last_block, (unsigned long long) total_data_blocks, slab_count, (unsigned long long) (partition->count - (last_block - partition->offset))); @@ -875,7 +875,7 @@ int vdo_initialize_layout(block_count_t size, physical_block_number_t offset, (offset + block_map_blocks + journal_blocks + summary_blocks); if (necessary_size > size) - return uds_log_error_strerror(VDO_NO_SPACE, + return vdo_log_error_strerror(VDO_NO_SPACE, "Not enough space to make a VDO"); *layout = (struct layout) { @@ -1045,7 +1045,7 @@ static int decode_layout(u8 *buffer, size_t *offset, physical_block_number_t sta layout->num_partitions = layout_header.partition_count; if (layout->num_partitions > VDO_PARTITION_COUNT) { - return uds_log_error_strerror(VDO_UNKNOWN_PARTITION, + return vdo_log_error_strerror(VDO_UNKNOWN_PARTITION, "layout has extra partitions"); } @@ -1070,7 +1070,7 @@ static int decode_layout(u8 
*buffer, size_t *offset, physical_block_number_t sta result = vdo_get_partition(layout, REQUIRED_PARTITIONS[i], &partition); if (result != VDO_SUCCESS) { vdo_uninitialize_layout(layout); - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "layout is missing required partition %u", REQUIRED_PARTITIONS[i]); } @@ -1080,7 +1080,7 @@ static int decode_layout(u8 *buffer, size_t *offset, physical_block_number_t sta if (start != size) { vdo_uninitialize_layout(layout); - return uds_log_error_strerror(UDS_BAD_STATE, + return vdo_log_error_strerror(UDS_BAD_STATE, "partitions do not cover the layout"); } @@ -1253,7 +1253,7 @@ int vdo_validate_config(const struct vdo_config *config, return VDO_OUT_OF_RANGE; if (physical_block_count != config->physical_blocks) { - uds_log_error("A physical size of %llu blocks was specified, not the %llu blocks configured in the vdo super block", + vdo_log_error("A physical size of %llu blocks was specified, not the %llu blocks configured in the vdo super block", (unsigned long long) physical_block_count, (unsigned long long) config->physical_blocks); return VDO_PARAMETER_MISMATCH; @@ -1266,7 +1266,7 @@ int vdo_validate_config(const struct vdo_config *config, return result; if (logical_block_count != config->logical_blocks) { - uds_log_error("A logical size of %llu blocks was specified, but that differs from the %llu blocks configured in the vdo super block", + vdo_log_error("A logical size of %llu blocks was specified, but that differs from the %llu blocks configured in the vdo super block", (unsigned long long) logical_block_count, (unsigned long long) config->logical_blocks); return VDO_PARAMETER_MISMATCH; @@ -1390,7 +1390,7 @@ int vdo_validate_component_states(struct vdo_component_states *states, block_count_t logical_size) { if (geometry_nonce != states->vdo.nonce) { - return uds_log_error_strerror(VDO_BAD_NONCE, + return vdo_log_error_strerror(VDO_BAD_NONCE, "Geometry nonce %llu does not match superblock nonce %llu", (unsigned long long) geometry_nonce, (unsigned long long) states->vdo.nonce); @@ -1463,7 +1463,7 @@ int vdo_decode_super_block(u8 *buffer) * We can't check release version or checksum until we know the content size, so we * have to assume a version mismatch on unexpected values. */ - return uds_log_error_strerror(VDO_UNSUPPORTED_VERSION, + return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION, "super block contents too large: %zu", header.size); } diff --git a/drivers/md/dm-vdo/errors.c b/drivers/md/dm-vdo/errors.c index 3b5fddad8ddf..8b2d22381274 100644 --- a/drivers/md/dm-vdo/errors.c +++ b/drivers/md/dm-vdo/errors.c @@ -215,8 +215,8 @@ const char *uds_string_error_name(int errnum, char *buf, size_t buflen) */ int uds_status_to_errno(int error) { - char error_name[UDS_MAX_ERROR_NAME_SIZE]; - char error_message[UDS_MAX_ERROR_MESSAGE_SIZE]; + char error_name[VDO_MAX_ERROR_NAME_SIZE]; + char error_message[VDO_MAX_ERROR_MESSAGE_SIZE]; /* 0 is success, and negative values are already system error codes. */ if (likely(error <= 0)) @@ -248,7 +248,7 @@ int uds_status_to_errno(int error) default: /* Translate an unexpected error into something generic. 
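Aside: condensed, uds_status_to_errno() above has the following shape (a sketch that elides the specific case arms, since only the early return and the default arm are visible in this hunk):

	int uds_status_to_errno(int error)
	{
		/* 0 is success, and negative values are already errnos. */
		if (likely(error <= 0))
			return error;

		switch (error) {
		/* ... known UDS/VDO status codes map to specific errnos ... */
		default:
			/* Unexpected codes are logged, then flattened to -EIO. */
			return -EIO;
		}
	}

This keeps kernel-facing entry points returning conventional negative errnos while the internal status-code space stays positive.
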
*/ - uds_log_info("%s: mapping status code %d (%s: %s) to -EIO", + vdo_log_info("%s: mapping status code %d (%s: %s) to -EIO", __func__, error, uds_string_error_name(error, error_name, sizeof(error_name)), diff --git a/drivers/md/dm-vdo/errors.h b/drivers/md/dm-vdo/errors.h index c6c085b26a0e..24e0e745fd5f 100644 --- a/drivers/md/dm-vdo/errors.h +++ b/drivers/md/dm-vdo/errors.h @@ -51,8 +51,8 @@ enum uds_status_codes { }; enum { - UDS_MAX_ERROR_NAME_SIZE = 80, - UDS_MAX_ERROR_MESSAGE_SIZE = 128, + VDO_MAX_ERROR_NAME_SIZE = 80, + VDO_MAX_ERROR_MESSAGE_SIZE = 128, }; struct error_info { diff --git a/drivers/md/dm-vdo/flush.c b/drivers/md/dm-vdo/flush.c index e03679e4d1ba..57e87f0d7069 100644 --- a/drivers/md/dm-vdo/flush.c +++ b/drivers/md/dm-vdo/flush.c @@ -108,7 +108,7 @@ static void *allocate_flush(gfp_t gfp_mask, void *pool_data) int result = vdo_allocate(1, struct vdo_flush, __func__, &flush); if (result != VDO_SUCCESS) - uds_log_error_strerror(result, "failed to allocate spare flush"); + vdo_log_error_strerror(result, "failed to allocate spare flush"); } if (flush != NULL) { @@ -349,11 +349,11 @@ void vdo_complete_flushes(struct flusher *flusher) */ void vdo_dump_flusher(const struct flusher *flusher) { - uds_log_info("struct flusher"); - uds_log_info(" flush_generation=%llu first_unacknowledged_generation=%llu", + vdo_log_info("struct flusher"); + vdo_log_info(" flush_generation=%llu first_unacknowledged_generation=%llu", (unsigned long long) flusher->flush_generation, (unsigned long long) flusher->first_unacknowledged_generation); - uds_log_info(" notifiers queue is %s; pending_flushes queue is %s", + vdo_log_info(" notifiers queue is %s; pending_flushes queue is %s", (vdo_waitq_has_waiters(&flusher->notifiers) ? "not empty" : "empty"), (vdo_waitq_has_waiters(&flusher->pending_flushes) ? "not empty" : "empty")); } diff --git a/drivers/md/dm-vdo/funnel-workqueue.c b/drivers/md/dm-vdo/funnel-workqueue.c index cf04cdef0750..ae11941c90a9 100644 --- a/drivers/md/dm-vdo/funnel-workqueue.c +++ b/drivers/md/dm-vdo/funnel-workqueue.c @@ -485,7 +485,7 @@ static void dump_simple_work_queue(struct simple_work_queue *queue) thread_status = atomic_read(&queue->idle) ? "idle" : "running"; } - uds_log_info("workQ %px (%s) %s (%c)", &queue->common, queue->common.name, + vdo_log_info("workQ %px (%s) %s (%c)", &queue->common, queue->common.name, thread_status, task_state_report); /* ->waiting_worker_threads wait queue status? anyone waiting? 
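Note: the dump output here uses %px rather than %p deliberately. The kernel hashes %p values in printk output, so raw %px addresses are what allow correlating the same queue, vio, or waiter structure across consecutive dump lines; these messages are debugging aids where exposing the address is the point. For example, from the hunk above:

	vdo_log_info("workQ %px (%s) %s (%c)", &queue->common, queue->common.name,
		     thread_status, task_state_report);
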
*/ diff --git a/drivers/md/dm-vdo/indexer/chapter-index.c b/drivers/md/dm-vdo/indexer/chapter-index.c index 47e4ed234242..7e32a25d3f2f 100644 --- a/drivers/md/dm-vdo/indexer/chapter-index.c +++ b/drivers/md/dm-vdo/indexer/chapter-index.c @@ -166,7 +166,7 @@ int uds_pack_open_chapter_index_page(struct open_chapter_index *chapter_index, if (removals == 0) { uds_get_delta_index_stats(delta_index, &stats); - uds_log_warning("The chapter index for chapter %llu contains %llu entries with %llu collisions", + vdo_log_warning("The chapter index for chapter %llu contains %llu entries with %llu collisions", (unsigned long long) chapter_number, (unsigned long long) stats.record_count, (unsigned long long) stats.collision_count); @@ -198,7 +198,7 @@ int uds_pack_open_chapter_index_page(struct open_chapter_index *chapter_index, } if (removals > 0) { - uds_log_warning("To avoid chapter index page overflow in chapter %llu, %u entries were removed from the chapter index", + vdo_log_warning("To avoid chapter index page overflow in chapter %llu, %u entries were removed from the chapter index", (unsigned long long) chapter_number, removals); } diff --git a/drivers/md/dm-vdo/indexer/config.c b/drivers/md/dm-vdo/indexer/config.c index 69bf27a9d61b..5532371b952f 100644 --- a/drivers/md/dm-vdo/indexer/config.c +++ b/drivers/md/dm-vdo/indexer/config.c @@ -33,54 +33,54 @@ static bool are_matching_configurations(struct uds_configuration *saved_config, bool result = true; if (saved_geometry->record_pages_per_chapter != geometry->record_pages_per_chapter) { - uds_log_error("Record pages per chapter (%u) does not match (%u)", + vdo_log_error("Record pages per chapter (%u) does not match (%u)", saved_geometry->record_pages_per_chapter, geometry->record_pages_per_chapter); result = false; } if (saved_geometry->chapters_per_volume != geometry->chapters_per_volume) { - uds_log_error("Chapter count (%u) does not match (%u)", + vdo_log_error("Chapter count (%u) does not match (%u)", saved_geometry->chapters_per_volume, geometry->chapters_per_volume); result = false; } if (saved_geometry->sparse_chapters_per_volume != geometry->sparse_chapters_per_volume) { - uds_log_error("Sparse chapter count (%u) does not match (%u)", + vdo_log_error("Sparse chapter count (%u) does not match (%u)", saved_geometry->sparse_chapters_per_volume, geometry->sparse_chapters_per_volume); result = false; } if (saved_config->cache_chapters != user->cache_chapters) { - uds_log_error("Cache size (%u) does not match (%u)", + vdo_log_error("Cache size (%u) does not match (%u)", saved_config->cache_chapters, user->cache_chapters); result = false; } if (saved_config->volume_index_mean_delta != user->volume_index_mean_delta) { - uds_log_error("Volume index mean delta (%u) does not match (%u)", + vdo_log_error("Volume index mean delta (%u) does not match (%u)", saved_config->volume_index_mean_delta, user->volume_index_mean_delta); result = false; } if (saved_geometry->bytes_per_page != geometry->bytes_per_page) { - uds_log_error("Bytes per page value (%zu) does not match (%zu)", + vdo_log_error("Bytes per page value (%zu) does not match (%zu)", saved_geometry->bytes_per_page, geometry->bytes_per_page); result = false; } if (saved_config->sparse_sample_rate != user->sparse_sample_rate) { - uds_log_error("Sparse sample rate (%u) does not match (%u)", + vdo_log_error("Sparse sample rate (%u) does not match (%u)", saved_config->sparse_sample_rate, user->sparse_sample_rate); result = false; } if (saved_config->nonce != user->nonce) { - uds_log_error("Nonce 
(%llu) does not match (%llu)", + vdo_log_error("Nonce (%llu) does not match (%llu)", (unsigned long long) saved_config->nonce, (unsigned long long) user->nonce); result = false; @@ -109,11 +109,11 @@ int uds_validate_config_contents(struct buffered_reader *reader, result = uds_read_from_buffered_reader(reader, version_buffer, INDEX_CONFIG_VERSION_LENGTH); if (result != UDS_SUCCESS) - return uds_log_error_strerror(result, "cannot read index config version"); + return vdo_log_error_strerror(result, "cannot read index config version"); if (!is_version(INDEX_CONFIG_VERSION_6_02, version_buffer) && !is_version(INDEX_CONFIG_VERSION_8_02, version_buffer)) { - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "unsupported configuration version: '%.*s'", INDEX_CONFIG_VERSION_LENGTH, version_buffer); @@ -121,7 +121,7 @@ int uds_validate_config_contents(struct buffered_reader *reader, result = uds_read_from_buffered_reader(reader, buffer, sizeof(buffer)); if (result != UDS_SUCCESS) - return uds_log_error_strerror(result, "cannot read config data"); + return vdo_log_error_strerror(result, "cannot read config data"); decode_u32_le(buffer, &offset, &geometry.record_pages_per_chapter); decode_u32_le(buffer, &offset, &geometry.chapters_per_volume); @@ -149,7 +149,7 @@ int uds_validate_config_contents(struct buffered_reader *reader, result = uds_read_from_buffered_reader(reader, remapping, sizeof(remapping)); if (result != UDS_SUCCESS) - return uds_log_error_strerror(result, "cannot read converted config"); + return vdo_log_error_strerror(result, "cannot read converted config"); offset = 0; decode_u64_le(remapping, &offset, @@ -159,7 +159,7 @@ int uds_validate_config_contents(struct buffered_reader *reader, } if (!are_matching_configurations(&config, &geometry, user_config)) { - uds_log_warning("Supplied configuration does not match save"); + vdo_log_warning("Supplied configuration does not match save"); return UDS_NO_INDEX; } @@ -263,7 +263,7 @@ static int compute_memory_sizes(uds_memory_config_size_t mem_gb, bool sparse, DEFAULT_CHAPTERS_PER_VOLUME); *record_pages_per_chapter = DEFAULT_RECORD_PAGES_PER_CHAPTER; } else { - uds_log_error("received invalid memory size"); + vdo_log_error("received invalid memory size"); return -EINVAL; } @@ -292,7 +292,7 @@ static unsigned int __must_check normalize_zone_count(unsigned int requested) if (zone_count > MAX_ZONES) zone_count = MAX_ZONES; - uds_log_info("Using %u indexing zone%s for concurrency.", + vdo_log_info("Using %u indexing zone%s for concurrency.", zone_count, zone_count == 1 ? 
"" : "s"); return zone_count; } @@ -364,13 +364,13 @@ void uds_log_configuration(struct uds_configuration *config) { struct index_geometry *geometry = config->geometry; - uds_log_debug("Configuration:"); - uds_log_debug(" Record pages per chapter: %10u", geometry->record_pages_per_chapter); - uds_log_debug(" Chapters per volume: %10u", geometry->chapters_per_volume); - uds_log_debug(" Sparse chapters per volume: %10u", geometry->sparse_chapters_per_volume); - uds_log_debug(" Cache size (chapters): %10u", config->cache_chapters); - uds_log_debug(" Volume index mean delta: %10u", config->volume_index_mean_delta); - uds_log_debug(" Bytes per page: %10zu", geometry->bytes_per_page); - uds_log_debug(" Sparse sample rate: %10u", config->sparse_sample_rate); - uds_log_debug(" Nonce: %llu", (unsigned long long) config->nonce); + vdo_log_debug("Configuration:"); + vdo_log_debug(" Record pages per chapter: %10u", geometry->record_pages_per_chapter); + vdo_log_debug(" Chapters per volume: %10u", geometry->chapters_per_volume); + vdo_log_debug(" Sparse chapters per volume: %10u", geometry->sparse_chapters_per_volume); + vdo_log_debug(" Cache size (chapters): %10u", config->cache_chapters); + vdo_log_debug(" Volume index mean delta: %10u", config->volume_index_mean_delta); + vdo_log_debug(" Bytes per page: %10zu", geometry->bytes_per_page); + vdo_log_debug(" Sparse sample rate: %10u", config->sparse_sample_rate); + vdo_log_debug(" Nonce: %llu", (unsigned long long) config->nonce); } diff --git a/drivers/md/dm-vdo/indexer/delta-index.c b/drivers/md/dm-vdo/indexer/delta-index.c index b49066554248..0ac2443f0df3 100644 --- a/drivers/md/dm-vdo/indexer/delta-index.c +++ b/drivers/md/dm-vdo/indexer/delta-index.c @@ -375,7 +375,7 @@ int uds_initialize_delta_index(struct delta_index *delta_index, unsigned int zon */ if (delta_index->list_count <= first_list_in_zone) { uds_uninitialize_delta_index(delta_index); - return uds_log_error_strerror(UDS_INVALID_ARGUMENT, + return vdo_log_error_strerror(UDS_INVALID_ARGUMENT, "%u delta lists not enough for %u zones", list_count, zone_count); } @@ -732,7 +732,7 @@ int uds_pack_delta_index_page(const struct delta_index *delta_index, u64 header_ free_bits -= GUARD_BITS; if (free_bits < IMMUTABLE_HEADER_SIZE) { /* This page is too small to store any delta lists. 
*/ - return uds_log_error_strerror(UDS_OVERFLOW, + return vdo_log_error_strerror(UDS_OVERFLOW, "Chapter Index Page of %zu bytes is too small", memory_size); } @@ -843,7 +843,7 @@ int uds_start_restoring_delta_index(struct delta_index *delta_index, result = uds_read_from_buffered_reader(buffered_readers[z], buffer, sizeof(buffer)); if (result != UDS_SUCCESS) { - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to read delta index header"); } @@ -860,23 +860,23 @@ int uds_start_restoring_delta_index(struct delta_index *delta_index, "%zu bytes decoded of %zu expected", offset, sizeof(struct delta_index_header)); if (result != VDO_SUCCESS) { - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to read delta index header"); } if (memcmp(header.magic, DELTA_INDEX_MAGIC, MAGIC_SIZE) != 0) { - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "delta index file has bad magic number"); } if (zone_count != header.zone_count) { - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "delta index files contain mismatched zone counts (%u,%u)", zone_count, header.zone_count); } if (header.zone_number != z) { - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "delta index zone %u found in slot %u", header.zone_number, z); } @@ -887,7 +887,7 @@ int uds_start_restoring_delta_index(struct delta_index *delta_index, collision_count += header.collision_count; if (first_list[z] != list_next) { - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "delta index file for zone %u starts with list %u instead of list %u", z, first_list[z], list_next); } @@ -896,13 +896,13 @@ int uds_start_restoring_delta_index(struct delta_index *delta_index, } if (list_next != delta_index->list_count) { - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "delta index files contain %u delta lists instead of %u delta lists", list_next, delta_index->list_count); } if (collision_count > record_count) { - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "delta index files contain %llu collisions and %llu records", (unsigned long long) collision_count, (unsigned long long) record_count); @@ -927,7 +927,7 @@ int uds_start_restoring_delta_index(struct delta_index *delta_index, size_data, sizeof(size_data)); if (result != UDS_SUCCESS) { - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to read delta index size"); } @@ -960,7 +960,7 @@ static int restore_delta_list_to_zone(struct delta_zone *delta_zone, u32 list_number = save_info->index - delta_zone->first_list; if (list_number >= delta_zone->list_count) { - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "invalid delta list number %u not in range [%u,%u)", save_info->index, delta_zone->first_list, delta_zone->first_list + delta_zone->list_count); @@ -968,7 +968,7 @@ static int restore_delta_list_to_zone(struct delta_zone *delta_zone, delta_list = &delta_zone->delta_lists[list_number + 1]; if (delta_list->size == 0) { - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "unexpected delta list number %u", save_info->index); } @@ -976,7 +976,7 @@ static int 
restore_delta_list_to_zone(struct delta_zone *delta_zone, bit_count = delta_list->size + save_info->bit_offset; byte_count = BITS_TO_BYTES(bit_count); if (save_info->byte_count != byte_count) { - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "unexpected delta list size %u != %u", save_info->byte_count, byte_count); } @@ -996,7 +996,7 @@ static int restore_delta_list_data(struct delta_index *delta_index, unsigned int result = uds_read_from_buffered_reader(buffered_reader, buffer, sizeof(buffer)); if (result != UDS_SUCCESS) { - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to read delta list data"); } @@ -1009,7 +1009,7 @@ static int restore_delta_list_data(struct delta_index *delta_index, unsigned int if ((save_info.bit_offset >= BITS_PER_BYTE) || (save_info.byte_count > DELTA_LIST_MAX_BYTE_COUNT)) { - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "corrupt delta list data"); } @@ -1018,7 +1018,7 @@ static int restore_delta_list_data(struct delta_index *delta_index, unsigned int return UDS_CORRUPT_DATA; if (save_info.index >= delta_index->list_count) { - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "invalid delta list number %u of %u", save_info.index, delta_index->list_count); @@ -1027,7 +1027,7 @@ static int restore_delta_list_data(struct delta_index *delta_index, unsigned int result = uds_read_from_buffered_reader(buffered_reader, data, save_info.byte_count); if (result != UDS_SUCCESS) { - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to read delta list data"); } @@ -1102,7 +1102,7 @@ static int flush_delta_list(struct delta_zone *zone, u32 flush_index) result = uds_write_to_buffered_writer(zone->buffered_writer, buffer, sizeof(buffer)); if (result != UDS_SUCCESS) { - uds_log_warning_strerror(result, "failed to write delta list memory"); + vdo_log_warning_strerror(result, "failed to write delta list memory"); return result; } @@ -1110,7 +1110,7 @@ static int flush_delta_list(struct delta_zone *zone, u32 flush_index) zone->memory + get_delta_list_byte_start(delta_list), get_delta_list_byte_size(delta_list)); if (result != UDS_SUCCESS) - uds_log_warning_strerror(result, "failed to write delta list memory"); + vdo_log_warning_strerror(result, "failed to write delta list memory"); return result; } @@ -1144,7 +1144,7 @@ int uds_start_saving_delta_index(const struct delta_index *delta_index, result = uds_write_to_buffered_writer(buffered_writer, buffer, offset); if (result != UDS_SUCCESS) - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to write delta index header"); for (i = 0; i < delta_zone->list_count; i++) { @@ -1156,7 +1156,7 @@ int uds_start_saving_delta_index(const struct delta_index *delta_index, result = uds_write_to_buffered_writer(buffered_writer, data, sizeof(data)); if (result != UDS_SUCCESS) - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to write delta list size"); } @@ -1197,7 +1197,7 @@ int uds_write_guard_delta_list(struct buffered_writer *buffered_writer) result = uds_write_to_buffered_writer(buffered_writer, buffer, sizeof(buffer)); if (result != UDS_SUCCESS) - uds_log_warning_strerror(result, "failed to write guard delta list"); + vdo_log_warning_strerror(result, "failed to write guard delta list"); return UDS_SUCCESS; } @@ -1378,7 
+1378,7 @@ noinline int uds_next_delta_index_entry(struct delta_index_entry *delta_entry) * This is not an assertion because uds_validate_chapter_index_page() wants to * handle this error. */ - uds_log_warning("Decoded past the end of the delta list"); + vdo_log_warning("Decoded past the end of the delta list"); return UDS_CORRUPT_DATA; } @@ -1959,7 +1959,7 @@ u32 uds_get_delta_index_page_count(u32 entry_count, u32 list_count, u32 mean_del void uds_log_delta_index_entry(struct delta_index_entry *delta_entry) { - uds_log_ratelimit(uds_log_info, + vdo_log_ratelimit(vdo_log_info, "List 0x%X Key 0x%X Offset 0x%X%s%s List_size 0x%X%s", delta_entry->list_number, delta_entry->key, delta_entry->offset, delta_entry->at_end ? " end" : "", diff --git a/drivers/md/dm-vdo/indexer/index-layout.c b/drivers/md/dm-vdo/indexer/index-layout.c index 74fd44c20e5c..627adc24af3b 100644 --- a/drivers/md/dm-vdo/indexer/index-layout.c +++ b/drivers/md/dm-vdo/indexer/index-layout.c @@ -231,7 +231,7 @@ static int __must_check compute_sizes(const struct uds_configuration *config, result = uds_compute_volume_index_save_blocks(config, sls->block_size, &sls->volume_index_blocks); if (result != UDS_SUCCESS) - return uds_log_error_strerror(result, "cannot compute index save size"); + return vdo_log_error_strerror(result, "cannot compute index save size"); sls->page_map_blocks = DIV_ROUND_UP(uds_compute_index_page_map_save_size(geometry), @@ -255,13 +255,13 @@ int uds_compute_index_size(const struct uds_parameters *parameters, u64 *index_s struct save_layout_sizes sizes; if (index_size == NULL) { - uds_log_error("Missing output size pointer"); + vdo_log_error("Missing output size pointer"); return -EINVAL; } result = uds_make_configuration(parameters, &index_config); if (result != UDS_SUCCESS) { - uds_log_error_strerror(result, "cannot compute index size"); + vdo_log_error_strerror(result, "cannot compute index size"); return uds_status_to_errno(result); } @@ -648,7 +648,7 @@ static int discard_index_state_data(struct index_layout *layout) } if (saved_result != UDS_SUCCESS) { - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "%s: cannot destroy all index saves", __func__); } @@ -755,18 +755,18 @@ static int __must_check write_uds_index_config(struct index_layout *layout, result = open_layout_writer(layout, &layout->config, offset, &writer); if (result != UDS_SUCCESS) - return uds_log_error_strerror(result, "failed to open config region"); + return vdo_log_error_strerror(result, "failed to open config region"); result = uds_write_config_contents(writer, config, layout->super.version); if (result != UDS_SUCCESS) { uds_free_buffered_writer(writer); - return uds_log_error_strerror(result, "failed to write config region"); + return vdo_log_error_strerror(result, "failed to write config region"); } result = uds_flush_buffered_writer(writer); if (result != UDS_SUCCESS) { uds_free_buffered_writer(writer); - return uds_log_error_strerror(result, "cannot flush config writer"); + return vdo_log_error_strerror(result, "cannot flush config writer"); } uds_free_buffered_writer(writer); @@ -873,7 +873,7 @@ static int find_latest_uds_index_save_slot(struct index_layout *layout, } if (latest == NULL) { - uds_log_error("No valid index save found"); + vdo_log_error("No valid index save found"); return UDS_INDEX_NOT_SAVED_CLEANLY; } @@ -1145,7 +1145,7 @@ static int __must_check load_region_table(struct buffered_reader *reader, result = uds_read_from_buffered_reader(reader, buffer, sizeof(buffer)); if (result != 
UDS_SUCCESS) - return uds_log_error_strerror(result, "cannot read region table header"); + return vdo_log_error_strerror(result, "cannot read region table header"); decode_u64_le(buffer, &offset, &header.magic); decode_u64_le(buffer, &offset, &header.region_blocks); @@ -1158,7 +1158,7 @@ static int __must_check load_region_table(struct buffered_reader *reader, return UDS_NO_INDEX; if (header.version != 1) { - return uds_log_error_strerror(UDS_UNSUPPORTED_VERSION, + return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION, "unknown region table version %hu", header.version); } @@ -1178,7 +1178,7 @@ static int __must_check load_region_table(struct buffered_reader *reader, sizeof(region_buffer)); if (result != UDS_SUCCESS) { vdo_free(table); - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "cannot read region table layouts"); } @@ -1209,7 +1209,7 @@ static int __must_check read_super_block_data(struct buffered_reader *reader, result = uds_read_from_buffered_reader(reader, buffer, saved_size); if (result != UDS_SUCCESS) { vdo_free(buffer); - return uds_log_error_strerror(result, "cannot read region table header"); + return vdo_log_error_strerror(result, "cannot read region table header"); } memcpy(&super->magic_label, buffer, MAGIC_SIZE); @@ -1236,19 +1236,19 @@ static int __must_check read_super_block_data(struct buffered_reader *reader, vdo_free(buffer); if (memcmp(super->magic_label, LAYOUT_MAGIC, MAGIC_SIZE) != 0) - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "unknown superblock magic label"); if ((super->version < SUPER_VERSION_MINIMUM) || (super->version == 4) || (super->version == 5) || (super->version == 6) || (super->version > SUPER_VERSION_MAXIMUM)) { - return uds_log_error_strerror(UDS_UNSUPPORTED_VERSION, + return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION, "unknown superblock version number %u", super->version); } if (super->volume_offset < super->start_offset) { - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "inconsistent offsets (start %llu, volume %llu)", (unsigned long long) super->start_offset, (unsigned long long) super->volume_offset); @@ -1256,13 +1256,13 @@ static int __must_check read_super_block_data(struct buffered_reader *reader, /* Sub-indexes are no longer used but the layout retains this field. 
*/ if (super->index_count != 1) { - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "invalid subindex count %u", super->index_count); } if (generate_primary_nonce(super->nonce_info, sizeof(super->nonce_info)) != super->nonce) { - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "inconsistent superblock nonce"); } @@ -1273,15 +1273,15 @@ static int __must_check verify_region(struct layout_region *lr, u64 start_block, enum region_kind kind, unsigned int instance) { if (lr->start_block != start_block) - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "incorrect layout region offset"); if (lr->kind != kind) - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "incorrect layout region kind"); if (lr->instance != instance) { - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "incorrect layout region instance"); } @@ -1323,7 +1323,7 @@ static int __must_check verify_sub_index(struct index_layout *layout, u64 start_ next_block -= layout->super.volume_offset; if (next_block != start_block + sil->sub_index.block_count) { - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "sub index region does not span all saves"); } @@ -1368,7 +1368,7 @@ static int __must_check reconstitute_layout(struct index_layout *layout, return result; if (++next_block != (first_block + layout->total_blocks)) { - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "layout table does not span total blocks"); } @@ -1388,19 +1388,19 @@ static int __must_check load_super_block(struct index_layout *layout, size_t blo if (table->header.type != RH_TYPE_SUPER) { vdo_free(table); - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "not a superblock region table"); } result = read_super_block_data(reader, layout, table->header.payload); if (result != UDS_SUCCESS) { vdo_free(table); - return uds_log_error_strerror(result, "unknown superblock format"); + return vdo_log_error_strerror(result, "unknown superblock format"); } if (super->block_size != block_size) { vdo_free(table); - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "superblock saved block_size %u differs from supplied block_size %zu", super->block_size, block_size); } @@ -1421,14 +1421,14 @@ static int __must_check read_index_save_data(struct buffered_reader *reader, size_t offset = 0; if (saved_size != sizeof(buffer)) { - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "unexpected index save data size %zu", saved_size); } result = uds_read_from_buffered_reader(reader, buffer, sizeof(buffer)); if (result != UDS_SUCCESS) - return uds_log_error_strerror(result, "cannot read index save data"); + return vdo_log_error_strerror(result, "cannot read index save data"); decode_u64_le(buffer, &offset, &isl->save_data.timestamp); decode_u64_le(buffer, &offset, &isl->save_data.nonce); @@ -1436,7 +1436,7 @@ static int __must_check read_index_save_data(struct buffered_reader *reader, offset += sizeof(u32); if (isl->save_data.version > 1) { - return uds_log_error_strerror(UDS_UNSUPPORTED_VERSION, + return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION, "unknown index save version number %u", 
isl->save_data.version); } @@ -1446,7 +1446,7 @@ static int __must_check read_index_save_data(struct buffered_reader *reader, if ((file_version.signature != INDEX_STATE_VERSION_301.signature) || (file_version.version_id != INDEX_STATE_VERSION_301.version_id)) { - return uds_log_error_strerror(UDS_UNSUPPORTED_VERSION, + return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION, "index state version %d,%d is unsupported", file_version.signature, file_version.version_id); @@ -1523,7 +1523,7 @@ static int __must_check reconstruct_index_save(struct index_save_layout *isl, next_block += isl->free_space.block_count; if (next_block != last_block) { - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "index save layout table incomplete"); } @@ -1539,7 +1539,7 @@ static int __must_check load_index_save(struct index_save_layout *isl, result = load_region_table(reader, &table); if (result != UDS_SUCCESS) { - return uds_log_error_strerror(result, "cannot read index save %u header", + return vdo_log_error_strerror(result, "cannot read index save %u header", instance); } @@ -1547,7 +1547,7 @@ static int __must_check load_index_save(struct index_save_layout *isl, u64 region_blocks = table->header.region_blocks; vdo_free(table); - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "unexpected index save %u region block count %llu", instance, (unsigned long long) region_blocks); @@ -1561,7 +1561,7 @@ static int __must_check load_index_save(struct index_save_layout *isl, if (table->header.type != RH_TYPE_SAVE) { - uds_log_error_strerror(UDS_CORRUPT_DATA, + vdo_log_error_strerror(UDS_CORRUPT_DATA, "unexpected index save %u header type %u", instance, table->header.type); vdo_free(table); @@ -1571,7 +1571,7 @@ static int __must_check load_index_save(struct index_save_layout *isl, result = read_index_save_data(reader, isl, table->header.payload); if (result != UDS_SUCCESS) { vdo_free(table); - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "unknown index save %u data format", instance); } @@ -1579,7 +1579,7 @@ static int __must_check load_index_save(struct index_save_layout *isl, result = reconstruct_index_save(isl, table); vdo_free(table); if (result != UDS_SUCCESS) { - return uds_log_error_strerror(result, "cannot reconstruct index save %u", + return vdo_log_error_strerror(result, "cannot reconstruct index save %u", instance); } @@ -1598,7 +1598,7 @@ static int __must_check load_sub_index_regions(struct index_layout *layout) result = open_region_reader(layout, &isl->index_save, &reader); if (result != UDS_SUCCESS) { - uds_log_error_strerror(result, + vdo_log_error_strerror(result, "cannot get reader for index 0 save %u", j); return result; @@ -1626,12 +1626,12 @@ static int __must_check verify_uds_index_config(struct index_layout *layout, offset = layout->super.volume_offset - layout->super.start_offset; result = open_layout_reader(layout, &layout->config, offset, &reader); if (result != UDS_SUCCESS) - return uds_log_error_strerror(result, "failed to open config reader"); + return vdo_log_error_strerror(result, "failed to open config reader"); result = uds_validate_config_contents(reader, config); if (result != UDS_SUCCESS) { uds_free_buffered_reader(reader); - return uds_log_error_strerror(result, "failed to read config region"); + return vdo_log_error_strerror(result, "failed to read config region"); } uds_free_buffered_reader(reader); @@ -1646,7 +1646,7 @@ static int 
load_index_layout(struct index_layout *layout, struct uds_configurati result = uds_make_buffered_reader(layout->factory, layout->offset / UDS_BLOCK_SIZE, 1, &reader); if (result != UDS_SUCCESS) - return uds_log_error_strerror(result, "unable to read superblock"); + return vdo_log_error_strerror(result, "unable to read superblock"); result = load_super_block(layout, UDS_BLOCK_SIZE, layout->offset / UDS_BLOCK_SIZE, reader); @@ -1675,7 +1675,7 @@ static int create_layout_factory(struct index_layout *layout, writable_size = uds_get_writable_size(factory) & -UDS_BLOCK_SIZE; if (writable_size < config->size + config->offset) { uds_put_io_factory(factory); - uds_log_error("index storage (%zu) is smaller than the requested size %zu", + vdo_log_error("index storage (%zu) is smaller than the requested size %zu", writable_size, config->size + config->offset); return -ENOSPC; } @@ -1708,7 +1708,7 @@ int uds_make_index_layout(struct uds_configuration *config, bool new_layout, } if (layout->factory_size < sizes.total_size) { - uds_log_error("index storage (%zu) is smaller than the required size %llu", + vdo_log_error("index storage (%zu) is smaller than the required size %llu", layout->factory_size, (unsigned long long) sizes.total_size); uds_free_index_layout(layout); diff --git a/drivers/md/dm-vdo/indexer/index-page-map.c b/drivers/md/dm-vdo/indexer/index-page-map.c index c5d1b9995846..00b44e07d0c1 100644 --- a/drivers/md/dm-vdo/indexer/index-page-map.c +++ b/drivers/md/dm-vdo/indexer/index-page-map.c @@ -167,7 +167,7 @@ int uds_read_index_page_map(struct index_page_map *map, struct buffered_reader * decode_u16_le(buffer, &offset, &map->entries[i]); vdo_free(buffer); - uds_log_debug("read index page map, last update %llu", + vdo_log_debug("read index page map, last update %llu", (unsigned long long) map->last_update); return UDS_SUCCESS; } diff --git a/drivers/md/dm-vdo/indexer/index-session.c b/drivers/md/dm-vdo/indexer/index-session.c index 9eae00548095..aee0914d604a 100644 --- a/drivers/md/dm-vdo/indexer/index-session.c +++ b/drivers/md/dm-vdo/indexer/index-session.c @@ -104,7 +104,7 @@ int uds_launch_request(struct uds_request *request) int result; if (request->callback == NULL) { - uds_log_error("missing required callback"); + vdo_log_error("missing required callback"); return -EINVAL; } @@ -116,7 +116,7 @@ int uds_launch_request(struct uds_request *request) case UDS_UPDATE: break; default: - uds_log_error("received invalid callback type"); + vdo_log_error("received invalid callback type"); return -EINVAL; } @@ -244,7 +244,7 @@ static int __must_check make_empty_index_session(struct uds_index_session **inde int uds_create_index_session(struct uds_index_session **session) { if (session == NULL) { - uds_log_error("missing session pointer"); + vdo_log_error("missing session pointer"); return -EINVAL; } @@ -257,10 +257,10 @@ static int __must_check start_loading_index_session(struct uds_index_session *in mutex_lock(&index_session->request_mutex); if (index_session->state & IS_FLAG_SUSPENDED) { - uds_log_info("Index session is suspended"); + vdo_log_info("Index session is suspended"); result = -EBUSY; } else if (index_session->state != 0) { - uds_log_info("Index is already loaded"); + vdo_log_info("Index is already loaded"); result = -EBUSY; } else { index_session->state |= IS_FLAG_LOADING; @@ -290,7 +290,7 @@ static int initialize_index_session(struct uds_index_session *index_session, result = uds_make_configuration(&index_session->parameters, &config); if (result != UDS_SUCCESS) { - 
uds_log_error_strerror(result, "Failed to allocate config"); + vdo_log_error_strerror(result, "Failed to allocate config"); return result; } @@ -298,7 +298,7 @@ static int initialize_index_session(struct uds_index_session *index_session, result = uds_make_index(config, open_type, &index_session->load_context, enter_callback_stage, &index_session->index); if (result != UDS_SUCCESS) - uds_log_error_strerror(result, "Failed to make index"); + vdo_log_error_strerror(result, "Failed to make index"); else uds_log_configuration(config); @@ -332,15 +332,15 @@ int uds_open_index(enum uds_open_index_type open_type, char name[BDEVNAME_SIZE]; if (parameters == NULL) { - uds_log_error("missing required parameters"); + vdo_log_error("missing required parameters"); return -EINVAL; } if (parameters->bdev == NULL) { - uds_log_error("missing required block device"); + vdo_log_error("missing required block device"); return -EINVAL; } if (session == NULL) { - uds_log_error("missing required session pointer"); + vdo_log_error("missing required session pointer"); return -EINVAL; } @@ -350,11 +350,11 @@ int uds_open_index(enum uds_open_index_type open_type, session->parameters = *parameters; format_dev_t(name, parameters->bdev->bd_dev); - uds_log_info("%s: %s", get_open_type_string(open_type), name); + vdo_log_info("%s: %s", get_open_type_string(open_type), name); result = initialize_index_session(session, open_type); if (result != UDS_SUCCESS) - uds_log_error_strerror(result, "Failed %s", + vdo_log_error_strerror(result, "Failed %s", get_open_type_string(open_type)); finish_loading_index_session(session, result); @@ -426,7 +426,7 @@ int uds_suspend_index_session(struct uds_index_session *session, bool save) if ((session->state & IS_FLAG_WAITING) || (session->state & IS_FLAG_DESTROYING)) { no_work = true; - uds_log_info("Index session is already changing state"); + vdo_log_info("Index session is already changing state"); result = -EBUSY; } else if (session->state & IS_FLAG_SUSPENDED) { no_work = true; @@ -485,7 +485,7 @@ int uds_resume_index_session(struct uds_index_session *session, mutex_lock(&session->request_mutex); if (session->state & IS_FLAG_WAITING) { - uds_log_info("Index session is already changing state"); + vdo_log_info("Index session is already changing state"); no_work = true; result = -EBUSY; } else if (!(session->state & IS_FLAG_SUSPENDED)) { @@ -562,7 +562,7 @@ static int save_and_free_index(struct uds_index_session *index_session) if (!suspended) { result = uds_save_index(index); if (result != UDS_SUCCESS) - uds_log_warning_strerror(result, + vdo_log_warning_strerror(result, "ignoring error from save_index"); } uds_free_index(index); @@ -598,7 +598,7 @@ int uds_close_index(struct uds_index_session *index_session) } if (index_session->state & IS_FLAG_SUSPENDED) { - uds_log_info("Index session is suspended"); + vdo_log_info("Index session is suspended"); result = -EBUSY; } else if ((index_session->state & IS_FLAG_DESTROYING) || !(index_session->state & IS_FLAG_LOADED)) { @@ -611,10 +611,10 @@ int uds_close_index(struct uds_index_session *index_session) if (result != UDS_SUCCESS) return uds_status_to_errno(result); - uds_log_debug("Closing index"); + vdo_log_debug("Closing index"); wait_for_no_requests_in_progress(index_session); result = save_and_free_index(index_session); - uds_log_debug("Closed index"); + vdo_log_debug("Closed index"); mutex_lock(&index_session->request_mutex); index_session->state &= ~IS_FLAG_CLOSING; @@ -629,7 +629,7 @@ int uds_destroy_index_session(struct uds_index_session 
*index_session) int result; bool load_pending = false; - uds_log_debug("Destroying index session"); + vdo_log_debug("Destroying index session"); /* Wait for any current index state change to complete. */ mutex_lock(&index_session->request_mutex); @@ -641,7 +641,7 @@ int uds_destroy_index_session(struct uds_index_session *index_session) if (index_session->state & IS_FLAG_DESTROYING) { mutex_unlock(&index_session->request_mutex); - uds_log_info("Index session is already closing"); + vdo_log_info("Index session is already closing"); return -EBUSY; } @@ -672,7 +672,7 @@ int uds_destroy_index_session(struct uds_index_session *index_session) result = save_and_free_index(index_session); uds_request_queue_finish(index_session->callback_queue); index_session->callback_queue = NULL; - uds_log_debug("Destroyed index session"); + vdo_log_debug("Destroyed index session"); vdo_free(index_session); return uds_status_to_errno(result); } @@ -710,7 +710,7 @@ int uds_get_index_session_stats(struct uds_index_session *index_session, struct uds_index_stats *stats) { if (stats == NULL) { - uds_log_error("received a NULL index stats pointer"); + vdo_log_error("received a NULL index stats pointer"); return -EINVAL; } diff --git a/drivers/md/dm-vdo/indexer/index.c b/drivers/md/dm-vdo/indexer/index.c index 221af95ca2a4..1ba767144426 100644 --- a/drivers/md/dm-vdo/indexer/index.c +++ b/drivers/md/dm-vdo/indexer/index.c @@ -188,7 +188,7 @@ static int finish_previous_chapter(struct uds_index *index, u64 current_chapter_ mutex_unlock(&writer->mutex); if (result != UDS_SUCCESS) - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "Writing of previous open chapter failed"); return UDS_SUCCESS; @@ -258,7 +258,7 @@ static int open_next_chapter(struct index_zone *zone) unsigned int finished_zones; u32 expire_chapters; - uds_log_debug("closing chapter %llu of zone %u after %u entries (%u short)", + vdo_log_debug("closing chapter %llu of zone %u after %u entries (%u short)", (unsigned long long) zone->newest_virtual_chapter, zone->id, zone->open_chapter->size, zone->open_chapter->capacity - zone->open_chapter->size); @@ -315,7 +315,7 @@ static int dispatch_index_zone_control_request(struct uds_request *request) return handle_chapter_closed(zone, message->virtual_chapter); default: - uds_log_error("invalid message type: %d", message->type); + vdo_log_error("invalid message type: %d", message->type); return UDS_INVALID_ARGUMENT; } } @@ -600,7 +600,7 @@ static int dispatch_index_request(struct uds_index *index, struct uds_request *r break; default: - result = uds_log_warning_strerror(UDS_INVALID_ARGUMENT, + result = vdo_log_warning_strerror(UDS_INVALID_ARGUMENT, "invalid request type: %d", request->type); break; @@ -618,7 +618,7 @@ static void execute_zone_request(struct uds_request *request) if (request->zone_message.type != UDS_MESSAGE_NONE) { result = dispatch_index_zone_control_request(request); if (result != UDS_SUCCESS) { - uds_log_error_strerror(result, "error executing message: %d", + vdo_log_error_strerror(result, "error executing message: %d", request->zone_message.type); } @@ -678,7 +678,7 @@ static void close_chapters(void *arg) struct chapter_writer *writer = arg; struct uds_index *index = writer->index; - uds_log_debug("chapter writer starting"); + vdo_log_debug("chapter writer starting"); mutex_lock(&writer->mutex); for (;;) { while (writer->zones_to_write < index->zone_count) { @@ -688,7 +688,7 @@ static void close_chapters(void *arg) * open chapter, so we can exit now. 
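Aside: the wait in close_chapters() here is the standard condition-variable idiom: the predicate is re-tested in a while loop because uds_wait_cond() releases writer->mutex while sleeping and re-acquires it before returning, so the state may have changed across any wakeup. Reduced to the skeleton visible in this hunk (the writer's exit test is elided):

	mutex_lock(&writer->mutex);
	for (;;) {
		while (writer->zones_to_write < index->zone_count) {
			/* ... return if no more chapters are coming ... */
			uds_wait_cond(&writer->cond, &writer->mutex);
		}
		/* Every zone has contributed: write out the closed chapter. */
	}
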
*/ mutex_unlock(&writer->mutex); - uds_log_debug("chapter writer stopping"); + vdo_log_debug("chapter writer stopping"); return; } uds_wait_cond(&writer->cond, &writer->mutex); @@ -711,7 +711,7 @@ static void close_chapters(void *arg) index->has_saved_open_chapter = false; result = uds_discard_open_chapter(index->layout); if (result == UDS_SUCCESS) - uds_log_debug("Discarding saved open chapter"); + vdo_log_debug("Discarding saved open chapter"); } result = uds_close_open_chapter(writer->chapters, index->zone_count, @@ -818,7 +818,7 @@ static int load_index(struct uds_index *index) last_save_chapter = ((index->last_save != NO_LAST_SAVE) ? index->last_save : 0); - uds_log_info("loaded index from chapter %llu through chapter %llu", + vdo_log_info("loaded index from chapter %llu through chapter %llu", (unsigned long long) index->oldest_virtual_chapter, (unsigned long long) last_save_chapter); @@ -843,7 +843,7 @@ static int rebuild_index_page_map(struct uds_index *index, u64 vcn) index_page_number, &chapter_index_page); if (result != UDS_SUCCESS) { - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "failed to read index page %u in chapter %u", index_page_number, chapter); } @@ -851,7 +851,7 @@ static int rebuild_index_page_map(struct uds_index *index, u64 vcn) lowest_delta_list = chapter_index_page->lowest_list_number; highest_delta_list = chapter_index_page->highest_list_number; if (lowest_delta_list != expected_list_number) { - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "chapter %u index page %u is corrupt", chapter, index_page_number); } @@ -980,7 +980,7 @@ static int replay_chapter(struct uds_index *index, u64 virtual, bool sparse) u32 physical_chapter; if (check_for_suspend(index)) { - uds_log_info("Replay interrupted by index shutdown at chapter %llu", + vdo_log_info("Replay interrupted by index shutdown at chapter %llu", (unsigned long long) virtual); return -EBUSY; } @@ -992,7 +992,7 @@ static int replay_chapter(struct uds_index *index, u64 virtual, bool sparse) result = rebuild_index_page_map(index, virtual); if (result != UDS_SUCCESS) { - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "could not rebuild index page map for chapter %u", physical_chapter); } @@ -1005,7 +1005,7 @@ static int replay_chapter(struct uds_index *index, u64 virtual, bool sparse) result = uds_get_volume_record_page(index->volume, physical_chapter, record_page_number, &record_page); if (result != UDS_SUCCESS) { - return uds_log_error_strerror(result, "could not get page %d", + return vdo_log_error_strerror(result, "could not get page %d", record_page_number); } @@ -1034,7 +1034,7 @@ static int replay_volume(struct uds_index *index) u64 upto_virtual = index->newest_virtual_chapter; bool will_be_sparse; - uds_log_info("Replaying volume from chapter %llu through chapter %llu", + vdo_log_info("Replaying volume from chapter %llu through chapter %llu", (unsigned long long) from_virtual, (unsigned long long) upto_virtual); @@ -1064,7 +1064,7 @@ static int replay_volume(struct uds_index *index) new_map_update = index->volume->index_page_map->last_update; if (new_map_update != old_map_update) { - uds_log_info("replay changed index page map update from %llu to %llu", + vdo_log_info("replay changed index page map update from %llu to %llu", (unsigned long long) old_map_update, (unsigned long long) new_map_update); } @@ -1084,7 +1084,7 @@ static int rebuild_index(struct uds_index *index) result = 
uds_find_volume_chapter_boundaries(index->volume, &lowest, &highest, &is_empty); if (result != UDS_SUCCESS) { - return uds_log_fatal_strerror(result, + return vdo_log_fatal_strerror(result, "cannot rebuild index: unknown volume chapter boundaries"); } @@ -1194,7 +1194,7 @@ int uds_make_index(struct uds_configuration *config, enum uds_open_index_type op result = make_index_zone(index, z); if (result != UDS_SUCCESS) { uds_free_index(index); - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "Could not create index zone"); } } @@ -1203,7 +1203,7 @@ int uds_make_index(struct uds_configuration *config, enum uds_open_index_type op result = uds_make_volume_index(config, nonce, &index->volume_index); if (result != UDS_SUCCESS) { uds_free_index(index); - return uds_log_error_strerror(result, "could not make volume index"); + return vdo_log_error_strerror(result, "could not make volume index"); } index->load_context = load_context; @@ -1229,14 +1229,14 @@ int uds_make_index(struct uds_configuration *config, enum uds_open_index_type op break; case -ENOMEM: /* We should not try a rebuild for this error. */ - uds_log_error_strerror(result, "index could not be loaded"); + vdo_log_error_strerror(result, "index could not be loaded"); break; default: - uds_log_error_strerror(result, "index could not be loaded"); + vdo_log_error_strerror(result, "index could not be loaded"); if (open_type == UDS_LOAD) { result = rebuild_index(index); if (result != UDS_SUCCESS) { - uds_log_error_strerror(result, + vdo_log_error_strerror(result, "index could not be rebuilt"); } } @@ -1246,7 +1246,7 @@ int uds_make_index(struct uds_configuration *config, enum uds_open_index_type op if (result != UDS_SUCCESS) { uds_free_index(index); - return uds_log_error_strerror(result, "fatal error in %s()", __func__); + return vdo_log_error_strerror(result, "fatal error in %s()", __func__); } for (z = 0; z < index->zone_count; z++) { @@ -1320,16 +1320,16 @@ int uds_save_index(struct uds_index *index) index->prev_save = index->last_save; index->last_save = ((index->newest_virtual_chapter == 0) ? 
NO_LAST_SAVE : index->newest_virtual_chapter - 1); - uds_log_info("beginning save (vcn %llu)", (unsigned long long) index->last_save); + vdo_log_info("beginning save (vcn %llu)", (unsigned long long) index->last_save); result = uds_save_index_state(index->layout, index); if (result != UDS_SUCCESS) { - uds_log_info("save index failed"); + vdo_log_info("save index failed"); index->last_save = index->prev_save; } else { index->has_saved_open_chapter = true; index->need_to_save = false; - uds_log_info("finished save (vcn %llu)", + vdo_log_info("finished save (vcn %llu)", (unsigned long long) index->last_save); } diff --git a/drivers/md/dm-vdo/indexer/io-factory.c b/drivers/md/dm-vdo/indexer/io-factory.c index 0dcf6d596653..515765d35794 100644 --- a/drivers/md/dm-vdo/indexer/io-factory.c +++ b/drivers/md/dm-vdo/indexer/io-factory.c @@ -365,7 +365,7 @@ void uds_free_buffered_writer(struct buffered_writer *writer) flush_previous_buffer(writer); result = -dm_bufio_write_dirty_buffers(writer->client); if (result != UDS_SUCCESS) - uds_log_warning_strerror(result, "%s: failed to sync storage", __func__); + vdo_log_warning_strerror(result, "%s: failed to sync storage", __func__); dm_bufio_client_destroy(writer->client); uds_put_io_factory(writer->factory); diff --git a/drivers/md/dm-vdo/indexer/open-chapter.c b/drivers/md/dm-vdo/indexer/open-chapter.c index 46b7bc1ac324..4a67bcadaae0 100644 --- a/drivers/md/dm-vdo/indexer/open-chapter.c +++ b/drivers/md/dm-vdo/indexer/open-chapter.c @@ -259,14 +259,14 @@ static int fill_delta_chapter_index(struct open_chapter_zone **chapter_zones, overflow_count++; break; default: - uds_log_error_strerror(result, + vdo_log_error_strerror(result, "failed to build open chapter index"); return result; } } if (overflow_count > 0) - uds_log_warning("Failed to add %d entries to chapter index", + vdo_log_warning("Failed to add %d entries to chapter index", overflow_count); return UDS_SUCCESS; @@ -417,7 +417,7 @@ int uds_load_open_chapter(struct uds_index *index, struct buffered_reader *reade return result; if (memcmp(OPEN_CHAPTER_VERSION, version, sizeof(version)) != 0) { - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "Invalid open chapter version: %.*s", (int) sizeof(version), version); } diff --git a/drivers/md/dm-vdo/indexer/volume-index.c b/drivers/md/dm-vdo/indexer/volume-index.c index e2b0600d82b9..12f954a0c532 100644 --- a/drivers/md/dm-vdo/indexer/volume-index.c +++ b/drivers/md/dm-vdo/indexer/volume-index.c @@ -225,13 +225,13 @@ static int compute_volume_sub_index_parameters(const struct uds_configuration *c params->address_bits = bits_per(address_count - 1); params->chapter_bits = bits_per(rounded_chapters - 1); if ((u32) params->list_count != params->list_count) { - return uds_log_warning_strerror(UDS_INVALID_ARGUMENT, + return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT, "cannot initialize volume index with %llu delta lists", (unsigned long long) params->list_count); } if (params->address_bits > 31) { - return uds_log_warning_strerror(UDS_INVALID_ARGUMENT, + return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT, "cannot initialize volume index with %u address bits", params->address_bits); } @@ -568,7 +568,7 @@ int uds_put_volume_index_record(struct volume_index_record *record, u64 virtual_ u64 low = get_zone_for_record(record)->virtual_chapter_low; u64 high = get_zone_for_record(record)->virtual_chapter_high; - return uds_log_warning_strerror(UDS_INVALID_ARGUMENT, + return 
vdo_log_warning_strerror(UDS_INVALID_ARGUMENT, "cannot put record into chapter number %llu that is out of the valid range %llu to %llu", (unsigned long long) virtual_chapter, (unsigned long long) low, @@ -590,7 +590,7 @@ int uds_put_volume_index_record(struct volume_index_record *record, u64 virtual_ record->is_found = true; break; case UDS_OVERFLOW: - uds_log_ratelimit(uds_log_warning_strerror, UDS_OVERFLOW, + vdo_log_ratelimit(vdo_log_warning_strerror, UDS_OVERFLOW, "Volume index entry dropped due to overflow condition"); uds_log_delta_index_entry(&record->delta_entry); break; @@ -606,7 +606,7 @@ int uds_remove_volume_index_record(struct volume_index_record *record) int result; if (!record->is_found) - return uds_log_warning_strerror(UDS_BAD_STATE, + return vdo_log_warning_strerror(UDS_BAD_STATE, "illegal operation on new record"); /* Mark the record so that it cannot be used again */ @@ -644,7 +644,7 @@ static void set_volume_sub_index_zone_open_chapter(struct volume_sub_index *sub_ 1 + (used_bits - sub_index->max_zone_bits) / sub_index->chapter_zone_bits; if (expire_count == 1) { - uds_log_ratelimit(uds_log_info, + vdo_log_ratelimit(vdo_log_info, "zone %u: At chapter %llu, expiring chapter %llu early", zone_number, (unsigned long long) virtual_chapter, @@ -662,7 +662,7 @@ static void set_volume_sub_index_zone_open_chapter(struct volume_sub_index *sub_ zone->virtual_chapter_high - zone->virtual_chapter_low; zone->virtual_chapter_low = zone->virtual_chapter_high; } - uds_log_ratelimit(uds_log_info, + vdo_log_ratelimit(vdo_log_info, "zone %u: At chapter %llu, expiring chapters %llu to %llu early", zone_number, (unsigned long long) virtual_chapter, @@ -713,14 +713,14 @@ int uds_set_volume_index_record_chapter(struct volume_index_record *record, int result; if (!record->is_found) - return uds_log_warning_strerror(UDS_BAD_STATE, + return vdo_log_warning_strerror(UDS_BAD_STATE, "illegal operation on new record"); if (!is_virtual_chapter_indexed(record, virtual_chapter)) { u64 low = get_zone_for_record(record)->virtual_chapter_low; u64 high = get_zone_for_record(record)->virtual_chapter_high; - return uds_log_warning_strerror(UDS_INVALID_ARGUMENT, + return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT, "cannot set chapter number %llu that is out of the valid range %llu to %llu", (unsigned long long) virtual_chapter, (unsigned long long) low, @@ -820,7 +820,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index, result = uds_read_from_buffered_reader(readers[i], buffer, sizeof(buffer)); if (result != UDS_SUCCESS) { - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to read volume index header"); } @@ -839,14 +839,14 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index, result = UDS_CORRUPT_DATA; if (memcmp(header.magic, MAGIC_START_5, MAGIC_SIZE) != 0) { - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "volume index file had bad magic number"); } if (sub_index->volume_nonce == 0) { sub_index->volume_nonce = header.volume_nonce; } else if (header.volume_nonce != sub_index->volume_nonce) { - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "volume index volume nonce incorrect"); } @@ -857,7 +857,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index, u64 low = header.virtual_chapter_low; u64 high = header.virtual_chapter_high; - return 
uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "Inconsistent volume index zone files: Chapter range is [%llu,%llu], chapter range %d is [%llu,%llu]", (unsigned long long) virtual_chapter_low, (unsigned long long) virtual_chapter_high, @@ -873,7 +873,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index, result = uds_read_from_buffered_reader(readers[i], decoded, sizeof(u64)); if (result != UDS_SUCCESS) { - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to read volume index flush ranges"); } @@ -891,7 +891,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index, result = uds_start_restoring_delta_index(&sub_index->delta_index, readers, reader_count); if (result != UDS_SUCCESS) - return uds_log_warning_strerror(result, "restoring delta index failed"); + return vdo_log_warning_strerror(result, "restoring delta index failed"); return UDS_SUCCESS; } @@ -916,7 +916,7 @@ static int start_restoring_volume_index(struct volume_index *volume_index, result = uds_read_from_buffered_reader(buffered_readers[i], buffer, sizeof(buffer)); if (result != UDS_SUCCESS) { - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to read volume index header"); } @@ -931,13 +931,13 @@ static int start_restoring_volume_index(struct volume_index *volume_index, result = UDS_CORRUPT_DATA; if (memcmp(header.magic, MAGIC_START_6, MAGIC_SIZE) != 0) - return uds_log_warning_strerror(UDS_CORRUPT_DATA, + return vdo_log_warning_strerror(UDS_CORRUPT_DATA, "volume index file had bad magic number"); if (i == 0) { volume_index->sparse_sample_rate = header.sparse_sample_rate; } else if (volume_index->sparse_sample_rate != header.sparse_sample_rate) { - uds_log_warning_strerror(UDS_CORRUPT_DATA, + vdo_log_warning_strerror(UDS_CORRUPT_DATA, "Inconsistent sparse sample rate in delta index zone files: %u vs. 
%u", volume_index->sparse_sample_rate, header.sparse_sample_rate); @@ -1031,7 +1031,7 @@ static int start_saving_volume_sub_index(const struct volume_sub_index *sub_inde result = uds_write_to_buffered_writer(buffered_writer, buffer, offset); if (result != UDS_SUCCESS) - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to write volume index header"); for (i = 0; i < list_count; i++) { @@ -1041,7 +1041,7 @@ static int start_saving_volume_sub_index(const struct volume_sub_index *sub_inde result = uds_write_to_buffered_writer(buffered_writer, encoded, sizeof(u64)); if (result != UDS_SUCCESS) { - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to write volume index flush ranges"); } } @@ -1074,7 +1074,7 @@ static int start_saving_volume_index(const struct volume_index *volume_index, result = uds_write_to_buffered_writer(writer, buffer, offset); if (result != UDS_SUCCESS) { - uds_log_warning_strerror(result, "failed to write volume index header"); + vdo_log_warning_strerror(result, "failed to write volume index header"); return result; } @@ -1264,7 +1264,7 @@ int uds_make_volume_index(const struct uds_configuration *config, u64 volume_non &volume_index->vi_non_hook); if (result != UDS_SUCCESS) { uds_free_volume_index(volume_index); - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "Error creating non hook volume index"); } @@ -1272,7 +1272,7 @@ int uds_make_volume_index(const struct uds_configuration *config, u64 volume_non &volume_index->vi_hook); if (result != UDS_SUCCESS) { uds_free_volume_index(volume_index); - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "Error creating hook volume index"); } diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c index 701f2220d803..655453bb276b 100644 --- a/drivers/md/dm-vdo/indexer/volume.c +++ b/drivers/md/dm-vdo/indexer/volume.c @@ -357,7 +357,7 @@ static void enqueue_page_read(struct volume *volume, struct uds_request *request { /* Mark the page as queued, so that chapter invalidation knows to cancel a read. 
*/ while (!enqueue_read(&volume->page_cache, request, physical_page)) { - uds_log_debug("Read queue full, waiting for reads to finish"); + vdo_log_debug("Read queue full, waiting for reads to finish"); uds_wait_cond(&volume->read_threads_read_done_cond, &volume->read_threads_mutex); } @@ -431,7 +431,7 @@ static int init_chapter_index_page(const struct volume *volume, u8 *index_page, return result; if (result != UDS_SUCCESS) { - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "Reading chapter index page for chapter %u page %u", chapter, index_page_number); } @@ -445,14 +445,14 @@ static int init_chapter_index_page(const struct volume *volume, u8 *index_page, (highest_list == chapter_index_page->highest_list_number)) return UDS_SUCCESS; - uds_log_warning("Index page map updated to %llu", + vdo_log_warning("Index page map updated to %llu", (unsigned long long) volume->index_page_map->last_update); - uds_log_warning("Page map expects that chapter %u page %u has range %u to %u, but chapter index page has chapter %llu with range %u to %u", + vdo_log_warning("Page map expects that chapter %u page %u has range %u to %u, but chapter index page has chapter %llu with range %u to %u", chapter, index_page_number, lowest_list, highest_list, (unsigned long long) ci_virtual, chapter_index_page->lowest_list_number, chapter_index_page->highest_list_number); - return uds_log_error_strerror(UDS_CORRUPT_DATA, + return vdo_log_error_strerror(UDS_CORRUPT_DATA, "index page map mismatch with chapter index"); } @@ -547,7 +547,7 @@ static int process_entry(struct volume *volume, struct queued_read *entry) int result; if (entry->invalid) { - uds_log_debug("Requeuing requests for invalid page"); + vdo_log_debug("Requeuing requests for invalid page"); return UDS_SUCCESS; } @@ -558,7 +558,7 @@ static int process_entry(struct volume *volume, struct queued_read *entry) mutex_lock(&volume->read_threads_mutex); if (IS_ERR(page_data)) { result = -PTR_ERR(page_data); - uds_log_warning_strerror(result, + vdo_log_warning_strerror(result, "error reading physical page %u from volume", page_number); cancel_page_in_cache(&volume->page_cache, page_number, page); @@ -566,7 +566,7 @@ static int process_entry(struct volume *volume, struct queued_read *entry) } if (entry->invalid) { - uds_log_warning("Page %u invalidated after read", page_number); + vdo_log_warning("Page %u invalidated after read", page_number); cancel_page_in_cache(&volume->page_cache, page_number, page); return UDS_SUCCESS; } @@ -574,7 +574,7 @@ static int process_entry(struct volume *volume, struct queued_read *entry) if (!is_record_page(volume->geometry, page_number)) { result = initialize_index_page(volume, page_number, page); if (result != UDS_SUCCESS) { - uds_log_warning("Error initializing chapter index page"); + vdo_log_warning("Error initializing chapter index page"); cancel_page_in_cache(&volume->page_cache, page_number, page); return result; } @@ -582,7 +582,7 @@ static int process_entry(struct volume *volume, struct queued_read *entry) result = put_page_in_cache(&volume->page_cache, page_number, page); if (result != UDS_SUCCESS) { - uds_log_warning("Error putting page %u in cache", page_number); + vdo_log_warning("Error putting page %u in cache", page_number); cancel_page_in_cache(&volume->page_cache, page_number, page); return result; } @@ -624,7 +624,7 @@ static void read_thread_function(void *arg) { struct volume *volume = arg; - uds_log_debug("reader starting"); + vdo_log_debug("reader starting"); 
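A note on the idiom behind nearly every call site converted above: the vdo_log_*_strerror() macros log the formatted message with the error text appended and then evaluate to the error number passed in, so a bounds check can log and return in a single statement. A minimal sketch of that usage follows; check_chapter_range() is a hypothetical caller invented for illustration, while the macro and the UDS_* status codes are the real names this series settles on:

	#include "logger.h"	/* vdo_log_warning_strerror() */
	/* UDS_SUCCESS and UDS_INVALID_ARGUMENT come from the indexer's
	 * status-code header (assumed here). */

	/* Hypothetical caller illustrating the log-and-return idiom. */
	static int check_chapter_range(u64 chapter, u64 low, u64 high)
	{
		if ((chapter < low) || (chapter > high)) {
			/* Logs "...: <error text> (<code>)" at warning level,
			 * then evaluates to UDS_INVALID_ARGUMENT. */
			return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
							"chapter %llu outside valid range %llu to %llu",
							(unsigned long long) chapter,
							(unsigned long long) low,
							(unsigned long long) high);
		}

		return UDS_SUCCESS;
	}

Because the wrappers preserve the caller's errno and return their errnum argument unchanged, the conversion is purely textual: no call site needs restructuring.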
 	mutex_lock(&volume->read_threads_mutex);
 	while (true) {
 		struct queued_read *queue_entry;
@@ -638,7 +638,7 @@ static void read_thread_function(void *arg)
 		release_queued_requests(volume, queue_entry, result);
 	}
 	mutex_unlock(&volume->read_threads_mutex);
-	uds_log_debug("reader done");
+	vdo_log_debug("reader done");
 }
 
 static void get_page_and_index(struct page_cache *cache, u32 physical_page,
@@ -701,7 +701,7 @@ static int read_page_locked(struct volume *volume, u32 physical_page,
 	page_data = dm_bufio_read(volume->client, physical_page, &page->buffer);
 	if (IS_ERR(page_data)) {
 		result = -PTR_ERR(page_data);
-		uds_log_warning_strerror(result,
+		vdo_log_warning_strerror(result,
 					 "error reading physical page %u from volume",
 					 physical_page);
 		cancel_page_in_cache(&volume->page_cache, physical_page, page);
@@ -712,7 +712,7 @@ static int read_page_locked(struct volume *volume, u32 physical_page,
 		result = initialize_index_page(volume, physical_page, page);
 		if (result != UDS_SUCCESS) {
 			if (volume->lookup_mode != LOOKUP_FOR_REBUILD)
-				uds_log_warning("Corrupt index page %u", physical_page);
+				vdo_log_warning("Corrupt index page %u", physical_page);
 			cancel_page_in_cache(&volume->page_cache, physical_page, page);
 			return result;
 		}
@@ -720,7 +720,7 @@ static int read_page_locked(struct volume *volume, u32 physical_page,
 	result = put_page_in_cache(&volume->page_cache, physical_page, page);
 	if (result != UDS_SUCCESS) {
-		uds_log_warning("Error putting page %u in cache", physical_page);
+		vdo_log_warning("Error putting page %u in cache", physical_page);
 		cancel_page_in_cache(&volume->page_cache, physical_page, page);
 		return result;
 	}
@@ -947,7 +947,7 @@ int uds_read_chapter_index_from_volume(const struct volume *volume, u64 virtual_
 						    &volume_buffers[i]);
 		if (IS_ERR(index_page)) {
 			result = -PTR_ERR(index_page);
-			uds_log_warning_strerror(result,
+			vdo_log_warning_strerror(result,
 						 "error reading physical page %u",
 						 physical_page);
 			return result;
@@ -1039,7 +1039,7 @@ static void invalidate_page(struct page_cache *cache, u32 physical_page)
 		wait_for_pending_searches(cache, page->physical_page);
 		clear_cache_page(cache, page);
 	} else if (queue_index > -1) {
-		uds_log_debug("setting pending read to invalid");
+		vdo_log_debug("setting pending read to invalid");
 		cache->read_queue[queue_index].invalid = true;
 	}
 }
@@ -1051,7 +1051,7 @@ void uds_forget_chapter(struct volume *volume, u64 virtual_chapter)
 	u32 first_page = map_to_physical_page(volume->geometry, physical_chapter, 0);
 	u32 i;
 
-	uds_log_debug("forgetting chapter %llu", (unsigned long long) virtual_chapter);
+	vdo_log_debug("forgetting chapter %llu", (unsigned long long) virtual_chapter);
 	mutex_lock(&volume->read_threads_mutex);
 	for (i = 0; i < volume->geometry->pages_per_chapter; i++)
 		invalidate_page(&volume->page_cache, first_page + i);
@@ -1077,14 +1077,14 @@ static int donate_index_page_locked(struct volume *volume, u32 physical_chapter,
 					       physical_chapter, index_page_number,
 					       &page->index_page);
 	if (result != UDS_SUCCESS) {
-		uds_log_warning("Error initialize chapter index page");
+		vdo_log_warning("Error initializing chapter index page");
 		cancel_page_in_cache(&volume->page_cache, physical_page, page);
 		return result;
 	}
 
 	result = put_page_in_cache(&volume->page_cache, physical_page, page);
 	if (result != UDS_SUCCESS) {
-		uds_log_warning("Error putting page %u in cache", physical_page);
+		vdo_log_warning("Error putting page %u in cache", physical_page);
 		cancel_page_in_cache(&volume->page_cache, physical_page, page);
 		return result;
 	}
@@ -1112,7 +1112,7 @@ static int
write_index_pages(struct volume *volume, u32 physical_chapter_number, page_data = dm_bufio_new(volume->client, physical_page, &page_buffer); if (IS_ERR(page_data)) { - return uds_log_warning_strerror(-PTR_ERR(page_data), + return vdo_log_warning_strerror(-PTR_ERR(page_data), "failed to prepare index page"); } @@ -1122,14 +1122,14 @@ static int write_index_pages(struct volume *volume, u32 physical_chapter_number, &lists_packed); if (result != UDS_SUCCESS) { dm_bufio_release(page_buffer); - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to pack index page"); } dm_bufio_mark_buffer_dirty(page_buffer); if (lists_packed == 0) { - uds_log_debug("no delta lists packed on chapter %u page %u", + vdo_log_debug("no delta lists packed on chapter %u page %u", physical_chapter_number, index_page_number); } else { delta_list_number += lists_packed; @@ -1221,14 +1221,14 @@ static int write_record_pages(struct volume *volume, u32 physical_chapter_number page_data = dm_bufio_new(volume->client, physical_page, &page_buffer); if (IS_ERR(page_data)) { - return uds_log_warning_strerror(-PTR_ERR(page_data), + return vdo_log_warning_strerror(-PTR_ERR(page_data), "failed to prepare record page"); } result = encode_record_page(volume, next_record, page_data); if (result != UDS_SUCCESS) { dm_bufio_release(page_buffer); - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to encode record page %u", record_page_number); } @@ -1259,7 +1259,7 @@ int uds_write_chapter(struct volume *volume, struct open_chapter_index *chapter_ result = -dm_bufio_write_dirty_buffers(volume->client); if (result != UDS_SUCCESS) - uds_log_error_strerror(result, "cannot sync chapter to volume"); + vdo_log_error_strerror(result, "cannot sync chapter to volume"); return result; } @@ -1286,7 +1286,7 @@ static void probe_chapter(struct volume *volume, u32 chapter_number, return; if (page->virtual_chapter_number == BAD_CHAPTER) { - uds_log_error("corrupt index page in chapter %u", + vdo_log_error("corrupt index page in chapter %u", chapter_number); return; } @@ -1294,14 +1294,14 @@ static void probe_chapter(struct volume *volume, u32 chapter_number, if (vcn == BAD_CHAPTER) { vcn = page->virtual_chapter_number; } else if (page->virtual_chapter_number != vcn) { - uds_log_error("inconsistent chapter %u index page %u: expected vcn %llu, got vcn %llu", + vdo_log_error("inconsistent chapter %u index page %u: expected vcn %llu, got vcn %llu", chapter_number, i, (unsigned long long) vcn, (unsigned long long) page->virtual_chapter_number); return; } if (expected_list_number != page->lowest_list_number) { - uds_log_error("inconsistent chapter %u index page %u: expected list number %u, got list number %u", + vdo_log_error("inconsistent chapter %u index page %u: expected list number %u, got list number %u", chapter_number, i, expected_list_number, page->lowest_list_number); return; @@ -1314,7 +1314,7 @@ static void probe_chapter(struct volume *volume, u32 chapter_number, } if (chapter_number != uds_map_to_physical_chapter(geometry, vcn)) { - uds_log_error("chapter %u vcn %llu is out of phase (%u)", chapter_number, + vdo_log_error("chapter %u vcn %llu is out of phase (%u)", chapter_number, (unsigned long long) vcn, geometry->chapters_per_volume); return; } @@ -1431,7 +1431,7 @@ static int find_chapter_limits(struct volume *volume, u32 chapter_limit, u64 *lo probe_chapter(volume, right_chapter, &highest); if (bad_chapters++ >= MAX_BAD_CHAPTERS) { - uds_log_error("too many bad 
chapters in volume: %u", + vdo_log_error("too many bad chapters in volume: %u", bad_chapters); return UDS_CORRUPT_DATA; } @@ -1555,7 +1555,7 @@ int uds_make_volume(const struct uds_configuration *config, struct index_layout result = uds_copy_index_geometry(config->geometry, &volume->geometry); if (result != UDS_SUCCESS) { uds_free_volume(volume); - return uds_log_warning_strerror(result, + return vdo_log_warning_strerror(result, "failed to allocate geometry: error"); } geometry = volume->geometry; diff --git a/drivers/md/dm-vdo/int-map.c b/drivers/md/dm-vdo/int-map.c index a909a11204c1..3aa438f84ea1 100644 --- a/drivers/md/dm-vdo/int-map.c +++ b/drivers/md/dm-vdo/int-map.c @@ -381,7 +381,7 @@ static int resize_buckets(struct int_map *map) /* Re-initialize the map to be empty and 50% larger. */ size_t new_capacity = map->capacity / 2 * 3; - uds_log_info("%s: attempting resize from %zu to %zu, current size=%zu", + vdo_log_info("%s: attempting resize from %zu to %zu, current size=%zu", __func__, map->capacity, new_capacity, map->size); result = allocate_buckets(map, new_capacity); if (result != VDO_SUCCESS) { diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c index 61bb48068c3a..9a3716bb3c05 100644 --- a/drivers/md/dm-vdo/io-submitter.c +++ b/drivers/md/dm-vdo/io-submitter.c @@ -408,7 +408,7 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter * Clean up the partially initialized bio-queue entirely and indicate that * initialization failed. */ - uds_log_error("bio map initialization failed %d", result); + vdo_log_error("bio map initialization failed %d", result); vdo_cleanup_io_submitter(io_submitter); vdo_free_io_submitter(io_submitter); return result; @@ -423,7 +423,7 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter * initialization failed. */ vdo_int_map_free(vdo_forget(bio_queue_data->map)); - uds_log_error("bio queue initialization failed %d", result); + vdo_log_error("bio queue initialization failed %d", result); vdo_cleanup_io_submitter(io_submitter); vdo_free_io_submitter(io_submitter); return result; diff --git a/drivers/md/dm-vdo/logger.c b/drivers/md/dm-vdo/logger.c index 80f1e4c62ac6..3f7dc2cb6b98 100644 --- a/drivers/md/dm-vdo/logger.c +++ b/drivers/md/dm-vdo/logger.c @@ -16,14 +16,14 @@ #include "thread-device.h" #include "thread-utils.h" -int vdo_log_level = UDS_LOG_DEFAULT; +int vdo_log_level = VDO_LOG_DEFAULT; -int uds_get_log_level(void) +int vdo_get_log_level(void) { int log_level_latch = READ_ONCE(vdo_log_level); - if (unlikely(log_level_latch > UDS_LOG_MAX)) { - log_level_latch = UDS_LOG_DEFAULT; + if (unlikely(log_level_latch > VDO_LOG_MAX)) { + log_level_latch = VDO_LOG_DEFAULT; WRITE_ONCE(vdo_log_level, log_level_latch); } return log_level_latch; @@ -54,7 +54,7 @@ static void emit_log_message_to_kernel(int priority, const char *fmt, ...) va_list args; struct va_format vaf; - if (priority > uds_get_log_level()) + if (priority > vdo_get_log_level()) return; va_start(args, fmt); @@ -62,22 +62,22 @@ static void emit_log_message_to_kernel(int priority, const char *fmt, ...) 
vaf.va = &args; switch (priority) { - case UDS_LOG_EMERG: - case UDS_LOG_ALERT: - case UDS_LOG_CRIT: + case VDO_LOG_EMERG: + case VDO_LOG_ALERT: + case VDO_LOG_CRIT: pr_crit("%pV", &vaf); break; - case UDS_LOG_ERR: + case VDO_LOG_ERR: pr_err("%pV", &vaf); break; - case UDS_LOG_WARNING: + case VDO_LOG_WARNING: pr_warn("%pV", &vaf); break; - case UDS_LOG_NOTICE: - case UDS_LOG_INFO: + case VDO_LOG_NOTICE: + case VDO_LOG_INFO: pr_info("%pV", &vaf); break; - case UDS_LOG_DEBUG: + case VDO_LOG_DEBUG: pr_debug("%pV", &vaf); break; default: @@ -150,7 +150,7 @@ static void emit_log_message(int priority, const char *module, const char *prefi } /* - * uds_log_embedded_message() - Log a message embedded within another message. + * vdo_log_embedded_message() - Log a message embedded within another message. * @priority: the priority at which to log the message * @module: the name of the module doing the logging * @prefix: optional string prefix to message, may be NULL @@ -158,7 +158,7 @@ static void emit_log_message(int priority, const char *module, const char *prefi * @args1: arguments for message first part (required) * @fmt2: format of message second part */ -void uds_log_embedded_message(int priority, const char *module, const char *prefix, +void vdo_log_embedded_message(int priority, const char *module, const char *prefix, const char *fmt1, va_list args1, const char *fmt2, ...) { va_list args1_copy; @@ -168,7 +168,7 @@ void uds_log_embedded_message(int priority, const char *module, const char *pref va_start(args2, fmt2); if (module == NULL) - module = UDS_LOGGING_MODULE_NAME; + module = VDO_LOGGING_MODULE_NAME; if (prefix == NULL) prefix = ""; @@ -191,41 +191,41 @@ void uds_log_embedded_message(int priority, const char *module, const char *pref va_end(args2); } -int uds_vlog_strerror(int priority, int errnum, const char *module, const char *format, +int vdo_vlog_strerror(int priority, int errnum, const char *module, const char *format, va_list args) { - char errbuf[UDS_MAX_ERROR_MESSAGE_SIZE]; + char errbuf[VDO_MAX_ERROR_MESSAGE_SIZE]; const char *message = uds_string_error(errnum, errbuf, sizeof(errbuf)); - uds_log_embedded_message(priority, module, NULL, format, args, ": %s (%d)", + vdo_log_embedded_message(priority, module, NULL, format, args, ": %s (%d)", message, errnum); return errnum; } -int __uds_log_strerror(int priority, int errnum, const char *module, const char *format, ...) +int __vdo_log_strerror(int priority, int errnum, const char *module, const char *format, ...) { va_list args; va_start(args, format); - uds_vlog_strerror(priority, errnum, module, format, args); + vdo_vlog_strerror(priority, errnum, module, format, args); va_end(args); return errnum; } -void uds_log_backtrace(int priority) +void vdo_log_backtrace(int priority) { - if (priority > uds_get_log_level()) + if (priority > vdo_get_log_level()) return; dump_stack(); } -void __uds_log_message(int priority, const char *module, const char *format, ...) +void __vdo_log_message(int priority, const char *module, const char *format, ...) { va_list args; va_start(args, format); - uds_log_embedded_message(priority, module, NULL, format, args, "%s", ""); + vdo_log_embedded_message(priority, module, NULL, format, args, "%s", ""); va_end(args); } @@ -233,7 +233,7 @@ void __uds_log_message(int priority, const char *module, const char *format, ... * Sleep or delay a few milliseconds in an attempt to allow the log buffers to be flushed lest they * be overrun. 
*/ -void uds_pause_for_logger(void) +void vdo_pause_for_logger(void) { fsleep(4000); } diff --git a/drivers/md/dm-vdo/logger.h b/drivers/md/dm-vdo/logger.h index a8cdf46b6fc1..ae6ad691c027 100644 --- a/drivers/md/dm-vdo/logger.h +++ b/drivers/md/dm-vdo/logger.h @@ -3,8 +3,8 @@ * Copyright 2023 Red Hat */ -#ifndef UDS_LOGGER_H -#define UDS_LOGGER_H +#ifndef VDO_LOGGER_H +#define VDO_LOGGER_H #include #include @@ -14,26 +14,26 @@ /* Custom logging utilities for UDS */ enum { - UDS_LOG_EMERG = LOGLEVEL_EMERG, - UDS_LOG_ALERT = LOGLEVEL_ALERT, - UDS_LOG_CRIT = LOGLEVEL_CRIT, - UDS_LOG_ERR = LOGLEVEL_ERR, - UDS_LOG_WARNING = LOGLEVEL_WARNING, - UDS_LOG_NOTICE = LOGLEVEL_NOTICE, - UDS_LOG_INFO = LOGLEVEL_INFO, - UDS_LOG_DEBUG = LOGLEVEL_DEBUG, - - UDS_LOG_MAX = UDS_LOG_DEBUG, - UDS_LOG_DEFAULT = UDS_LOG_INFO, + VDO_LOG_EMERG = LOGLEVEL_EMERG, + VDO_LOG_ALERT = LOGLEVEL_ALERT, + VDO_LOG_CRIT = LOGLEVEL_CRIT, + VDO_LOG_ERR = LOGLEVEL_ERR, + VDO_LOG_WARNING = LOGLEVEL_WARNING, + VDO_LOG_NOTICE = LOGLEVEL_NOTICE, + VDO_LOG_INFO = LOGLEVEL_INFO, + VDO_LOG_DEBUG = LOGLEVEL_DEBUG, + + VDO_LOG_MAX = VDO_LOG_DEBUG, + VDO_LOG_DEFAULT = VDO_LOG_INFO, }; extern int vdo_log_level; #define DM_MSG_PREFIX "vdo" -#define UDS_LOGGING_MODULE_NAME DM_NAME ": " DM_MSG_PREFIX +#define VDO_LOGGING_MODULE_NAME DM_NAME ": " DM_MSG_PREFIX /* Apply a rate limiter to a log method call. */ -#define uds_log_ratelimit(log_fn, ...) \ +#define vdo_log_ratelimit(log_fn, ...) \ do { \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ @@ -43,58 +43,58 @@ extern int vdo_log_level; } \ } while (0) -int uds_get_log_level(void); +int vdo_get_log_level(void); -void uds_log_embedded_message(int priority, const char *module, const char *prefix, +void vdo_log_embedded_message(int priority, const char *module, const char *prefix, const char *fmt1, va_list args1, const char *fmt2, ...) __printf(4, 0) __printf(6, 7); -void uds_log_backtrace(int priority); +void vdo_log_backtrace(int priority); /* All log functions will preserve the caller's value of errno. */ -#define uds_log_strerror(priority, errnum, ...) \ - __uds_log_strerror(priority, errnum, UDS_LOGGING_MODULE_NAME, __VA_ARGS__) +#define vdo_log_strerror(priority, errnum, ...) \ + __vdo_log_strerror(priority, errnum, VDO_LOGGING_MODULE_NAME, __VA_ARGS__) -int __uds_log_strerror(int priority, int errnum, const char *module, +int __vdo_log_strerror(int priority, int errnum, const char *module, const char *format, ...) __printf(4, 5); -int uds_vlog_strerror(int priority, int errnum, const char *module, const char *format, +int vdo_vlog_strerror(int priority, int errnum, const char *module, const char *format, va_list args) __printf(4, 0); /* Log an error prefixed with the string associated with the errnum. */ -#define uds_log_error_strerror(errnum, ...) \ - uds_log_strerror(UDS_LOG_ERR, errnum, __VA_ARGS__) +#define vdo_log_error_strerror(errnum, ...) \ + vdo_log_strerror(VDO_LOG_ERR, errnum, __VA_ARGS__) -#define uds_log_debug_strerror(errnum, ...) \ - uds_log_strerror(UDS_LOG_DEBUG, errnum, __VA_ARGS__) +#define vdo_log_debug_strerror(errnum, ...) \ + vdo_log_strerror(VDO_LOG_DEBUG, errnum, __VA_ARGS__) -#define uds_log_info_strerror(errnum, ...) \ - uds_log_strerror(UDS_LOG_INFO, errnum, __VA_ARGS__) +#define vdo_log_info_strerror(errnum, ...) \ + vdo_log_strerror(VDO_LOG_INFO, errnum, __VA_ARGS__) -#define uds_log_warning_strerror(errnum, ...) \ - uds_log_strerror(UDS_LOG_WARNING, errnum, __VA_ARGS__) +#define vdo_log_warning_strerror(errnum, ...) 
\ + vdo_log_strerror(VDO_LOG_WARNING, errnum, __VA_ARGS__) -#define uds_log_fatal_strerror(errnum, ...) \ - uds_log_strerror(UDS_LOG_CRIT, errnum, __VA_ARGS__) +#define vdo_log_fatal_strerror(errnum, ...) \ + vdo_log_strerror(VDO_LOG_CRIT, errnum, __VA_ARGS__) -#define uds_log_message(priority, ...) \ - __uds_log_message(priority, UDS_LOGGING_MODULE_NAME, __VA_ARGS__) +#define vdo_log_message(priority, ...) \ + __vdo_log_message(priority, VDO_LOGGING_MODULE_NAME, __VA_ARGS__) -void __uds_log_message(int priority, const char *module, const char *format, ...) +void __vdo_log_message(int priority, const char *module, const char *format, ...) __printf(3, 4); -#define uds_log_debug(...) uds_log_message(UDS_LOG_DEBUG, __VA_ARGS__) +#define vdo_log_debug(...) vdo_log_message(VDO_LOG_DEBUG, __VA_ARGS__) -#define uds_log_info(...) uds_log_message(UDS_LOG_INFO, __VA_ARGS__) +#define vdo_log_info(...) vdo_log_message(VDO_LOG_INFO, __VA_ARGS__) -#define uds_log_warning(...) uds_log_message(UDS_LOG_WARNING, __VA_ARGS__) +#define vdo_log_warning(...) vdo_log_message(VDO_LOG_WARNING, __VA_ARGS__) -#define uds_log_error(...) uds_log_message(UDS_LOG_ERR, __VA_ARGS__) +#define vdo_log_error(...) vdo_log_message(VDO_LOG_ERR, __VA_ARGS__) -#define uds_log_fatal(...) uds_log_message(UDS_LOG_CRIT, __VA_ARGS__) +#define vdo_log_fatal(...) vdo_log_message(VDO_LOG_CRIT, __VA_ARGS__) -void uds_pause_for_logger(void); -#endif /* UDS_LOGGER_H */ +void vdo_pause_for_logger(void); +#endif /* VDO_LOGGER_H */ diff --git a/drivers/md/dm-vdo/logical-zone.c b/drivers/md/dm-vdo/logical-zone.c index 300f9d2d2d5c..258bc55e419b 100644 --- a/drivers/md/dm-vdo/logical-zone.c +++ b/drivers/md/dm-vdo/logical-zone.c @@ -363,8 +363,8 @@ struct physical_zone *vdo_get_next_allocation_zone(struct logical_zone *zone) */ void vdo_dump_logical_zone(const struct logical_zone *zone) { - uds_log_info("logical_zone %u", zone->zone_number); - uds_log_info(" flush_generation=%llu oldest_active_generation=%llu notification_generation=%llu notifying=%s ios_in_flush_generation=%llu", + vdo_log_info("logical_zone %u", zone->zone_number); + vdo_log_info(" flush_generation=%llu oldest_active_generation=%llu notification_generation=%llu notifying=%s ios_in_flush_generation=%llu", (unsigned long long) READ_ONCE(zone->flush_generation), (unsigned long long) READ_ONCE(zone->oldest_active_generation), (unsigned long long) READ_ONCE(zone->notification_generation), diff --git a/drivers/md/dm-vdo/memory-alloc.c b/drivers/md/dm-vdo/memory-alloc.c index 62bb717c4c50..185f259c7245 100644 --- a/drivers/md/dm-vdo/memory-alloc.c +++ b/drivers/md/dm-vdo/memory-alloc.c @@ -150,7 +150,7 @@ static void remove_vmalloc_block(void *ptr) if (block != NULL) vdo_free(block); else - uds_log_info("attempting to remove ptr %px not found in vmalloc list", ptr); + vdo_log_info("attempting to remove ptr %px not found in vmalloc list", ptr); } /* @@ -284,7 +284,7 @@ int vdo_allocate_memory(size_t size, size_t align, const char *what, void *ptr) memalloc_noio_restore(noio_flags); if (unlikely(p == NULL)) { - uds_log_error("Could not allocate %zu bytes for %s in %u msecs", + vdo_log_error("Could not allocate %zu bytes for %s in %u msecs", size, what, jiffies_to_msecs(jiffies - start_time)); return -ENOMEM; } @@ -391,7 +391,7 @@ void vdo_memory_exit(void) VDO_ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0, "vmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel", memory_stats.vmalloc_bytes, memory_stats.vmalloc_blocks); - uds_log_debug("peak usage %zd bytes", 
memory_stats.peak_bytes); + vdo_log_debug("peak usage %zd bytes", memory_stats.peak_bytes); } void vdo_get_memory_stats(u64 *bytes_used, u64 *peak_bytes_used) @@ -426,13 +426,13 @@ void vdo_report_memory_usage(void) peak_usage = memory_stats.peak_bytes; spin_unlock_irqrestore(&memory_stats.lock, flags); total_bytes = kmalloc_bytes + vmalloc_bytes; - uds_log_info("current module memory tracking (actual allocation sizes, not requested):"); - uds_log_info(" %llu bytes in %llu kmalloc blocks", + vdo_log_info("current module memory tracking (actual allocation sizes, not requested):"); + vdo_log_info(" %llu bytes in %llu kmalloc blocks", (unsigned long long) kmalloc_bytes, (unsigned long long) kmalloc_blocks); - uds_log_info(" %llu bytes in %llu vmalloc blocks", + vdo_log_info(" %llu bytes in %llu vmalloc blocks", (unsigned long long) vmalloc_bytes, (unsigned long long) vmalloc_blocks); - uds_log_info(" total %llu bytes, peak usage %llu bytes", + vdo_log_info(" total %llu bytes, peak usage %llu bytes", (unsigned long long) total_bytes, (unsigned long long) peak_usage); } diff --git a/drivers/md/dm-vdo/message-stats.c b/drivers/md/dm-vdo/message-stats.c index 18c9d2af8aed..2802cf92922b 100644 --- a/drivers/md/dm-vdo/message-stats.c +++ b/drivers/md/dm-vdo/message-stats.c @@ -421,7 +421,7 @@ int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen) result = vdo_allocate(1, struct vdo_statistics, __func__, &stats); if (result != VDO_SUCCESS) { - uds_log_error("Cannot allocate memory to write VDO statistics"); + vdo_log_error("Cannot allocate memory to write VDO statistics"); return result; } diff --git a/drivers/md/dm-vdo/packer.c b/drivers/md/dm-vdo/packer.c index 4d45243161a6..16cf29b4c90a 100644 --- a/drivers/md/dm-vdo/packer.c +++ b/drivers/md/dm-vdo/packer.c @@ -748,7 +748,7 @@ static void dump_packer_bin(const struct packer_bin *bin, bool canceled) /* Don't dump empty bins. */ return; - uds_log_info(" %sBin slots_used=%u free_space=%zu", + vdo_log_info(" %sBin slots_used=%u free_space=%zu", (canceled ? 
"Canceled" : ""), bin->slots_used, bin->free_space); /* @@ -767,8 +767,8 @@ void vdo_dump_packer(const struct packer *packer) { struct packer_bin *bin; - uds_log_info("packer"); - uds_log_info(" flushGeneration=%llu state %s packer_bin_count=%llu", + vdo_log_info("packer"); + vdo_log_info(" flushGeneration=%llu state %s packer_bin_count=%llu", (unsigned long long) packer->flush_generation, vdo_get_admin_state_code(&packer->state)->name, (unsigned long long) packer->size); diff --git a/drivers/md/dm-vdo/permassert.c b/drivers/md/dm-vdo/permassert.c index 6fe49c4b7e51..bf9eccea1cb3 100644 --- a/drivers/md/dm-vdo/permassert.c +++ b/drivers/md/dm-vdo/permassert.c @@ -15,10 +15,10 @@ int vdo_assertion_failed(const char *expression_string, const char *file_name, va_start(args, format); - uds_log_embedded_message(UDS_LOG_ERR, UDS_LOGGING_MODULE_NAME, "assertion \"", + vdo_log_embedded_message(VDO_LOG_ERR, VDO_LOGGING_MODULE_NAME, "assertion \"", format, args, "\" (%s) failed at %s:%d", expression_string, file_name, line_number); - uds_log_backtrace(UDS_LOG_ERR); + vdo_log_backtrace(VDO_LOG_ERR); va_end(args); diff --git a/drivers/md/dm-vdo/physical-zone.c b/drivers/md/dm-vdo/physical-zone.c index 6678f472fb44..2fee3a7c1191 100644 --- a/drivers/md/dm-vdo/physical-zone.c +++ b/drivers/md/dm-vdo/physical-zone.c @@ -163,7 +163,7 @@ static void release_pbn_lock_provisional_reference(struct pbn_lock *lock, result = vdo_release_block_reference(allocator, locked_pbn); if (result != VDO_SUCCESS) { - uds_log_error_strerror(result, + vdo_log_error_strerror(result, "Failed to release reference to %s physical block %llu", lock->implementation->release_reason, (unsigned long long) locked_pbn); @@ -294,7 +294,7 @@ static int __must_check borrow_pbn_lock_from_pool(struct pbn_lock_pool *pool, idle_pbn_lock *idle; if (pool->borrowed >= pool->capacity) - return uds_log_error_strerror(VDO_LOCK_ERROR, + return vdo_log_error_strerror(VDO_LOCK_ERROR, "no free PBN locks left to borrow"); pool->borrowed += 1; @@ -499,7 +499,7 @@ static int allocate_and_lock_block(struct allocation *allocation) if (lock->holder_count > 0) { /* This block is already locked, which should be impossible. 
*/ - return uds_log_error_strerror(VDO_LOCK_ERROR, + return vdo_log_error_strerror(VDO_LOCK_ERROR, "Newly allocated block %llu was spuriously locked (holder_count=%u)", (unsigned long long) allocation->pbn, lock->holder_count); diff --git a/drivers/md/dm-vdo/recovery-journal.c b/drivers/md/dm-vdo/recovery-journal.c index 6df373b88042..ee6321a3e523 100644 --- a/drivers/md/dm-vdo/recovery-journal.c +++ b/drivers/md/dm-vdo/recovery-journal.c @@ -804,7 +804,7 @@ void vdo_free_recovery_journal(struct recovery_journal *journal) "journal being freed has no active tail blocks"); } else if (!vdo_is_state_saved(&journal->state) && !list_empty(&journal->active_tail_blocks)) { - uds_log_warning("journal being freed has uncommitted entries"); + vdo_log_warning("journal being freed has uncommitted entries"); } for (i = 0; i < RECOVERY_JOURNAL_RESERVED_BLOCKS; i++) { @@ -1305,7 +1305,7 @@ static void handle_write_error(struct vdo_completion *completion) struct recovery_journal *journal = block->journal; vio_record_metadata_io_error(as_vio(completion)); - uds_log_error_strerror(completion->result, + vdo_log_error_strerror(completion->result, "cannot write recovery journal block %llu", (unsigned long long) block->sequence_number); enter_journal_read_only_mode(journal, completion->result); @@ -1719,7 +1719,7 @@ vdo_get_recovery_journal_statistics(const struct recovery_journal *journal) */ static void dump_recovery_block(const struct recovery_journal_block *block) { - uds_log_info(" sequence number %llu; entries %u; %s; %zu entry waiters; %zu commit waiters", + vdo_log_info(" sequence number %llu; entries %u; %s; %zu entry waiters; %zu commit waiters", (unsigned long long) block->sequence_number, block->entry_count, (block->committing ? "committing" : "waiting"), vdo_waitq_num_waiters(&block->entry_waiters), @@ -1736,8 +1736,8 @@ void vdo_dump_recovery_journal_statistics(const struct recovery_journal *journal const struct recovery_journal_block *block; struct recovery_journal_statistics stats = vdo_get_recovery_journal_statistics(journal); - uds_log_info("Recovery Journal"); - uds_log_info(" block_map_head=%llu slab_journal_head=%llu last_write_acknowledged=%llu tail=%llu block_map_reap_head=%llu slab_journal_reap_head=%llu disk_full=%llu slab_journal_commits_requested=%llu entry_waiters=%zu", + vdo_log_info("Recovery Journal"); + vdo_log_info(" block_map_head=%llu slab_journal_head=%llu last_write_acknowledged=%llu tail=%llu block_map_reap_head=%llu slab_journal_reap_head=%llu disk_full=%llu slab_journal_commits_requested=%llu entry_waiters=%zu", (unsigned long long) journal->block_map_head, (unsigned long long) journal->slab_journal_head, (unsigned long long) journal->last_write_acknowledged, @@ -1747,16 +1747,16 @@ void vdo_dump_recovery_journal_statistics(const struct recovery_journal *journal (unsigned long long) stats.disk_full, (unsigned long long) stats.slab_journal_commits_requested, vdo_waitq_num_waiters(&journal->entry_waiters)); - uds_log_info(" entries: started=%llu written=%llu committed=%llu", + vdo_log_info(" entries: started=%llu written=%llu committed=%llu", (unsigned long long) stats.entries.started, (unsigned long long) stats.entries.written, (unsigned long long) stats.entries.committed); - uds_log_info(" blocks: started=%llu written=%llu committed=%llu", + vdo_log_info(" blocks: started=%llu written=%llu committed=%llu", (unsigned long long) stats.blocks.started, (unsigned long long) stats.blocks.written, (unsigned long long) stats.blocks.committed); - uds_log_info(" active blocks:"); + 
vdo_log_info(" active blocks:"); list_for_each_entry(block, &journal->active_tail_blocks, list_node) dump_recovery_block(block); } diff --git a/drivers/md/dm-vdo/repair.c b/drivers/md/dm-vdo/repair.c index c7abb8078336..defc9359f10e 100644 --- a/drivers/md/dm-vdo/repair.c +++ b/drivers/md/dm-vdo/repair.c @@ -265,13 +265,13 @@ static void finish_repair(struct vdo_completion *completion) free_repair_completion(vdo_forget(repair)); if (vdo_state_requires_read_only_rebuild(vdo->load_state)) { - uds_log_info("Read-only rebuild complete"); + vdo_log_info("Read-only rebuild complete"); vdo_launch_completion(parent); return; } /* FIXME: shouldn't this say either "recovery" or "repair"? */ - uds_log_info("Rebuild complete"); + vdo_log_info("Rebuild complete"); /* * Now that we've freed the repair completion and its vast array of journal entries, we @@ -291,9 +291,9 @@ static void abort_repair(struct vdo_completion *completion) struct repair_completion *repair = as_repair_completion(completion); if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state)) - uds_log_info("Read-only rebuild aborted"); + vdo_log_info("Read-only rebuild aborted"); else - uds_log_warning("Recovery aborted"); + vdo_log_warning("Recovery aborted"); free_repair_completion(vdo_forget(repair)); vdo_continue_completion(parent, result); @@ -329,10 +329,10 @@ static void drain_slab_depot(struct vdo_completion *completion) prepare_repair_completion(repair, finish_repair, VDO_ZONE_TYPE_ADMIN); if (vdo_state_requires_read_only_rebuild(vdo->load_state)) { - uds_log_info("Saving rebuilt state"); + vdo_log_info("Saving rebuilt state"); operation = VDO_ADMIN_STATE_REBUILDING; } else { - uds_log_info("Replayed %zu journal entries into slab journals", + vdo_log_info("Replayed %zu journal entries into slab journals", repair->entries_added_to_slab_journals); operation = VDO_ADMIN_STATE_RECOVERING; } @@ -350,7 +350,7 @@ static void flush_block_map_updates(struct vdo_completion *completion) { vdo_assert_on_admin_thread(completion->vdo, __func__); - uds_log_info("Flushing block map changes"); + vdo_log_info("Flushing block map changes"); prepare_repair_completion(as_repair_completion(completion), drain_slab_depot, VDO_ZONE_TYPE_ADMIN); vdo_drain_block_map(completion->vdo->block_map, VDO_ADMIN_STATE_RECOVERING, @@ -449,7 +449,7 @@ static bool process_slot(struct block_map_page *page, struct vdo_completion *com if (result == VDO_SUCCESS) return true; - uds_log_error_strerror(result, + vdo_log_error_strerror(result, "Could not adjust reference count for PBN %llu, slot %u mapped to PBN %llu", (unsigned long long) vdo_get_block_map_page_pbn(page), slot, (unsigned long long) mapping.pbn); @@ -615,7 +615,7 @@ static int process_entry(physical_block_number_t pbn, struct vdo_completion *com int result; if ((pbn == VDO_ZERO_BLOCK) || !vdo_is_physical_data_block(depot, pbn)) { - return uds_log_error_strerror(VDO_BAD_CONFIGURATION, + return vdo_log_error_strerror(VDO_BAD_CONFIGURATION, "PBN %llu out of range", (unsigned long long) pbn); } @@ -623,7 +623,7 @@ static int process_entry(physical_block_number_t pbn, struct vdo_completion *com result = vdo_adjust_reference_count_for_rebuild(depot, pbn, VDO_JOURNAL_BLOCK_MAP_REMAPPING); if (result != VDO_SUCCESS) { - return uds_log_error_strerror(result, + return vdo_log_error_strerror(result, "Could not adjust reference count for block map tree PBN %llu", (unsigned long long) pbn); } @@ -758,7 +758,7 @@ static int validate_recovery_journal_entry(const struct vdo *vdo, 
!vdo_is_valid_location(&entry->unmapping) || !vdo_is_physical_data_block(vdo->depot, entry->mapping.pbn) || !vdo_is_physical_data_block(vdo->depot, entry->unmapping.pbn)) { - return uds_log_error_strerror(VDO_CORRUPT_JOURNAL, + return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL, "Invalid entry: %s (%llu, %u) from %llu to %llu is not within bounds", vdo_get_journal_operation_name(entry->operation), (unsigned long long) entry->slot.pbn, @@ -772,7 +772,7 @@ static int validate_recovery_journal_entry(const struct vdo *vdo, (entry->mapping.pbn == VDO_ZERO_BLOCK) || (entry->unmapping.state != VDO_MAPPING_STATE_UNMAPPED) || (entry->unmapping.pbn != VDO_ZERO_BLOCK))) { - return uds_log_error_strerror(VDO_CORRUPT_JOURNAL, + return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL, "Invalid entry: %s (%llu, %u) from %llu to %llu is not a valid tree mapping", vdo_get_journal_operation_name(entry->operation), (unsigned long long) entry->slot.pbn, @@ -875,7 +875,7 @@ void vdo_replay_into_slab_journals(struct block_allocator *allocator, void *cont .entry_count = 0, }; - uds_log_info("Replaying entries into slab journals for zone %u", + vdo_log_info("Replaying entries into slab journals for zone %u", allocator->zone_number); completion->parent = repair; add_slab_journal_entries(completion); @@ -907,7 +907,7 @@ static void flush_block_map(struct vdo_completion *completion) vdo_assert_on_admin_thread(completion->vdo, __func__); - uds_log_info("Flushing block map changes"); + vdo_log_info("Flushing block map changes"); prepare_repair_completion(repair, load_slab_depot, VDO_ZONE_TYPE_ADMIN); operation = (vdo_state_requires_read_only_rebuild(completion->vdo->load_state) ? VDO_ADMIN_STATE_REBUILDING : @@ -1107,7 +1107,7 @@ static void recover_block_map(struct vdo_completion *completion) vdo_state_requires_read_only_rebuild(vdo->load_state); if (repair->block_map_entry_count == 0) { - uds_log_info("Replaying 0 recovery entries into block map"); + vdo_log_info("Replaying 0 recovery entries into block map"); vdo_free(vdo_forget(repair->journal_data)); launch_repair_completion(repair, load_slab_depot, VDO_ZONE_TYPE_ADMIN); return; @@ -1124,7 +1124,7 @@ static void recover_block_map(struct vdo_completion *completion) }; min_heapify_all(&repair->replay_heap, &repair_min_heap); - uds_log_info("Replaying %zu recovery entries into block map", + vdo_log_info("Replaying %zu recovery entries into block map", repair->block_map_entry_count); repair->current_entry = &repair->entries[repair->block_map_entry_count - 1]; @@ -1437,7 +1437,7 @@ static int validate_heads(struct repair_completion *repair) return VDO_SUCCESS; - return uds_log_error_strerror(VDO_CORRUPT_JOURNAL, + return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL, "Journal tail too early. 
block map head: %llu, slab journal head: %llu, tail: %llu", (unsigned long long) repair->block_map_head, (unsigned long long) repair->slab_journal_head, @@ -1571,7 +1571,7 @@ static int parse_journal_for_recovery(struct repair_completion *repair) header = get_recovery_journal_block_header(journal, repair->journal_data, i); if (header.metadata_type == VDO_METADATA_RECOVERY_JOURNAL) { /* This is an old format block, so we need to upgrade */ - uds_log_error_strerror(VDO_UNSUPPORTED_VERSION, + vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION, "Recovery journal is in the old format, a read-only rebuild is required."); vdo_enter_read_only_mode(repair->completion.vdo, VDO_UNSUPPORTED_VERSION); @@ -1628,7 +1628,7 @@ static int parse_journal_for_recovery(struct repair_completion *repair) if (result != VDO_SUCCESS) return result; - uds_log_info("Highest-numbered recovery journal block has sequence number %llu, and the highest-numbered usable block is %llu", + vdo_log_info("Highest-numbered recovery journal block has sequence number %llu, and the highest-numbered usable block is %llu", (unsigned long long) repair->highest_tail, (unsigned long long) repair->tail); @@ -1656,7 +1656,7 @@ static void finish_journal_load(struct vdo_completion *completion) if (++repair->vios_complete != repair->vio_count) return; - uds_log_info("Finished reading recovery journal"); + vdo_log_info("Finished reading recovery journal"); uninitialize_vios(repair); prepare_repair_completion(repair, recover_block_map, VDO_ZONE_TYPE_LOGICAL); vdo_continue_completion(&repair->completion, parse_journal(repair)); @@ -1701,12 +1701,12 @@ void vdo_repair(struct vdo_completion *parent) vdo_assert_on_admin_thread(vdo, __func__); if (vdo->load_state == VDO_FORCE_REBUILD) { - uds_log_warning("Rebuilding reference counts to clear read-only mode"); + vdo_log_warning("Rebuilding reference counts to clear read-only mode"); vdo->states.vdo.read_only_recoveries++; } else if (vdo->load_state == VDO_REBUILD_FOR_UPGRADE) { - uds_log_warning("Rebuilding reference counts for upgrade"); + vdo_log_warning("Rebuilding reference counts for upgrade"); } else { - uds_log_warning("Device was dirty, rebuilding reference counts"); + vdo_log_warning("Device was dirty, rebuilding reference counts"); } result = vdo_allocate_extended(struct repair_completion, page_count, diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c index 00746de09c12..dc9f3d3c3995 100644 --- a/drivers/md/dm-vdo/slab-depot.c +++ b/drivers/md/dm-vdo/slab-depot.c @@ -568,7 +568,7 @@ static void release_journal_locks(struct vdo_waiter *waiter, void *context) * Don't bother logging what might be lots of errors if we are already in * read-only mode. 
*/ - uds_log_error_strerror(result, "failed slab summary update %llu", + vdo_log_error_strerror(result, "failed slab summary update %llu", (unsigned long long) journal->summarized); } @@ -702,7 +702,7 @@ static void complete_write(struct vdo_completion *completion) if (result != VDO_SUCCESS) { vio_record_metadata_io_error(as_vio(completion)); - uds_log_error_strerror(result, "cannot write slab journal block %llu", + vdo_log_error_strerror(result, "cannot write slab journal block %llu", (unsigned long long) committed); vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result); check_if_slab_drained(journal->slab); @@ -1020,7 +1020,7 @@ static void finish_summary_update(struct vdo_waiter *waiter, void *context) slab->active_count--; if ((result != VDO_SUCCESS) && (result != VDO_READ_ONLY)) { - uds_log_error_strerror(result, "failed to update slab summary"); + vdo_log_error_strerror(result, "failed to update slab summary"); vdo_enter_read_only_mode(slab->allocator->depot->vdo, result); } @@ -1440,7 +1440,7 @@ static int increment_for_data(struct vdo_slab *slab, struct reference_block *blo default: /* Single or shared */ if (*counter_ptr >= MAXIMUM_REFERENCE_COUNT) { - return uds_log_error_strerror(VDO_REF_COUNT_INVALID, + return vdo_log_error_strerror(VDO_REF_COUNT_INVALID, "Incrementing a block already having 254 references (slab %u, offset %u)", slab->slab_number, block_number); } @@ -1473,7 +1473,7 @@ static int decrement_for_data(struct vdo_slab *slab, struct reference_block *blo { switch (old_status) { case RS_FREE: - return uds_log_error_strerror(VDO_REF_COUNT_INVALID, + return vdo_log_error_strerror(VDO_REF_COUNT_INVALID, "Decrementing free block at offset %u in slab %u", block_number, slab->slab_number); @@ -1537,7 +1537,7 @@ static int increment_for_block_map(struct vdo_slab *slab, struct reference_block switch (old_status) { case RS_FREE: if (normal_operation) { - return uds_log_error_strerror(VDO_REF_COUNT_INVALID, + return vdo_log_error_strerror(VDO_REF_COUNT_INVALID, "Incrementing unallocated block map block (slab %u, offset %u)", slab->slab_number, block_number); } @@ -1552,7 +1552,7 @@ static int increment_for_block_map(struct vdo_slab *slab, struct reference_block case RS_PROVISIONAL: if (!normal_operation) - return uds_log_error_strerror(VDO_REF_COUNT_INVALID, + return vdo_log_error_strerror(VDO_REF_COUNT_INVALID, "Block map block had provisional reference during replay (slab %u, offset %u)", slab->slab_number, block_number); @@ -1562,7 +1562,7 @@ static int increment_for_block_map(struct vdo_slab *slab, struct reference_block return VDO_SUCCESS; default: - return uds_log_error_strerror(VDO_REF_COUNT_INVALID, + return vdo_log_error_strerror(VDO_REF_COUNT_INVALID, "Incrementing a block map block which is already referenced %u times (slab %u, offset %u)", *counter_ptr, slab->slab_number, block_number); @@ -2219,7 +2219,7 @@ static void unpack_reference_block(struct packed_reference_block *packed, block->commit_points[i])) { size_t block_index = block - block->slab->reference_blocks; - uds_log_warning("Torn write detected in sector %u of reference block %zu of slab %u", + vdo_log_warning("Torn write detected in sector %u of reference block %zu of slab %u", i, block_index, block->slab->slab_number); } } @@ -2698,9 +2698,9 @@ static void finish_scrubbing(struct slab_scrubber *scrubber, int result) * thread does not yet know about. 
 	 */
 	if (prior_state == VDO_DIRTY)
-		uds_log_info("VDO commencing normal operation");
+		vdo_log_info("VDO commencing normal operation");
 	else if (prior_state == VDO_RECOVERING)
-		uds_log_info("Exiting recovery mode");
+		vdo_log_info("Exiting recovery mode");
 }
 
 /*
@@ -2790,7 +2790,7 @@ static int apply_block_entries(struct packed_slab_journal_block *block,
 
 		if (entry.sbn > max_sbn) {
 			/* This entry is out of bounds. */
-			return uds_log_error_strerror(VDO_CORRUPT_JOURNAL,
+			return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
 						      "vdo_slab journal entry (%llu, %u) had invalid offset %u in slab (size %u blocks)",
 						      (unsigned long long) block_number,
 						      entry_point.entry_count,
@@ -2799,7 +2799,7 @@ static int apply_block_entries(struct packed_slab_journal_block *block,
 
 		result = replay_reference_count_change(slab, &entry_point, entry);
 		if (result != VDO_SUCCESS) {
-			uds_log_error_strerror(result,
+			vdo_log_error_strerror(result,
 					       "vdo_slab journal entry (%llu, %u) (%s of offset %u) could not be applied in slab %u",
 					       (unsigned long long) block_number,
 					       entry_point.entry_count,
@@ -2857,7 +2857,7 @@ static void apply_journal_entries(struct vdo_completion *completion)
 		    (header.has_block_map_increments &&
 		     (header.entry_count > journal->full_entries_per_block))) {
 			/* The block is not what we expect it to be. */
-			uds_log_error("vdo_slab journal block for slab %u was invalid",
+			vdo_log_error("vdo_slab journal block for slab %u was invalid",
 				      slab->slab_number);
 			abort_scrubbing(scrubber, VDO_CORRUPT_JOURNAL);
 			return;
@@ -3580,22 +3580,22 @@ void vdo_dump_block_allocator(const struct block_allocator *allocator)
 	struct slab_iterator iterator = get_slab_iterator(allocator);
 	const struct slab_scrubber *scrubber = &allocator->scrubber;
 
-	uds_log_info("block_allocator zone %u", allocator->zone_number);
+	vdo_log_info("block_allocator zone %u", allocator->zone_number);
 	while (iterator.next != NULL) {
 		struct vdo_slab *slab = next_slab(&iterator);
 		struct slab_journal *journal = &slab->journal;
 
 		if (slab->reference_blocks != NULL) {
 			/* Terse because there are a lot of slabs to dump and syslog is lossy. */
-			uds_log_info("slab %u: P%u, %llu free", slab->slab_number,
+			vdo_log_info("slab %u: P%u, %llu free", slab->slab_number,
 				     slab->priority,
 				     (unsigned long long) slab->free_blocks);
 		} else {
-			uds_log_info("slab %u: status %s", slab->slab_number,
+			vdo_log_info("slab %u: status %s", slab->slab_number,
 				     status_to_string(slab->status));
 		}
 
-		uds_log_info("  slab journal: entry_waiters=%zu waiting_to_commit=%s updating_slab_summary=%s head=%llu unreapable=%llu tail=%llu next_commit=%llu summarized=%llu last_summarized=%llu recovery_lock=%llu dirty=%s",
+		vdo_log_info("  slab journal: entry_waiters=%zu waiting_to_commit=%s updating_slab_summary=%s head=%llu unreapable=%llu tail=%llu next_commit=%llu summarized=%llu last_summarized=%llu recovery_lock=%llu dirty=%s",
 			     vdo_waitq_num_waiters(&journal->entry_waiters),
 			     uds_bool_to_string(journal->waiting_to_commit),
 			     uds_bool_to_string(journal->updating_slab_summary),
@@ -3614,7 +3614,7 @@ void vdo_dump_block_allocator(const struct block_allocator *allocator)
 
 		if (slab->counters != NULL) {
 			/* Terse because there are a lot of slabs to dump and syslog is lossy. */
-			uds_log_info("  slab: free=%u/%u blocks=%u dirty=%zu active=%zu journal@(%llu,%u)",
+			vdo_log_info("  slab: free=%u/%u blocks=%u dirty=%zu active=%zu journal@(%llu,%u)",
 				     slab->free_blocks, slab->block_count,
 				     slab->reference_block_count,
 				     vdo_waitq_num_waiters(&slab->dirty_blocks),
@@ -3622,7 +3622,7 @@ void vdo_dump_block_allocator(const struct block_allocator *allocator)
 				     (unsigned long long) slab->slab_journal_point.sequence_number,
 				     slab->slab_journal_point.entry_count);
 		} else {
-			uds_log_info("  no counters");
+			vdo_log_info("  no counters");
 		}
 
 		/*
@@ -3631,11 +3631,11 @@ void vdo_dump_block_allocator(const struct block_allocator *allocator)
 		 */
 		if (pause_counter++ == 31) {
 			pause_counter = 0;
-			uds_pause_for_logger();
+			vdo_pause_for_logger();
 		}
 	}
 
-	uds_log_info("slab_scrubber slab_count %u waiters %zu %s%s",
+	vdo_log_info("slab_scrubber slab_count %u waiters %zu %s%s",
 		     READ_ONCE(scrubber->slab_count),
 		     vdo_waitq_num_waiters(&scrubber->waiters),
 		     vdo_get_admin_state_code(&scrubber->admin_state)->name,
@@ -4109,7 +4109,7 @@ static int allocate_components(struct slab_depot *depot,
 	slab_count = vdo_compute_slab_count(depot->first_block, depot->last_block,
 					    depot->slab_size_shift);
 	if (thread_config->physical_zone_count > slab_count) {
-		return uds_log_error_strerror(VDO_BAD_CONFIGURATION,
+		return vdo_log_error_strerror(VDO_BAD_CONFIGURATION,
 					      "%u physical zones exceeds slab count %u",
 					      thread_config->physical_zone_count,
 					      slab_count);
@@ -4167,7 +4167,7 @@ int vdo_decode_slab_depot(struct slab_depot_state_2_0 state, struct vdo *vdo,
 	block_count_t slab_size = state.slab_config.slab_blocks;
 
 	if (!is_power_of_2(slab_size)) {
-		return uds_log_error_strerror(UDS_INVALID_ARGUMENT,
+		return vdo_log_error_strerror(UDS_INVALID_ARGUMENT,
 					      "slab size must be a power of two");
 	}
 	slab_size_shift = ilog2(slab_size);
@@ -4676,7 +4676,7 @@ int vdo_prepare_to_grow_slab_depot(struct slab_depot *depot,
 					       new_state.last_block, depot->slab_size_shift);
 	if (new_slab_count <= depot->slab_count)
-		return uds_log_error_strerror(VDO_INCREMENT_TOO_SMALL,
+		return vdo_log_error_strerror(VDO_INCREMENT_TOO_SMALL,
 					      "Depot can only grow");
 	if (new_slab_count == depot->new_slab_count) {
 		/* Check it out, we've already got all the new slabs allocated! */
@@ -5092,8 +5092,8 @@ void vdo_get_slab_depot_statistics(const struct slab_depot *depot,
  */
 void vdo_dump_slab_depot(const struct slab_depot *depot)
 {
-	uds_log_info("vdo slab depot");
-	uds_log_info("  zone_count=%u old_zone_count=%u slabCount=%u active_release_request=%llu new_release_request=%llu",
+	vdo_log_info("vdo slab depot");
+	vdo_log_info("  zone_count=%u old_zone_count=%u slabCount=%u active_release_request=%llu new_release_request=%llu",
 		     (unsigned int) depot->zone_count, (unsigned int) depot->old_zone_count,
 		     READ_ONCE(depot->slab_count),
 		     (unsigned long long) depot->active_release_request,
diff --git a/drivers/md/dm-vdo/status-codes.c b/drivers/md/dm-vdo/status-codes.c
index 42e87b2344bc..918e46e7121f 100644
--- a/drivers/md/dm-vdo/status-codes.c
+++ b/drivers/md/dm-vdo/status-codes.c
@@ -87,8 +87,8 @@ int vdo_register_status_codes(void)
  */
 int vdo_status_to_errno(int error)
 {
-	char error_name[UDS_MAX_ERROR_NAME_SIZE];
-	char error_message[UDS_MAX_ERROR_MESSAGE_SIZE];
+	char error_name[VDO_MAX_ERROR_NAME_SIZE];
+	char error_message[VDO_MAX_ERROR_MESSAGE_SIZE];
 
 	/* 0 is success, negative a system error code */
 	if (likely(error <= 0))
@@ -103,7 +103,7 @@ int vdo_status_to_errno(int error)
 	case VDO_READ_ONLY:
 		return -EIO;
 	default:
-		uds_log_info("%s: mapping internal status code %d (%s: %s) to EIO",
+		vdo_log_info("%s: mapping internal status code %d (%s: %s) to EIO",
 			     __func__, error,
 			     uds_string_error_name(error, error_name, sizeof(error_name)),
 			     uds_string_error(error, error_message, sizeof(error_message)));
diff --git a/drivers/md/dm-vdo/thread-utils.c b/drivers/md/dm-vdo/thread-utils.c
index c822df86f731..bd620be61c1d 100644
--- a/drivers/md/dm-vdo/thread-utils.c
+++ b/drivers/md/dm-vdo/thread-utils.c
@@ -84,7 +84,7 @@ int vdo_create_thread(void (*thread_function)(void *), void *thread_data,
 
 	result = vdo_allocate(1, struct thread, __func__, &thread);
 	if (result != VDO_SUCCESS) {
-		uds_log_warning("Error allocating memory for %s", name);
+		vdo_log_warning("Error allocating memory for %s", name);
 		return result;
 	}
 
diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c
index 28e6352c758e..fff847767755 100644
--- a/drivers/md/dm-vdo/vdo.c
+++ b/drivers/md/dm-vdo/vdo.c
@@ -304,7 +304,7 @@ static int __must_check read_geometry_block(struct vdo *vdo)
 	result = blk_status_to_errno(vio->bio->bi_status);
 	free_vio(vdo_forget(vio));
 	if (result != 0) {
-		uds_log_error_strerror(result, "synchronous read failed");
+		vdo_log_error_strerror(result, "synchronous read failed");
 		vdo_free(block);
 		return -EIO;
 	}
@@ -493,7 +493,7 @@ static int initialize_vdo(struct vdo *vdo, struct device_config *config,
 		return result;
 	}
 
-	uds_log_info("zones: %d logical, %d physical, %d hash; total threads: %d",
+	vdo_log_info("zones: %d logical, %d physical, %d hash; total threads: %d",
 		     config->thread_counts.logical_zones,
 		     config->thread_counts.physical_zones,
 		     config->thread_counts.hash_zones, vdo->thread_config.thread_count);
@@ -841,7 +841,7 @@ int vdo_synchronous_flush(struct vdo *vdo)
 
 	atomic64_inc(&vdo->stats.flush_out);
 	if (result != 0) {
-		uds_log_error_strerror(result, "synchronous flush failed");
+		vdo_log_error_strerror(result, "synchronous flush failed");
 		result = -EIO;
 	}
 
@@ -928,7 +928,7 @@ static void handle_save_error(struct vdo_completion *completion)
 		container_of(as_vio(completion), struct vdo_super_block, vio);
 
 	vio_record_metadata_io_error(&super_block->vio);
-	uds_log_error_strerror(completion->result, "super block save failed");
+	vdo_log_error_strerror(completion->result, "super block save failed");
 	/*
 	 * Mark the super block as unwritable so that we won't attempt to write it again. This
 	 * avoids the case where a growth attempt fails writing the super block with the new size,
@@ -1154,7 +1154,7 @@ static void make_thread_read_only(struct vdo_completion *completion)
 		thread->is_read_only = true;
 		listener = thread->listeners;
 		if (thread_id == 0)
-			uds_log_error_strerror(READ_ONCE(notifier->read_only_error),
+			vdo_log_error_strerror(READ_ONCE(notifier->read_only_error),
 					       "Unrecoverable error, entering read-only mode");
 	} else {
 		/* We've just finished notifying a listener */
@@ -1329,7 +1329,7 @@ void vdo_enter_recovery_mode(struct vdo *vdo)
 	if (vdo_in_read_only_mode(vdo))
 		return;
 
-	uds_log_info("Entering recovery mode");
+	vdo_log_info("Entering recovery mode");
 	vdo_set_state(vdo, VDO_RECOVERING);
 }
 
@@ -1382,7 +1382,7 @@ static void set_compression_callback(struct vdo_completion *completion)
 		}
 	}
 
-	uds_log_info("compression is %s", (*enable ? "enabled" : "disabled"));
+	vdo_log_info("compression is %s", (*enable ? "enabled" : "disabled"));
 	*enable = was_enabled;
 	complete_synchronous_action(completion);
 }
diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c
index b1e4e604c2c3..b291578f726f 100644
--- a/drivers/md/dm-vdo/vio.c
+++ b/drivers/md/dm-vdo/vio.c
@@ -131,7 +131,7 @@ int create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
 	 */
 	result = vdo_allocate(1, struct vio, __func__, &vio);
 	if (result != VDO_SUCCESS) {
-		uds_log_error("metadata vio allocation failure %d", result);
+		vdo_log_error("metadata vio allocation failure %d", result);
 		return result;
 	}
 
@@ -225,7 +225,7 @@ int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
 
 		bytes_added = bio_add_page(bio, page, bytes, offset);
 		if (bytes_added != bytes) {
-			return uds_log_error_strerror(VDO_BIO_CREATION_FAILED,
+			return vdo_log_error_strerror(VDO_BIO_CREATION_FAILED,
 						      "Could only add %i bytes to bio",
 						      bytes_added);
 		}
@@ -258,18 +258,18 @@ void update_vio_error_stats(struct vio *vio, const char *format, ...)
 	case VDO_NO_SPACE:
 		atomic64_inc(&vdo->stats.no_space_error_count);
-		priority = UDS_LOG_DEBUG;
+		priority = VDO_LOG_DEBUG;
 		break;
 
 	default:
-		priority = UDS_LOG_ERR;
+		priority = VDO_LOG_ERR;
 	}
 
 	if (!__ratelimit(&error_limiter))
 		return;
 
 	va_start(args, format);
-	uds_vlog_strerror(priority, vio->completion.result, UDS_LOGGING_MODULE_NAME,
+	vdo_vlog_strerror(priority, vio->completion.result, VDO_LOGGING_MODULE_NAME,
 			  format, args);
 	va_end(args);
 }
-- 
cgit v1.2.3