author    Linus Torvalds <torvalds@linux-foundation.org>  2017-02-22 22:38:22 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-02-22 22:38:22 +0300
commit    e30aee9e10bb5168579e047f05c3d13d09e23356 (patch)
tree      12371bdcd52d2427cad838201997479e31b6a9c9 /drivers
parent    8ff546b801e5cca0337c0f0a7234795d0a6309a1 (diff)
parent    6cf18e6927c0b224f972e3042fb85770d63cb9f8 (diff)
download  linux-e30aee9e10bb5168579e047f05c3d13d09e23356.tar.xz
Merge tag 'char-misc-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here is the big char/misc driver patchset for 4.11-rc1.

  Lots of different driver subsystems updated here: rework for the
  hyperv subsystem to handle new platforms better, mei and w1 and extcon
  driver updates, as well as a number of other "minor" driver updates.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (169 commits)
  goldfish: Sanitize the broken interrupt handler
  x86/platform/goldfish: Prevent unconditional loading
  vmbus: replace modulus operation with subtraction
  vmbus: constify parameters where possible
  vmbus: expose hv_begin/end_read
  vmbus: remove conditional locking of vmbus_write
  vmbus: add direct isr callback mode
  vmbus: change to per channel tasklet
  vmbus: put related per-cpu variable together
  vmbus: callback is in softirq not workqueue
  binder: Add support for file-descriptor arrays
  binder: Add support for scatter-gather
  binder: Add extra size to allocator
  binder: Refactor binder_transact()
  binder: Support multiple /dev instances
  binder: Deal with contexts in debugfs
  binder: Support multiple context managers
  binder: Split flat_binder_object
  auxdisplay: ht16k33: remove private workqueue
  auxdisplay: ht16k33: rework input device initialization
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/android/Kconfig | 12
-rw-r--r--  drivers/android/binder.c | 1001
-rw-r--r--  drivers/auxdisplay/ht16k33.c | 320
-rw-r--r--  drivers/char/Kconfig | 5
-rw-r--r--  drivers/char/apm-emulation.c | 7
-rw-r--r--  drivers/char/ds1302.c | 1
-rw-r--r--  drivers/char/mmtimer.c | 6
-rw-r--r--  drivers/char/xilinx_hwicap/buffer_icap.c | 4
-rw-r--r--  drivers/extcon/Kconfig | 10
-rw-r--r--  drivers/extcon/Makefile | 1
-rw-r--r--  drivers/extcon/devres.c | 2
-rw-r--r--  drivers/extcon/extcon-adc-jack.c | 2
-rw-r--r--  drivers/extcon/extcon-arizona.c | 20
-rw-r--r--  drivers/extcon/extcon-axp288.c | 110
-rw-r--r--  drivers/extcon/extcon-intel-int3496.c | 179
-rw-r--r--  drivers/extcon/extcon-max14577.c | 6
-rw-r--r--  drivers/extcon/extcon-max77693.c | 12
-rw-r--r--  drivers/extcon/extcon-max77843.c | 24
-rw-r--r--  drivers/extcon/extcon-palmas.c | 21
-rw-r--r--  drivers/extcon/extcon-rt8973a.c | 6
-rw-r--r--  drivers/extcon/extcon-sm5502.c | 6
-rw-r--r--  drivers/extcon/extcon-usb-gpio.c | 7
-rw-r--r--  drivers/extcon/extcon.c | 43
-rw-r--r--  drivers/extcon/extcon.h | 62
-rw-r--r--  drivers/fpga/fpga-mgr.c | 236
-rw-r--r--  drivers/fpga/zynq-fpga.c | 233
-rw-r--r--  drivers/fsi/Kconfig | 12
-rw-r--r--  drivers/fsi/Makefile | 2
-rw-r--r--  drivers/fsi/fsi-core.c | 59
-rw-r--r--  drivers/hv/channel.c | 82
-rw-r--r--  drivers/hv/channel_mgmt.c | 157
-rw-r--r--  drivers/hv/connection.c | 158
-rw-r--r--  drivers/hv/hv.c | 475
-rw-r--r--  drivers/hv/hv_balloon.c | 1
-rw-r--r--  drivers/hv/hv_fcopy.c | 29
-rw-r--r--  drivers/hv/hv_kvp.c | 47
-rw-r--r--  drivers/hv/hv_snapshot.c | 29
-rw-r--r--  drivers/hv/hv_util.c | 283
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 363
-rw-r--r--  drivers/hv/ring_buffer.c | 73
-rw-r--r--  drivers/hv/vmbus_drv.c | 178
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c | 10
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.h | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-stm.c | 2
-rw-r--r--  drivers/memory/ti-aemif.c | 8
-rw-r--r--  drivers/misc/Kconfig | 49
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/eeprom/Kconfig | 10
-rw-r--r--  drivers/misc/eeprom/Makefile | 1
-rw-r--r--  drivers/misc/eeprom/idt_89hpesx.c | 1581
-rw-r--r--  drivers/misc/genwqe/card_base.c | 1
-rw-r--r--  drivers/misc/lkdtm_bugs.c | 7
-rw-r--r--  drivers/misc/lkdtm_core.c | 4
-rw-r--r--  drivers/misc/mei/amthif.c | 45
-rw-r--r--  drivers/misc/mei/bus.c | 63
-rw-r--r--  drivers/misc/mei/client.c | 145
-rw-r--r--  drivers/misc/mei/client.h | 24
-rw-r--r--  drivers/misc/mei/hbm.c | 2
-rw-r--r--  drivers/misc/mei/hw-me.c | 53
-rw-r--r--  drivers/misc/mei/hw-txe.c | 14
-rw-r--r--  drivers/misc/mei/hw-txe.h | 2
-rw-r--r--  drivers/misc/mei/init.c | 22
-rw-r--r--  drivers/misc/mei/interrupt.c | 36
-rw-r--r--  drivers/misc/mei/main.c | 48
-rw-r--r--  drivers/misc/mei/mei_dev.h | 22
-rw-r--r--  drivers/misc/mei/pci-me.c | 50
-rw-r--r--  drivers/misc/mei/pci-txe.c | 69
-rw-r--r--  drivers/misc/mic/vop/vop_vringh.c | 1
-rw-r--r--  drivers/misc/panel.c | 191
-rw-r--r--  drivers/misc/sram-exec.c | 105
-rw-r--r--  drivers/misc/sram.c | 55
-rw-r--r--  drivers/misc/sram.h | 58
-rw-r--r--  drivers/misc/vmw_vmci/vmci_guest.c | 75
-rw-r--r--  drivers/net/hyperv/netvsc.c | 21
-rw-r--r--  drivers/nvmem/core.c | 45
-rw-r--r--  drivers/nvmem/imx-ocotp.c | 1
-rw-r--r--  drivers/platform/goldfish/pdev_bus.c | 13
-rw-r--r--  drivers/uio/uio_hv_generic.c | 2
-rw-r--r--  drivers/vme/vme.c | 15
-rw-r--r--  drivers/w1/masters/ds2490.c | 141
-rw-r--r--  drivers/w1/masters/omap_hdq.c | 2
-rw-r--r--  drivers/w1/slaves/Kconfig | 8
-rw-r--r--  drivers/w1/slaves/Makefile | 1
-rw-r--r--  drivers/w1/slaves/w1_ds2405.c | 227
-rw-r--r--  drivers/w1/w1.c | 8
-rw-r--r--  drivers/w1/w1.h | 7
-rw-r--r--  drivers/w1/w1_family.c | 7
-rw-r--r--  drivers/w1/w1_family.h | 8
-rw-r--r--  drivers/w1/w1_int.c | 7
-rw-r--r--  drivers/w1/w1_int.h | 7
-rw-r--r--  drivers/w1/w1_io.c | 8
-rw-r--r--  drivers/w1/w1_log.h | 7
-rw-r--r--  drivers/w1/w1_netlink.c | 7
-rw-r--r--  drivers/w1/w1_netlink.h | 7
97 files changed, 5154 insertions, 2438 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index e1e2066cecdb..117ca14ccf85 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -202,4 +202,6 @@ source "drivers/hwtracing/intel_th/Kconfig"
source "drivers/fpga/Kconfig"
+source "drivers/fsi/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 060026a02f59..67ce51d62015 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -173,3 +173,4 @@ obj-$(CONFIG_STM) += hwtracing/stm/
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_NVMEM) += nvmem/
obj-$(CONFIG_FPGA) += fpga/
+obj-$(CONFIG_FSI) += fsi/
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index bdfc6c6f4f5a..a82fc022d34b 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -19,6 +19,18 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDER_DEVICES
+ string "Android Binder devices"
+ depends on ANDROID_BINDER_IPC
+ default "binder"
+ ---help---
+ Default value for the binder.devices parameter.
+
+ The binder.devices parameter is a comma-separated list of strings
+ that specifies the names of the binder device nodes that will be
+ created. Each binder device has its own context manager, and is
+ therefore logically separated from the other devices.
+
config ANDROID_BINDER_IPC_32BIT
bool
depends on !64BIT && ANDROID_BINDER_IPC
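[Editor's note] The new ANDROID_BINDER_DEVICES option seeds the binder.devices module parameter, and binder_init() further down in this patch splits it on commas with strsep() to register one misc device per name. A minimal userspace sketch of that same tokenization; the extra device names ("hwbinder", "vndbinder") are purely illustrative and not created by default:

/* Illustrative only: the comma-separated split that binder_init()
 * performs on the binder.devices parameter, shown as a userspace program.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *names = strdup("binder,hwbinder,vndbinder"); /* hypothetical list */
        char *cursor = names;
        char *name;

        while ((name = strsep(&cursor, ",")) != NULL)
                printf("would register /dev/%s with its own context manager\n",
                       name);

        free(names);
        return 0;
}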
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 3c71b982bf2a..9451b762fa1c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -50,14 +50,13 @@ static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);
+static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
-static struct binder_node *binder_context_mgr_node;
-static kuid_t binder_context_mgr_uid = INVALID_UID;
static int binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
@@ -115,6 +114,9 @@ module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+module_param_named(devices, binder_devices_param, charp, 0444);
+
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
@@ -145,6 +147,17 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
binder_stop_on_user_error = 2; \
} while (0)
+#define to_flat_binder_object(hdr) \
+ container_of(hdr, struct flat_binder_object, hdr)
+
+#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
+
+#define to_binder_buffer_object(hdr) \
+ container_of(hdr, struct binder_buffer_object, hdr)
+
+#define to_binder_fd_array_object(hdr) \
+ container_of(hdr, struct binder_fd_array_object, hdr)
+
enum binder_stat_types {
BINDER_STAT_PROC,
BINDER_STAT_THREAD,
@@ -158,7 +171,7 @@ enum binder_stat_types {
struct binder_stats {
int br[_IOC_NR(BR_FAILED_REPLY) + 1];
- int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+ int bc[_IOC_NR(BC_REPLY_SG) + 1];
int obj_created[BINDER_STAT_COUNT];
int obj_deleted[BINDER_STAT_COUNT];
};
@@ -186,6 +199,7 @@ struct binder_transaction_log_entry {
int to_node;
int data_size;
int offsets_size;
+ const char *context_name;
};
struct binder_transaction_log {
int next;
@@ -210,6 +224,18 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
return e;
}
+struct binder_context {
+ struct binder_node *binder_context_mgr_node;
+ kuid_t binder_context_mgr_uid;
+ const char *name;
+};
+
+struct binder_device {
+ struct hlist_node hlist;
+ struct miscdevice miscdev;
+ struct binder_context context;
+};
+
struct binder_work {
struct list_head entry;
enum {
@@ -282,6 +308,7 @@ struct binder_buffer {
struct binder_node *target_node;
size_t data_size;
size_t offsets_size;
+ size_t extra_buffers_size;
uint8_t data[0];
};
@@ -325,6 +352,7 @@ struct binder_proc {
int ready_threads;
long default_priority;
struct dentry *debugfs_entry;
+ struct binder_context *context;
};
enum {
@@ -648,7 +676,9 @@ err_no_vma:
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
size_t data_size,
- size_t offsets_size, int is_async)
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
{
struct rb_node *n = proc->free_buffers.rb_node;
struct binder_buffer *buffer;
@@ -656,7 +686,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
struct rb_node *best_fit = NULL;
void *has_page_addr;
void *end_page_addr;
- size_t size;
+ size_t size, data_offsets_size;
if (proc->vma == NULL) {
pr_err("%d: binder_alloc_buf, no vma\n",
@@ -664,15 +694,20 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
return NULL;
}
- size = ALIGN(data_size, sizeof(void *)) +
+ data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
- if (size < data_size || size < offsets_size) {
+ if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
proc->pid, data_size, offsets_size);
return NULL;
}
-
+ size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+ if (size < data_offsets_size || size < extra_buffers_size) {
+ binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
+ proc->pid, extra_buffers_size);
+ return NULL;
+ }
if (is_async &&
proc->free_async_space < size + sizeof(struct binder_buffer)) {
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -741,6 +776,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
proc->pid, size, buffer);
buffer->data_size = data_size;
buffer->offsets_size = offsets_size;
+ buffer->extra_buffers_size = extra_buffers_size;
buffer->async_transaction = is_async;
if (is_async) {
proc->free_async_space -= size + sizeof(struct binder_buffer);
@@ -815,7 +851,8 @@ static void binder_free_buf(struct binder_proc *proc,
buffer_size = binder_buffer_size(proc, buffer);
size = ALIGN(buffer->data_size, sizeof(void *)) +
- ALIGN(buffer->offsets_size, sizeof(void *));
+ ALIGN(buffer->offsets_size, sizeof(void *)) +
+ ALIGN(buffer->extra_buffers_size, sizeof(void *));
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_free_buf %p size %zd buffer_size %zd\n",
@@ -929,8 +966,9 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
if (internal) {
if (target_list == NULL &&
node->internal_strong_refs == 0 &&
- !(node == binder_context_mgr_node &&
- node->has_strong_ref)) {
+ !(node->proc &&
+ node == node->proc->context->binder_context_mgr_node &&
+ node->has_strong_ref)) {
pr_err("invalid inc strong node for %d\n",
node->debug_id);
return -EINVAL;
@@ -1031,6 +1069,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
struct binder_ref *ref, *new_ref;
+ struct binder_context *context = proc->context;
while (*p) {
parent = *p;
@@ -1053,7 +1092,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+ new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
if (ref->desc > new_ref->desc)
@@ -1240,11 +1279,158 @@ static void binder_send_failed_reply(struct binder_transaction *t,
}
}
+/**
+ * binder_validate_object() - checks for a valid metadata object in a buffer.
+ * @buffer: binder_buffer that we're parsing.
+ * @offset: offset in the buffer at which to validate an object.
+ *
+ * Return: If there's a valid metadata object at @offset in @buffer, the
+ * size of that object. Otherwise, it returns zero.
+ */
+static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
+{
+ /* Check if we can read a header first */
+ struct binder_object_header *hdr;
+ size_t object_size = 0;
+
+ if (offset > buffer->data_size - sizeof(*hdr) ||
+ buffer->data_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
+ return 0;
+
+ /* Ok, now see if we can read a complete object. */
+ hdr = (struct binder_object_header *)(buffer->data + offset);
+ switch (hdr->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER:
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE:
+ object_size = sizeof(struct flat_binder_object);
+ break;
+ case BINDER_TYPE_FD:
+ object_size = sizeof(struct binder_fd_object);
+ break;
+ case BINDER_TYPE_PTR:
+ object_size = sizeof(struct binder_buffer_object);
+ break;
+ case BINDER_TYPE_FDA:
+ object_size = sizeof(struct binder_fd_array_object);
+ break;
+ default:
+ return 0;
+ }
+ if (offset <= buffer->data_size - object_size &&
+ buffer->data_size >= object_size)
+ return object_size;
+ else
+ return 0;
+}
+
+/**
+ * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
+ * @b: binder_buffer containing the object
+ * @index: index in offset array at which the binder_buffer_object is
+ * located
+ * @start: points to the start of the offset array
+ * @num_valid: the number of valid offsets in the offset array
+ *
+ * Return: If @index is within the valid range of the offset array
+ * described by @start and @num_valid, and if there's a valid
+ * binder_buffer_object at the offset found in index @index
+ * of the offset array, that object is returned. Otherwise,
+ * %NULL is returned.
+ * Note that the offset found in index @index itself is not
+ * verified; this function assumes that @num_valid elements
+ * from @start were previously verified to have valid offsets.
+ */
+static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
+ binder_size_t index,
+ binder_size_t *start,
+ binder_size_t num_valid)
+{
+ struct binder_buffer_object *buffer_obj;
+ binder_size_t *offp;
+
+ if (index >= num_valid)
+ return NULL;
+
+ offp = start + index;
+ buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
+ if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
+ return NULL;
+
+ return buffer_obj;
+}
+
+/**
+ * binder_validate_fixup() - validates pointer/fd fixups happen in order.
+ * @b: transaction buffer
+ * @objects_start start of objects buffer
+ * @buffer: binder_buffer_object in which to fix up
+ * @offset: start offset in @buffer to fix up
+ * @last_obj: last binder_buffer_object that we fixed up in
+ * @last_min_offset: minimum fixup offset in @last_obj
+ *
+ * Return: %true if a fixup in buffer @buffer at offset @offset is
+ * allowed.
+ *
+ * For safety reasons, we only allow fixups inside a buffer to happen
+ * at increasing offsets; additionally, we only allow fixup on the last
+ * buffer object that was verified, or one of its parents.
+ *
+ * Example of what is allowed:
+ *
+ * A
+ * B (parent = A, offset = 0)
+ * C (parent = A, offset = 16)
+ * D (parent = C, offset = 0)
+ * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
+ *
+ * Examples of what is not allowed:
+ *
+ * Decreasing offsets within the same parent:
+ * A
+ * C (parent = A, offset = 16)
+ * B (parent = A, offset = 0) // decreasing offset within A
+ *
+ * Referring to a parent that wasn't the last object or any of its parents:
+ * A
+ * B (parent = A, offset = 0)
+ * C (parent = A, offset = 0)
+ * C (parent = A, offset = 16)
+ * D (parent = B, offset = 0) // B is not A or any of A's parents
+ */
+static bool binder_validate_fixup(struct binder_buffer *b,
+ binder_size_t *objects_start,
+ struct binder_buffer_object *buffer,
+ binder_size_t fixup_offset,
+ struct binder_buffer_object *last_obj,
+ binder_size_t last_min_offset)
+{
+ if (!last_obj) {
+ /* Nothing to fix up in */
+ return false;
+ }
+
+ while (last_obj != buffer) {
+ /*
+ * Safe to retrieve the parent of last_obj, since it
+ * was already previously verified by the driver.
+ */
+ if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
+ return false;
+ last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
+ last_obj = (struct binder_buffer_object *)
+ (b->data + *(objects_start + last_obj->parent));
+ }
+ return (fixup_offset >= last_min_offset);
+}
+
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
binder_size_t *failed_at)
{
- binder_size_t *offp, *off_end;
+ binder_size_t *offp, *off_start, *off_end;
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
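[Editor's note] As a quick illustration of the rule the new binder_validate_fixup() helper enforces (fixups inside a buffer object must land at non-decreasing offsets, and only in the last verified object or one of its parents), here is a minimal standalone model of the "non-decreasing offsets within one parent" half of that check; the struct and function names are invented for the example:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented, simplified model of one scatter-gather buffer object. */
struct sg_buf {
        size_t last_fixup_off;  /* highest offset fixed up so far */
};

/* A new fixup in the same buffer may not land before an earlier one. */
static bool fixup_allowed(struct sg_buf *b, size_t off)
{
        if (off < b->last_fixup_off)
                return false;
        b->last_fixup_off = off;
        return true;
}

int main(void)
{
        struct sg_buf a = { 0 };

        printf("offset  0: %s\n", fixup_allowed(&a, 0) ? "ok" : "rejected");
        printf("offset 16: %s\n", fixup_allowed(&a, 16) ? "ok" : "rejected");
        printf("offset  8: %s\n", fixup_allowed(&a, 8) ? "ok" : "rejected");
        return 0;
}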
@@ -1255,28 +1441,30 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
- offp = (binder_size_t *)(buffer->data +
- ALIGN(buffer->data_size, sizeof(void *)));
+ off_start = (binder_size_t *)(buffer->data +
+ ALIGN(buffer->data_size, sizeof(void *)));
if (failed_at)
off_end = failed_at;
else
- off_end = (void *)offp + buffer->offsets_size;
- for (; offp < off_end; offp++) {
- struct flat_binder_object *fp;
+ off_end = (void *)off_start + buffer->offsets_size;
+ for (offp = off_start; offp < off_end; offp++) {
+ struct binder_object_header *hdr;
+ size_t object_size = binder_validate_object(buffer, *offp);
- if (*offp > buffer->data_size - sizeof(*fp) ||
- buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(u32))) {
- pr_err("transaction release %d bad offset %lld, size %zd\n",
+ if (object_size == 0) {
+ pr_err("transaction release %d bad object at offset %lld, size %zd\n",
debug_id, (u64)*offp, buffer->data_size);
continue;
}
- fp = (struct flat_binder_object *)(buffer->data + *offp);
- switch (fp->type) {
+ hdr = (struct binder_object_header *)(buffer->data + *offp);
+ switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
- struct binder_node *node = binder_get_node(proc, fp->binder);
+ struct flat_binder_object *fp;
+ struct binder_node *node;
+ fp = to_flat_binder_object(hdr);
+ node = binder_get_node(proc, fp->binder);
if (node == NULL) {
pr_err("transaction release %d bad node %016llx\n",
debug_id, (u64)fp->binder);
@@ -1285,15 +1473,17 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx\n",
node->debug_id, (u64)node->ptr);
- binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+ binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
+ 0);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
+ struct flat_binder_object *fp;
struct binder_ref *ref;
+ fp = to_flat_binder_object(hdr);
ref = binder_get_ref(proc, fp->handle,
- fp->type == BINDER_TYPE_HANDLE);
-
+ hdr->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n",
debug_id, fp->handle);
@@ -1302,32 +1492,348 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d (node %d)\n",
ref->debug_id, ref->desc, ref->node->debug_id);
- binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+ binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
} break;
- case BINDER_TYPE_FD:
+ case BINDER_TYPE_FD: {
+ struct binder_fd_object *fp = to_binder_fd_object(hdr);
+
binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %d\n", fp->handle);
+ " fd %d\n", fp->fd);
if (failed_at)
- task_close_fd(proc, fp->handle);
+ task_close_fd(proc, fp->fd);
+ } break;
+ case BINDER_TYPE_PTR:
+ /*
+ * Nothing to do here, this will get cleaned up when the
+ * transaction buffer gets freed
+ */
break;
-
+ case BINDER_TYPE_FDA: {
+ struct binder_fd_array_object *fda;
+ struct binder_buffer_object *parent;
+ uintptr_t parent_buffer;
+ u32 *fd_array;
+ size_t fd_index;
+ binder_size_t fd_buf_size;
+
+ fda = to_binder_fd_array_object(hdr);
+ parent = binder_validate_ptr(buffer, fda->parent,
+ off_start,
+ offp - off_start);
+ if (!parent) {
+ pr_err("transaction release %d bad parent offset",
+ debug_id);
+ continue;
+ }
+ /*
+ * Since the parent was already fixed up, convert it
+ * back to kernel address space to access it
+ */
+ parent_buffer = parent->buffer -
+ proc->user_buffer_offset;
+
+ fd_buf_size = sizeof(u32) * fda->num_fds;
+ if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+ pr_err("transaction release %d invalid number of fds (%lld)\n",
+ debug_id, (u64)fda->num_fds);
+ continue;
+ }
+ if (fd_buf_size > parent->length ||
+ fda->parent_offset > parent->length - fd_buf_size) {
+ /* No space for all file descriptors here. */
+ pr_err("transaction release %d not enough space for %lld fds in buffer\n",
+ debug_id, (u64)fda->num_fds);
+ continue;
+ }
+ fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
+ task_close_fd(proc, fd_array[fd_index]);
+ } break;
default:
pr_err("transaction release %d bad object type %x\n",
- debug_id, fp->type);
+ debug_id, hdr->type);
break;
}
}
}
+static int binder_translate_binder(struct flat_binder_object *fp,
+ struct binder_transaction *t,
+ struct binder_thread *thread)
+{
+ struct binder_node *node;
+ struct binder_ref *ref;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ node = binder_get_node(proc, fp->binder);
+ if (!node) {
+ node = binder_new_node(proc, fp->binder, fp->cookie);
+ if (!node)
+ return -ENOMEM;
+
+ node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ }
+ if (fp->cookie != node->cookie) {
+ binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
+ proc->pid, thread->pid, (u64)fp->binder,
+ node->debug_id, (u64)fp->cookie,
+ (u64)node->cookie);
+ return -EINVAL;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
+ return -EPERM;
+
+ ref = binder_get_ref_for_node(target_proc, node);
+ if (!ref)
+ return -EINVAL;
+
+ if (fp->hdr.type == BINDER_TYPE_BINDER)
+ fp->hdr.type = BINDER_TYPE_HANDLE;
+ else
+ fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
+ fp->binder = 0;
+ fp->handle = ref->desc;
+ fp->cookie = 0;
+ binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
+
+ trace_binder_transaction_node_to_ref(t, node, ref);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " node %d u%016llx -> ref %d desc %d\n",
+ node->debug_id, (u64)node->ptr,
+ ref->debug_id, ref->desc);
+
+ return 0;
+}
+
+static int binder_translate_handle(struct flat_binder_object *fp,
+ struct binder_transaction *t,
+ struct binder_thread *thread)
+{
+ struct binder_ref *ref;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->hdr.type == BINDER_TYPE_HANDLE);
+ if (!ref) {
+ binder_user_error("%d:%d got transaction with invalid handle, %d\n",
+ proc->pid, thread->pid, fp->handle);
+ return -EINVAL;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
+ return -EPERM;
+
+ if (ref->node->proc == target_proc) {
+ if (fp->hdr.type == BINDER_TYPE_HANDLE)
+ fp->hdr.type = BINDER_TYPE_BINDER;
+ else
+ fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
+ fp->binder = ref->node->ptr;
+ fp->cookie = ref->node->cookie;
+ binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
+ 0, NULL);
+ trace_binder_transaction_ref_to_node(t, ref);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> node %d u%016llx\n",
+ ref->debug_id, ref->desc, ref->node->debug_id,
+ (u64)ref->node->ptr);
+ } else {
+ struct binder_ref *new_ref;
+
+ new_ref = binder_get_ref_for_node(target_proc, ref->node);
+ if (!new_ref)
+ return -EINVAL;
+
+ fp->binder = 0;
+ fp->handle = new_ref->desc;
+ fp->cookie = 0;
+ binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
+ NULL);
+ trace_binder_transaction_ref_to_ref(t, ref, new_ref);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, new_ref->debug_id,
+ new_ref->desc, ref->node->debug_id);
+ }
+ return 0;
+}
+
+static int binder_translate_fd(int fd,
+ struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_transaction *in_reply_to)
+{
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+ int target_fd;
+ struct file *file;
+ int ret;
+ bool target_allows_fd;
+
+ if (in_reply_to)
+ target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
+ else
+ target_allows_fd = t->buffer->target_node->accept_fds;
+ if (!target_allows_fd) {
+ binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
+ proc->pid, thread->pid,
+ in_reply_to ? "reply" : "transaction",
+ fd);
+ ret = -EPERM;
+ goto err_fd_not_accepted;
+ }
+
+ file = fget(fd);
+ if (!file) {
+ binder_user_error("%d:%d got transaction with invalid fd, %d\n",
+ proc->pid, thread->pid, fd);
+ ret = -EBADF;
+ goto err_fget;
+ }
+ ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
+ if (ret < 0) {
+ ret = -EPERM;
+ goto err_security;
+ }
+
+ target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+ if (target_fd < 0) {
+ ret = -ENOMEM;
+ goto err_get_unused_fd;
+ }
+ task_fd_install(target_proc, target_fd, file);
+ trace_binder_transaction_fd(t, fd, target_fd);
+ binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
+ fd, target_fd);
+
+ return target_fd;
+
+err_get_unused_fd:
+err_security:
+ fput(file);
+err_fget:
+err_fd_not_accepted:
+ return ret;
+}
+
+static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+ struct binder_buffer_object *parent,
+ struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_transaction *in_reply_to)
+{
+ binder_size_t fdi, fd_buf_size, num_installed_fds;
+ int target_fd;
+ uintptr_t parent_buffer;
+ u32 *fd_array;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ fd_buf_size = sizeof(u32) * fda->num_fds;
+ if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+ binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
+ proc->pid, thread->pid, (u64)fda->num_fds);
+ return -EINVAL;
+ }
+ if (fd_buf_size > parent->length ||
+ fda->parent_offset > parent->length - fd_buf_size) {
+ /* No space for all file descriptors here. */
+ binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
+ proc->pid, thread->pid, (u64)fda->num_fds);
+ return -EINVAL;
+ }
+ /*
+ * Since the parent was already fixed up, convert it
+ * back to the kernel address space to access it
+ */
+ parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+ fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
+ binder_user_error("%d:%d parent offset not aligned correctly.\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+ for (fdi = 0; fdi < fda->num_fds; fdi++) {
+ target_fd = binder_translate_fd(fd_array[fdi], t, thread,
+ in_reply_to);
+ if (target_fd < 0)
+ goto err_translate_fd_failed;
+ fd_array[fdi] = target_fd;
+ }
+ return 0;
+
+err_translate_fd_failed:
+ /*
+ * Failed to allocate fd or security error, free fds
+ * installed so far.
+ */
+ num_installed_fds = fdi;
+ for (fdi = 0; fdi < num_installed_fds; fdi++)
+ task_close_fd(target_proc, fd_array[fdi]);
+ return target_fd;
+}
+
+static int binder_fixup_parent(struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_buffer_object *bp,
+ binder_size_t *off_start,
+ binder_size_t num_valid,
+ struct binder_buffer_object *last_fixup_obj,
+ binder_size_t last_fixup_min_off)
+{
+ struct binder_buffer_object *parent;
+ u8 *parent_buffer;
+ struct binder_buffer *b = t->buffer;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
+ return 0;
+
+ parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
+ if (!parent) {
+ binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+
+ if (!binder_validate_fixup(b, off_start,
+ parent, bp->parent_offset,
+ last_fixup_obj,
+ last_fixup_min_off)) {
+ binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+
+ if (parent->length < sizeof(binder_uintptr_t) ||
+ bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
+ /* No space for a pointer here! */
+ binder_user_error("%d:%d got transaction with invalid parent offset\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+ parent_buffer = (u8 *)(parent->buffer -
+ target_proc->user_buffer_offset);
+ *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
+
+ return 0;
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
- struct binder_transaction_data *tr, int reply)
+ struct binder_transaction_data *tr, int reply,
+ binder_size_t extra_buffers_size)
{
+ int ret;
struct binder_transaction *t;
struct binder_work *tcomplete;
- binder_size_t *offp, *off_end;
+ binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
+ u8 *sg_bufp, *sg_buf_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
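[Editor's note] The translate helpers above repeatedly convert between the kernel and userspace views of the shared binder buffer via user_buffer_offset (e.g. parent->buffer - target_proc->user_buffer_offset). A toy standalone sketch of that arithmetic, with made-up 64-bit addresses, just to show the two views differ by a constant offset:

#include <stdint.h>
#include <stdio.h>

/* Toy model, not kernel code: the binder buffer is mapped at a kernel
 * address and at a per-process user address, a constant offset apart,
 * so pointers can be translated by adding or subtracting that offset.
 */
int main(void)
{
        uintptr_t kernel_base = 0xffff880012340000UL;   /* made up */
        uintptr_t user_base   = 0x00007f0000400000UL;   /* made up */
        uintptr_t user_buffer_offset = user_base - kernel_base;

        uintptr_t user_ptr   = user_base + 0x80;        /* e.g. bp->buffer */
        uintptr_t kernel_ptr = user_ptr - user_buffer_offset;

        printf("user %#lx -> kernel %#lx (same 0x80 bytes into the buffer)\n",
               (unsigned long)user_ptr, (unsigned long)kernel_ptr);
        return 0;
}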
@@ -1336,6 +1842,9 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
+ struct binder_buffer_object *last_fixup_obj = NULL;
+ binder_size_t last_fixup_min_off = 0;
+ struct binder_context *context = proc->context;
e = binder_transaction_log_add(&binder_transaction_log);
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
@@ -1344,6 +1853,7 @@ static void binder_transaction(struct binder_proc *proc,
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
+ e->context_name = proc->context->name;
if (reply) {
in_reply_to = thread->transaction_stack;
@@ -1396,7 +1906,7 @@ static void binder_transaction(struct binder_proc *proc,
}
target_node = ref->node;
} else {
- target_node = binder_context_mgr_node;
+ target_node = context->binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
goto err_no_context_mgr_node;
@@ -1463,20 +1973,22 @@ static void binder_transaction(struct binder_proc *proc,
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
+ "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ (u64)extra_buffers_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
+ "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ (u64)extra_buffers_size);
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
@@ -1492,7 +2004,8 @@ static void binder_transaction(struct binder_proc *proc,
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
- tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+ tr->offsets_size, extra_buffers_size,
+ !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
@@ -1505,8 +2018,9 @@ static void binder_transaction(struct binder_proc *proc,
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
- offp = (binder_size_t *)(t->buffer->data +
- ALIGN(tr->data_size, sizeof(void *)));
+ off_start = (binder_size_t *)(t->buffer->data +
+ ALIGN(tr->data_size, sizeof(void *)));
+ offp = off_start;
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
@@ -1528,177 +2042,138 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
- off_end = (void *)offp + tr->offsets_size;
+ if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
+ binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
+ proc->pid, thread->pid,
+ (u64)extra_buffers_size);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ off_end = (void *)off_start + tr->offsets_size;
+ sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
+ sg_buf_end = sg_bufp + extra_buffers_size;
off_min = 0;
for (; offp < off_end; offp++) {
- struct flat_binder_object *fp;
+ struct binder_object_header *hdr;
+ size_t object_size = binder_validate_object(t->buffer, *offp);
- if (*offp > t->buffer->data_size - sizeof(*fp) ||
- *offp < off_min ||
- t->buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(u32))) {
- binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+ if (object_size == 0 || *offp < off_min) {
+ binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
proc->pid, thread->pid, (u64)*offp,
(u64)off_min,
- (u64)(t->buffer->data_size -
- sizeof(*fp)));
+ (u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
- fp = (struct flat_binder_object *)(t->buffer->data + *offp);
- off_min = *offp + sizeof(struct flat_binder_object);
- switch (fp->type) {
+
+ hdr = (struct binder_object_header *)(t->buffer->data + *offp);
+ off_min = *offp + object_size;
+ switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
- struct binder_ref *ref;
- struct binder_node *node = binder_get_node(proc, fp->binder);
+ struct flat_binder_object *fp;
- if (node == NULL) {
- node = binder_new_node(proc, fp->binder, fp->cookie);
- if (node == NULL) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_new_node_failed;
- }
- node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
- }
- if (fp->cookie != node->cookie) {
- binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
- proc->pid, thread->pid,
- (u64)fp->binder, node->debug_id,
- (u64)fp->cookie, (u64)node->cookie);
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
- }
- if (security_binder_transfer_binder(proc->tsk,
- target_proc->tsk)) {
+ fp = to_flat_binder_object(hdr);
+ ret = binder_translate_binder(fp, t, thread);
+ if (ret < 0) {
return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
+ goto err_translate_failed;
}
- ref = binder_get_ref_for_node(target_proc, node);
- if (ref == NULL) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
- }
- if (fp->type == BINDER_TYPE_BINDER)
- fp->type = BINDER_TYPE_HANDLE;
- else
- fp->type = BINDER_TYPE_WEAK_HANDLE;
- fp->binder = 0;
- fp->handle = ref->desc;
- fp->cookie = 0;
- binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
- &thread->todo);
-
- trace_binder_transaction_node_to_ref(t, node, ref);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " node %d u%016llx -> ref %d desc %d\n",
- node->debug_id, (u64)node->ptr,
- ref->debug_id, ref->desc);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref;
+ struct flat_binder_object *fp;
- ref = binder_get_ref(proc, fp->handle,
- fp->type == BINDER_TYPE_HANDLE);
+ fp = to_flat_binder_object(hdr);
+ ret = binder_translate_handle(fp, t, thread);
+ if (ret < 0) {
+ return_error = BR_FAILED_REPLY;
+ goto err_translate_failed;
+ }
+ } break;
- if (ref == NULL) {
- binder_user_error("%d:%d got transaction with invalid handle, %d\n",
- proc->pid,
- thread->pid, fp->handle);
+ case BINDER_TYPE_FD: {
+ struct binder_fd_object *fp = to_binder_fd_object(hdr);
+ int target_fd = binder_translate_fd(fp->fd, t, thread,
+ in_reply_to);
+
+ if (target_fd < 0) {
return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_failed;
+ goto err_translate_failed;
}
- if (security_binder_transfer_binder(proc->tsk,
- target_proc->tsk)) {
+ fp->pad_binder = 0;
+ fp->fd = target_fd;
+ } break;
+ case BINDER_TYPE_FDA: {
+ struct binder_fd_array_object *fda =
+ to_binder_fd_array_object(hdr);
+ struct binder_buffer_object *parent =
+ binder_validate_ptr(t->buffer, fda->parent,
+ off_start,
+ offp - off_start);
+ if (!parent) {
+ binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
+ proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_failed;
+ goto err_bad_parent;
}
- if (ref->node->proc == target_proc) {
- if (fp->type == BINDER_TYPE_HANDLE)
- fp->type = BINDER_TYPE_BINDER;
- else
- fp->type = BINDER_TYPE_WEAK_BINDER;
- fp->binder = ref->node->ptr;
- fp->cookie = ref->node->cookie;
- binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
- trace_binder_transaction_ref_to_node(t, ref);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d -> node %d u%016llx\n",
- ref->debug_id, ref->desc, ref->node->debug_id,
- (u64)ref->node->ptr);
- } else {
- struct binder_ref *new_ref;
-
- new_ref = binder_get_ref_for_node(target_proc, ref->node);
- if (new_ref == NULL) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
- }
- fp->binder = 0;
- fp->handle = new_ref->desc;
- fp->cookie = 0;
- binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
- trace_binder_transaction_ref_to_ref(t, ref,
- new_ref);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d -> ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, new_ref->debug_id,
- new_ref->desc, ref->node->debug_id);
+ if (!binder_validate_fixup(t->buffer, off_start,
+ parent, fda->parent_offset,
+ last_fixup_obj,
+ last_fixup_min_off)) {
+ binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_parent;
}
- } break;
-
- case BINDER_TYPE_FD: {
- int target_fd;
- struct file *file;
-
- if (reply) {
- if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
- binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
- proc->pid, thread->pid, fp->handle);
- return_error = BR_FAILED_REPLY;
- goto err_fd_not_allowed;
- }
- } else if (!target_node->accept_fds) {
- binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
- proc->pid, thread->pid, fp->handle);
+ ret = binder_translate_fd_array(fda, parent, t, thread,
+ in_reply_to);
+ if (ret < 0) {
return_error = BR_FAILED_REPLY;
- goto err_fd_not_allowed;
+ goto err_translate_failed;
}
-
- file = fget(fp->handle);
- if (file == NULL) {
- binder_user_error("%d:%d got transaction with invalid fd, %d\n",
- proc->pid, thread->pid, fp->handle);
+ last_fixup_obj = parent;
+ last_fixup_min_off =
+ fda->parent_offset + sizeof(u32) * fda->num_fds;
+ } break;
+ case BINDER_TYPE_PTR: {
+ struct binder_buffer_object *bp =
+ to_binder_buffer_object(hdr);
+ size_t buf_left = sg_buf_end - sg_bufp;
+
+ if (bp->length > buf_left) {
+ binder_user_error("%d:%d got transaction with too large buffer\n",
+ proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
- goto err_fget_failed;
+ goto err_bad_offset;
}
- if (security_binder_transfer_file(proc->tsk,
- target_proc->tsk,
- file) < 0) {
- fput(file);
+ if (copy_from_user(sg_bufp,
+ (const void __user *)(uintptr_t)
+ bp->buffer, bp->length)) {
+ binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+ proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
- goto err_get_unused_fd_failed;
+ goto err_copy_data_failed;
}
- target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
- if (target_fd < 0) {
- fput(file);
+ /* Fixup buffer pointer to target proc address space */
+ bp->buffer = (uintptr_t)sg_bufp +
+ target_proc->user_buffer_offset;
+ sg_bufp += ALIGN(bp->length, sizeof(u64));
+
+ ret = binder_fixup_parent(t, thread, bp, off_start,
+ offp - off_start,
+ last_fixup_obj,
+ last_fixup_min_off);
+ if (ret < 0) {
return_error = BR_FAILED_REPLY;
- goto err_get_unused_fd_failed;
+ goto err_translate_failed;
}
- task_fd_install(target_proc, target_fd, file);
- trace_binder_transaction_fd(t, fp->handle, target_fd);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %d -> %d\n", fp->handle, target_fd);
- /* TODO: fput? */
- fp->binder = 0;
- fp->handle = target_fd;
+ last_fixup_obj = bp;
+ last_fixup_min_off = 0;
} break;
-
default:
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
- proc->pid, thread->pid, fp->type);
+ proc->pid, thread->pid, hdr->type);
return_error = BR_FAILED_REPLY;
goto err_bad_object_type;
}
@@ -1728,14 +2203,10 @@ static void binder_transaction(struct binder_proc *proc,
wake_up_interruptible(target_wait);
return;
-err_get_unused_fd_failed:
-err_fget_failed:
-err_fd_not_allowed:
-err_binder_get_ref_for_node_failed:
-err_binder_get_ref_failed:
-err_binder_new_node_failed:
+err_translate_failed:
err_bad_object_type:
err_bad_offset:
+err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
@@ -1779,6 +2250,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_size_t *consumed)
{
uint32_t cmd;
+ struct binder_context *context = proc->context;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
@@ -1805,10 +2277,10 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (target == 0 && binder_context_mgr_node &&
+ if (target == 0 && context->binder_context_mgr_node &&
(cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
ref = binder_get_ref_for_node(proc,
- binder_context_mgr_node);
+ context->binder_context_mgr_node);
if (ref->desc != target) {
binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
proc->pid, thread->pid,
@@ -1953,6 +2425,17 @@ static int binder_thread_write(struct binder_proc *proc,
break;
}
+ case BC_TRANSACTION_SG:
+ case BC_REPLY_SG: {
+ struct binder_transaction_data_sg tr;
+
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+ binder_transaction(proc, thread, &tr.transaction_data,
+ cmd == BC_REPLY_SG, tr.buffers_size);
+ break;
+ }
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
@@ -1960,7 +2443,8 @@ static int binder_thread_write(struct binder_proc *proc,
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
- binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+ binder_transaction(proc, thread, &tr,
+ cmd == BC_REPLY, 0);
break;
}
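[Editor's note] For context, a hedged userspace sketch of how a client might queue the new BC_TRANSACTION_SG command through BINDER_WRITE_READ. The structure and command names follow the uapi types referenced in this hunk; the header path, the packed wrapper struct and the omitted read/reply handling are assumptions for illustration, not part of the patch:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>       /* assumed uapi header location */

static int send_transaction_sg(int binder_fd,
                               const struct binder_transaction_data *td,
                               binder_size_t extra_buffers_size)
{
        struct {
                uint32_t cmd;
                struct binder_transaction_data_sg payload;
        } __attribute__((packed)) writebuf;
        struct binder_write_read bwr;

        writebuf.cmd = BC_TRANSACTION_SG;
        writebuf.payload.transaction_data = *td;
        /* must be a multiple of 8, per the IS_ALIGNED() check in the patch */
        writebuf.payload.buffers_size = extra_buffers_size;

        memset(&bwr, 0, sizeof(bwr));
        bwr.write_buffer = (uintptr_t)&writebuf;
        bwr.write_size = sizeof(writebuf);

        return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}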
@@ -2714,9 +3198,11 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
+ struct binder_context *context = proc->context;
+
kuid_t curr_euid = current_euid();
- if (binder_context_mgr_node != NULL) {
+ if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
goto out;
@@ -2724,27 +3210,27 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
ret = security_binder_set_context_mgr(proc->tsk);
if (ret < 0)
goto out;
- if (uid_valid(binder_context_mgr_uid)) {
- if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
+ if (uid_valid(context->binder_context_mgr_uid)) {
+ if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
- binder_context_mgr_uid));
+ context->binder_context_mgr_uid));
ret = -EPERM;
goto out;
}
} else {
- binder_context_mgr_uid = curr_euid;
+ context->binder_context_mgr_uid = curr_euid;
}
- binder_context_mgr_node = binder_new_node(proc, 0, 0);
- if (binder_context_mgr_node == NULL) {
+ context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
+ if (!context->binder_context_mgr_node) {
ret = -ENOMEM;
goto out;
}
- binder_context_mgr_node->local_weak_refs++;
- binder_context_mgr_node->local_strong_refs++;
- binder_context_mgr_node->has_strong_ref = 1;
- binder_context_mgr_node->has_weak_ref = 1;
+ context->binder_context_mgr_node->local_weak_refs++;
+ context->binder_context_mgr_node->local_strong_refs++;
+ context->binder_context_mgr_node->has_strong_ref = 1;
+ context->binder_context_mgr_node->has_weak_ref = 1;
out:
return ret;
}
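[Editor's note] Since the context manager now lives in the per-device binder_context, each device node can have its own manager. A small userspace sketch; "hwbinder" stands for any extra device name configured via binder.devices and is hypothetical here:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

int main(void)
{
        int fd = open("/dev/hwbinder", O_RDWR);

        if (fd < 0) {
                perror("open /dev/hwbinder");
                return 1;
        }
        /* Each device keeps its own binder_context, so this can succeed
         * even if another process already manages /dev/binder. */
        if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0) {
                perror("BINDER_SET_CONTEXT_MGR");
                return 1;
        }
        return 0;
}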
@@ -2969,6 +3455,7 @@ err_bad_arg:
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
+ struct binder_device *binder_dev;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
current->group_leader->pid, current->pid);
@@ -2982,6 +3469,9 @@ static int binder_open(struct inode *nodp, struct file *filp)
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
+ binder_dev = container_of(filp->private_data, struct binder_device,
+ miscdev);
+ proc->context = &binder_dev->context;
binder_lock(__func__);
@@ -2997,8 +3487,17 @@ static int binder_open(struct inode *nodp, struct file *filp)
char strbuf[11];
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+ /*
+ * proc debug entries are shared between contexts, so
+ * this will fail if the process tries to open the driver
+	 * again with a different context. The printing code will
+ * anyway print all contexts that a given PID has, so this
+ * is not a problem.
+ */
proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
- binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+ binder_debugfs_dir_entry_proc,
+ (void *)(unsigned long)proc->pid,
+ &binder_proc_fops);
}
return 0;
@@ -3091,6 +3590,7 @@ static int binder_node_release(struct binder_node *node, int refs)
static void binder_deferred_release(struct binder_proc *proc)
{
struct binder_transaction *t;
+ struct binder_context *context = proc->context;
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, buffers,
active_transactions, page_count;
@@ -3100,11 +3600,12 @@ static void binder_deferred_release(struct binder_proc *proc)
hlist_del(&proc->proc_node);
- if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+ if (context->binder_context_mgr_node &&
+ context->binder_context_mgr_node->proc == proc) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%s: %d context_mgr_node gone\n",
__func__, proc->pid);
- binder_context_mgr_node = NULL;
+ context->binder_context_mgr_node = NULL;
}
threads = 0;
@@ -3391,6 +3892,7 @@ static void print_binder_proc(struct seq_file *m,
size_t header_pos;
seq_printf(m, "proc %d\n", proc->pid);
+ seq_printf(m, "context %s\n", proc->context->name);
header_pos = m->count;
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
@@ -3460,7 +3962,9 @@ static const char * const binder_command_strings[] = {
"BC_EXIT_LOOPER",
"BC_REQUEST_DEATH_NOTIFICATION",
"BC_CLEAR_DEATH_NOTIFICATION",
- "BC_DEAD_BINDER_DONE"
+ "BC_DEAD_BINDER_DONE",
+ "BC_TRANSACTION_SG",
+ "BC_REPLY_SG",
};
static const char * const binder_objstat_strings[] = {
@@ -3515,6 +4019,7 @@ static void print_binder_proc_stats(struct seq_file *m,
int count, strong, weak;
seq_printf(m, "proc %d\n", proc->pid);
+ seq_printf(m, "context %s\n", proc->context->name);
count = 0;
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
count++;
@@ -3622,23 +4127,18 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
static int binder_proc_show(struct seq_file *m, void *unused)
{
struct binder_proc *itr;
- struct binder_proc *proc = m->private;
+ int pid = (unsigned long)m->private;
int do_lock = !binder_debug_no_lock;
- bool valid_proc = false;
if (do_lock)
binder_lock(__func__);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
- if (itr == proc) {
- valid_proc = true;
- break;
+ if (itr->pid == pid) {
+ seq_puts(m, "binder proc state:\n");
+ print_binder_proc(m, itr, 1);
}
}
- if (valid_proc) {
- seq_puts(m, "binder proc state:\n");
- print_binder_proc(m, proc, 1);
- }
if (do_lock)
binder_unlock(__func__);
return 0;
@@ -3648,11 +4148,11 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
struct binder_transaction_log_entry *e)
{
seq_printf(m,
- "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+ "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
e->debug_id, (e->call_type == 2) ? "reply" :
((e->call_type == 1) ? "async" : "call "), e->from_proc,
- e->from_thread, e->to_proc, e->to_thread, e->to_node,
- e->target_handle, e->data_size, e->offsets_size);
+ e->from_thread, e->to_proc, e->to_thread, e->context_name,
+ e->to_node, e->target_handle, e->data_size, e->offsets_size);
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
@@ -3680,26 +4180,50 @@ static const struct file_operations binder_fops = {
.release = binder_release,
};
-static struct miscdevice binder_miscdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "binder",
- .fops = &binder_fops
-};
-
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
+static int __init init_binder_device(const char *name)
+{
+ int ret;
+ struct binder_device *binder_device;
+
+ binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
+ if (!binder_device)
+ return -ENOMEM;
+
+ binder_device->miscdev.fops = &binder_fops;
+ binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
+ binder_device->miscdev.name = name;
+
+ binder_device->context.binder_context_mgr_uid = INVALID_UID;
+ binder_device->context.name = name;
+
+ ret = misc_register(&binder_device->miscdev);
+ if (ret < 0) {
+ kfree(binder_device);
+ return ret;
+ }
+
+ hlist_add_head(&binder_device->hlist, &binder_devices);
+
+ return ret;
+}
+
static int __init binder_init(void)
{
int ret;
+ char *device_name, *device_names;
+ struct binder_device *device;
+ struct hlist_node *tmp;
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
binder_debugfs_dir_entry_root);
- ret = misc_register(&binder_miscdev);
+
if (binder_debugfs_dir_entry_root) {
debugfs_create_file("state",
S_IRUGO,
@@ -3727,6 +4251,35 @@ static int __init binder_init(void)
&binder_transaction_log_failed,
&binder_transaction_log_fops);
}
+
+ /*
+ * Copy the module_parameter string, because we don't want to
+ * tokenize it in-place.
+ */
+ device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
+ if (!device_names) {
+ ret = -ENOMEM;
+ goto err_alloc_device_names_failed;
+ }
+ strcpy(device_names, binder_devices_param);
+
+ while ((device_name = strsep(&device_names, ","))) {
+ ret = init_binder_device(device_name);
+ if (ret)
+ goto err_init_binder_device_failed;
+ }
+
+ return ret;
+
+err_init_binder_device_failed:
+ hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
+ misc_deregister(&device->miscdev);
+ hlist_del(&device->hlist);
+ kfree(device);
+ }
+err_alloc_device_names_failed:
+ debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+
return ret;
}
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index eeb323f56c07..f66b45b235b0 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -56,14 +56,16 @@
#define HT16K33_FB_SIZE (HT16K33_MATRIX_LED_MAX_COLS * BYTES_PER_ROW)
struct ht16k33_keypad {
+ struct i2c_client *client;
struct input_dev *dev;
- spinlock_t lock;
- struct delayed_work work;
uint32_t cols;
uint32_t rows;
uint32_t row_shift;
uint32_t debounce_ms;
uint16_t last_key_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+
+ wait_queue_head_t wait;
+ bool stopped;
};
struct ht16k33_fbdev {
@@ -78,7 +80,6 @@ struct ht16k33_priv {
struct i2c_client *client;
struct ht16k33_keypad keypad;
struct ht16k33_fbdev fbdev;
- struct workqueue_struct *workqueue;
};
static struct fb_fix_screeninfo ht16k33_fb_fix = {
@@ -124,16 +125,8 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv)
{
struct ht16k33_fbdev *fbdev = &priv->fbdev;
- queue_delayed_work(priv->workqueue, &fbdev->work,
- msecs_to_jiffies(HZ / fbdev->refresh_rate));
-}
-
-static void ht16k33_keypad_queue(struct ht16k33_priv *priv)
-{
- struct ht16k33_keypad *keypad = &priv->keypad;
-
- queue_delayed_work(priv->workqueue, &keypad->work,
- msecs_to_jiffies(keypad->debounce_ms));
+ schedule_delayed_work(&fbdev->work,
+ msecs_to_jiffies(HZ / fbdev->refresh_rate));
}
/*
@@ -182,32 +175,6 @@ requeue:
ht16k33_fb_queue(priv);
}
-static int ht16k33_keypad_start(struct input_dev *dev)
-{
- struct ht16k33_priv *priv = input_get_drvdata(dev);
- struct ht16k33_keypad *keypad = &priv->keypad;
-
- /*
- * Schedule an immediate key scan to capture current key state;
- * columns will be activated and IRQs be enabled after the scan.
- */
- queue_delayed_work(priv->workqueue, &keypad->work, 0);
- return 0;
-}
-
-static void ht16k33_keypad_stop(struct input_dev *dev)
-{
- struct ht16k33_priv *priv = input_get_drvdata(dev);
- struct ht16k33_keypad *keypad = &priv->keypad;
-
- cancel_delayed_work(&keypad->work);
- /*
- * ht16k33_keypad_scan() will leave IRQs enabled;
- * we should disable them now.
- */
- disable_irq_nosync(priv->client->irq);
-}
-
static int ht16k33_initialize(struct ht16k33_priv *priv)
{
uint8_t byte;
@@ -233,61 +200,6 @@ static int ht16k33_initialize(struct ht16k33_priv *priv)
return i2c_smbus_write_byte(priv->client, byte);
}
-/*
- * This gets the keys from keypad and reports it to input subsystem
- */
-static void ht16k33_keypad_scan(struct work_struct *work)
-{
- struct ht16k33_keypad *keypad =
- container_of(work, struct ht16k33_keypad, work.work);
- struct ht16k33_priv *priv =
- container_of(keypad, struct ht16k33_priv, keypad);
- const unsigned short *keycodes = keypad->dev->keycode;
- uint16_t bits_changed, new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
- uint8_t data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2];
- int row, col, code;
- bool reschedule = false;
-
- if (i2c_smbus_read_i2c_block_data(priv->client, 0x40, 6, data) != 6) {
- dev_err(&priv->client->dev, "Failed to read key data\n");
- goto end;
- }
-
- for (col = 0; col < keypad->cols; col++) {
- new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2];
- if (new_state[col])
- reschedule = true;
- bits_changed = keypad->last_key_state[col] ^ new_state[col];
-
- while (bits_changed) {
- row = ffs(bits_changed) - 1;
- code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
- input_event(keypad->dev, EV_MSC, MSC_SCAN, code);
- input_report_key(keypad->dev, keycodes[code],
- new_state[col] & BIT(row));
- bits_changed &= ~BIT(row);
- }
- }
- input_sync(keypad->dev);
- memcpy(keypad->last_key_state, new_state, sizeof(new_state));
-
-end:
- if (reschedule)
- ht16k33_keypad_queue(priv);
- else
- enable_irq(priv->client->irq);
-}
-
-static irqreturn_t ht16k33_irq_thread(int irq, void *dev)
-{
- struct ht16k33_priv *priv = dev;
-
- disable_irq_nosync(priv->client->irq);
- ht16k33_keypad_queue(priv);
-
- return IRQ_HANDLED;
-}
-
static int ht16k33_bl_update_status(struct backlight_device *bl)
{
int brightness = bl->props.brightness;
@@ -334,15 +246,152 @@ static struct fb_ops ht16k33_fb_ops = {
.fb_mmap = ht16k33_mmap,
};
+/*
+ * This gets the keys from keypad and reports it to input subsystem.
+ * Returns true if a key is pressed.
+ */
+static bool ht16k33_keypad_scan(struct ht16k33_keypad *keypad)
+{
+ const unsigned short *keycodes = keypad->dev->keycode;
+ u16 new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+ u8 data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2];
+ unsigned long bits_changed;
+ int row, col, code;
+ bool pressed = false;
+
+ if (i2c_smbus_read_i2c_block_data(keypad->client, 0x40, 6, data) != 6) {
+ dev_err(&keypad->client->dev, "Failed to read key data\n");
+ return false;
+ }
+
+ for (col = 0; col < keypad->cols; col++) {
+ new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2];
+ if (new_state[col])
+ pressed = true;
+ bits_changed = keypad->last_key_state[col] ^ new_state[col];
+
+ for_each_set_bit(row, &bits_changed, BITS_PER_LONG) {
+ code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
+ input_event(keypad->dev, EV_MSC, MSC_SCAN, code);
+ input_report_key(keypad->dev, keycodes[code],
+ new_state[col] & BIT(row));
+ }
+ }
+ input_sync(keypad->dev);
+ memcpy(keypad->last_key_state, new_state, sizeof(new_state));
+
+ return pressed;
+}
+
+static irqreturn_t ht16k33_keypad_irq_thread(int irq, void *dev)
+{
+ struct ht16k33_keypad *keypad = dev;
+
+ do {
+ wait_event_timeout(keypad->wait, keypad->stopped,
+ msecs_to_jiffies(keypad->debounce_ms));
+ if (keypad->stopped)
+ break;
+ } while (ht16k33_keypad_scan(keypad));
+
+ return IRQ_HANDLED;
+}
+
+static int ht16k33_keypad_start(struct input_dev *dev)
+{
+ struct ht16k33_keypad *keypad = input_get_drvdata(dev);
+
+ keypad->stopped = false;
+ mb();
+ enable_irq(keypad->client->irq);
+
+ return 0;
+}
+
+static void ht16k33_keypad_stop(struct input_dev *dev)
+{
+ struct ht16k33_keypad *keypad = input_get_drvdata(dev);
+
+ keypad->stopped = true;
+ mb();
+ wake_up(&keypad->wait);
+ disable_irq(keypad->client->irq);
+}
+
+static int ht16k33_keypad_probe(struct i2c_client *client,
+ struct ht16k33_keypad *keypad)
+{
+ struct device_node *node = client->dev.of_node;
+ u32 rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS;
+ u32 cols = HT16K33_MATRIX_KEYPAD_MAX_COLS;
+ int err;
+
+ keypad->client = client;
+ init_waitqueue_head(&keypad->wait);
+
+ keypad->dev = devm_input_allocate_device(&client->dev);
+ if (!keypad->dev)
+ return -ENOMEM;
+
+ input_set_drvdata(keypad->dev, keypad);
+
+ keypad->dev->name = DRIVER_NAME"-keypad";
+ keypad->dev->id.bustype = BUS_I2C;
+ keypad->dev->open = ht16k33_keypad_start;
+ keypad->dev->close = ht16k33_keypad_stop;
+
+ if (!of_get_property(node, "linux,no-autorepeat", NULL))
+ __set_bit(EV_REP, keypad->dev->evbit);
+
+ err = of_property_read_u32(node, "debounce-delay-ms",
+ &keypad->debounce_ms);
+ if (err) {
+ dev_err(&client->dev, "key debounce delay not specified\n");
+ return err;
+ }
+
+ err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols);
+ if (err)
+ return err;
+
+ keypad->rows = rows;
+ keypad->cols = cols;
+ keypad->row_shift = get_count_order(cols);
+
+ err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL,
+ keypad->dev);
+ if (err) {
+ dev_err(&client->dev, "failed to build keymap\n");
+ return err;
+ }
+
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, ht16k33_keypad_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ DRIVER_NAME, keypad);
+ if (err) {
+ dev_err(&client->dev, "irq request failed %d, error %d\n",
+ client->irq, err);
+ return err;
+ }
+
+ ht16k33_keypad_stop(keypad->dev);
+
+ err = input_register_device(keypad->dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int ht16k33_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int err;
- uint32_t rows, cols, dft_brightness;
+ uint32_t dft_brightness;
struct backlight_device *bl;
struct backlight_properties bl_props;
struct ht16k33_priv *priv;
- struct ht16k33_keypad *keypad;
struct ht16k33_fbdev *fbdev;
struct device_node *node = client->dev.of_node;
@@ -363,23 +412,16 @@ static int ht16k33_probe(struct i2c_client *client,
priv->client = client;
i2c_set_clientdata(client, priv);
fbdev = &priv->fbdev;
- keypad = &priv->keypad;
-
- priv->workqueue = create_singlethread_workqueue(DRIVER_NAME "-wq");
- if (priv->workqueue == NULL)
- return -ENOMEM;
err = ht16k33_initialize(priv);
if (err)
- goto err_destroy_wq;
+ return err;
/* Framebuffer (2 bytes per column) */
BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE);
fbdev->buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL);
- if (!fbdev->buffer) {
- err = -ENOMEM;
- goto err_free_fbdev;
- }
+ if (!fbdev->buffer)
+ return -ENOMEM;
fbdev->cache = devm_kmalloc(&client->dev, HT16K33_FB_SIZE, GFP_KERNEL);
if (!fbdev->cache) {
@@ -415,59 +457,7 @@ static int ht16k33_probe(struct i2c_client *client,
if (err)
goto err_fbdev_info;
- /* Keypad */
- keypad->dev = devm_input_allocate_device(&client->dev);
- if (!keypad->dev) {
- err = -ENOMEM;
- goto err_fbdev_unregister;
- }
-
- keypad->dev->name = DRIVER_NAME"-keypad";
- keypad->dev->id.bustype = BUS_I2C;
- keypad->dev->open = ht16k33_keypad_start;
- keypad->dev->close = ht16k33_keypad_stop;
-
- if (!of_get_property(node, "linux,no-autorepeat", NULL))
- __set_bit(EV_REP, keypad->dev->evbit);
-
- err = of_property_read_u32(node, "debounce-delay-ms",
- &keypad->debounce_ms);
- if (err) {
- dev_err(&client->dev, "key debounce delay not specified\n");
- goto err_fbdev_unregister;
- }
-
- err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
- ht16k33_irq_thread,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- DRIVER_NAME, priv);
- if (err) {
- dev_err(&client->dev, "irq request failed %d, error %d\n",
- client->irq, err);
- goto err_fbdev_unregister;
- }
-
- disable_irq_nosync(client->irq);
- rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS;
- cols = HT16K33_MATRIX_KEYPAD_MAX_COLS;
- err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols);
- if (err)
- goto err_fbdev_unregister;
-
- err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL,
- keypad->dev);
- if (err) {
- dev_err(&client->dev, "failed to build keymap\n");
- goto err_fbdev_unregister;
- }
-
- input_set_drvdata(keypad->dev, priv);
- keypad->rows = rows;
- keypad->cols = cols;
- keypad->row_shift = get_count_order(cols);
- INIT_DELAYED_WORK(&keypad->work, ht16k33_keypad_scan);
-
- err = input_register_device(keypad->dev);
+ err = ht16k33_keypad_probe(client, &priv->keypad);
if (err)
goto err_fbdev_unregister;
@@ -482,7 +472,7 @@ static int ht16k33_probe(struct i2c_client *client,
if (IS_ERR(bl)) {
dev_err(&client->dev, "failed to register backlight\n");
err = PTR_ERR(bl);
- goto err_keypad_unregister;
+ goto err_fbdev_unregister;
}
err = of_property_read_u32(node, "default-brightness-level",
@@ -502,18 +492,12 @@ static int ht16k33_probe(struct i2c_client *client,
ht16k33_fb_queue(priv);
return 0;
-err_keypad_unregister:
- input_unregister_device(keypad->dev);
err_fbdev_unregister:
unregister_framebuffer(fbdev->info);
err_fbdev_info:
framebuffer_release(fbdev->info);
err_fbdev_buffer:
free_page((unsigned long) fbdev->buffer);
-err_free_fbdev:
- kfree(fbdev);
-err_destroy_wq:
- destroy_workqueue(priv->workqueue);
return err;
}
@@ -521,17 +505,13 @@ err_destroy_wq:
static int ht16k33_remove(struct i2c_client *client)
{
struct ht16k33_priv *priv = i2c_get_clientdata(client);
- struct ht16k33_keypad *keypad = &priv->keypad;
struct ht16k33_fbdev *fbdev = &priv->fbdev;
- ht16k33_keypad_stop(keypad->dev);
-
cancel_delayed_work(&fbdev->work);
unregister_framebuffer(fbdev->info);
framebuffer_release(fbdev->info);
free_page((unsigned long) fbdev->buffer);
- destroy_workqueue(priv->workqueue);
return 0;
}
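
The keypad rework above replaces the private workqueue with a self-rescheduling threaded IRQ: the handler keeps scanning while any key is down, and wait_event_timeout() doubles as both the debounce delay and the stop signal from close(). A minimal sketch of that pattern, using hypothetical kp_* names rather than the driver's own and assuming a level-triggered, IRQF_ONESHOT interrupt:

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct kp_state {
	wait_queue_head_t wait;
	bool stopped;			/* set by close(), cleared by open() */
	unsigned int debounce_ms;
	int irq;
};

/* Device-specific matrix scan; returns true while any key is down. */
static bool kp_scan(struct kp_state *kp)
{
	return false;			/* stub: read the controller here */
}

static irqreturn_t kp_irq_thread(int irq, void *dev_id)
{
	struct kp_state *kp = dev_id;

	/* Sleep out the debounce interval, then rescan; a wake-up via
	 * kp->stopped aborts the loop when the input device is closed.
	 */
	do {
		wait_event_timeout(kp->wait, kp->stopped,
				   msecs_to_jiffies(kp->debounce_ms));
		if (kp->stopped)
			break;
	} while (kp_scan(kp));

	return IRQ_HANDLED;
}

static void kp_stop(struct kp_state *kp)
{
	kp->stopped = true;
	mb();				/* pairs with the reads in kp_irq_thread() */
	wake_up(&kp->wait);
	disable_irq(kp->irq);		/* waits for the threaded handler to finish */
}
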
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fde005ef9d36..4ee2a10207d0 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -571,9 +571,12 @@ config TELCLOCK
controlling the behavior of this hardware.
config DEVPORT
- bool
+ bool "/dev/port character device"
depends on ISA || PCI
default y
+ help
+ Say Y here if you want to support the /dev/port device. The /dev/port
+ device is similar to /dev/mem, but for I/O ports.
source "drivers/s390/char/Kconfig"
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index dd9dfa15e9d1..1dfb9f8de171 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -31,13 +31,6 @@
#include <linux/kthread.h>
#include <linux/delay.h>
-
-/*
- * The apm_bios device is one of the misc char devices.
- * This is its minor number.
- */
-#define APM_MINOR_DEV 134
-
/*
* One option can be changed at boot time as follows:
* apm=on/off enable/disable APM
diff --git a/drivers/char/ds1302.c b/drivers/char/ds1302.c
index 7d34b203718a..c614a56e68cc 100644
--- a/drivers/char/ds1302.c
+++ b/drivers/char/ds1302.c
@@ -17,7 +17,6 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <linux/bcd.h>
#include <linux/mutex.h>
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index f786b18ac500..b708c85dc9c1 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -463,9 +463,9 @@ static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
}
static struct miscdevice mmtimer_miscdev = {
- SGI_MMTIMER,
- MMTIMER_NAME,
- &mmtimer_fops
+ .minor = SGI_MMTIMER,
+ .name = MMTIMER_NAME,
+ .fops = &mmtimer_fops
};
static struct timespec sgi_clock_offset;
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c
index 53c3882e4981..35981cae1afa 100644
--- a/drivers/char/xilinx_hwicap/buffer_icap.c
+++ b/drivers/char/xilinx_hwicap/buffer_icap.c
@@ -269,7 +269,6 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
{
int status;
s32 buffer_count = 0;
- s32 num_writes = 0;
bool dirty = false;
u32 i;
void __iomem *base_address = drvdata->base_address;
@@ -298,7 +297,6 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
}
buffer_count = 0;
- num_writes++;
dirty = false;
}
@@ -328,7 +326,6 @@ int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
{
int status;
s32 buffer_count = 0;
- s32 read_count = 0;
u32 i;
void __iomem *base_address = drvdata->base_address;
@@ -353,7 +350,6 @@ int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
}
buffer_count = 0;
- read_count++;
}
/* Copy data from bram */
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 04788d92ea52..96bbae579c0b 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -42,6 +42,16 @@ config EXTCON_GPIO
Say Y here to enable GPIO based extcon support. Note that GPIO
extcon supports single state per extcon instance.
+config EXTCON_INTEL_INT3496
+ tristate "Intel INT3496 ACPI device extcon driver"
+ depends on GPIOLIB && ACPI
+ help
+ Say Y here to enable extcon support for USB OTG ports controlled by
+ an Intel INT3496 ACPI device.
+
+ This ACPI device is typically found on Intel Baytrail or Cherrytrail
+ based tablets, or other Baytrail / Cherrytrail devices.
+
config EXTCON_MAX14577
tristate "Maxim MAX14577/77836 EXTCON Support"
depends on MFD_MAX14577
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 31a0a999c4fb..237ac3f953c2 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o
obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
+obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
diff --git a/drivers/extcon/devres.c b/drivers/extcon/devres.c
index e686acd1c459..b40eb1805927 100644
--- a/drivers/extcon/devres.c
+++ b/drivers/extcon/devres.c
@@ -14,7 +14,7 @@
* GNU General Public License for more details.
*/
-#include <linux/extcon.h>
+#include "extcon.h"
static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
{
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index bc538708c753..6f6537ab0a79 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -67,7 +67,7 @@ static void adc_jack_handler(struct work_struct *work)
ret = iio_read_channel_raw(data->chan, &adc_val);
if (ret < 0) {
- dev_err(&data->edev->dev, "read channel() error: %d\n", ret);
+ dev_err(data->dev, "read channel() error: %d\n", ret);
return;
}
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index d836d4ce5ee4..ed78b7c26627 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -236,12 +236,8 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
mode %= info->micd_num_modes;
- if (arizona->pdata.micd_pol_gpio > 0)
- gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
- info->micd_modes[mode].gpio);
- else
- gpiod_set_value_cansleep(info->micd_pol_gpio,
- info->micd_modes[mode].gpio);
+ gpiod_set_value_cansleep(info->micd_pol_gpio,
+ info->micd_modes[mode].gpio);
regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_BIAS_SRC_MASK,
@@ -1412,21 +1408,21 @@ static int arizona_extcon_probe(struct platform_device *pdev)
regmap_update_bits(arizona->regmap, ARIZONA_GP_SWITCH_1,
ARIZONA_SW1_MODE_MASK, arizona->pdata.gpsw);
- if (arizona->pdata.micd_pol_gpio > 0) {
+ if (pdata->micd_pol_gpio > 0) {
if (info->micd_modes[0].gpio)
mode = GPIOF_OUT_INIT_HIGH;
else
mode = GPIOF_OUT_INIT_LOW;
- ret = devm_gpio_request_one(&pdev->dev,
- arizona->pdata.micd_pol_gpio,
- mode,
- "MICD polarity");
+ ret = devm_gpio_request_one(&pdev->dev, pdata->micd_pol_gpio,
+ mode, "MICD polarity");
if (ret != 0) {
dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
- arizona->pdata.micd_pol_gpio, ret);
+ pdata->micd_pol_gpio, ret);
goto err_register;
}
+
+ info->micd_pol_gpio = gpio_to_desc(pdata->micd_pol_gpio);
} else {
if (info->micd_modes[0].gpio)
mode = GPIOD_OUT_HIGH;
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 42f41e808292..f4fd03e58e37 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -21,7 +21,6 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/property.h>
-#include <linux/usb/phy.h>
#include <linux/notifier.h>
#include <linux/extcon.h>
#include <linux/regmap.h>
@@ -71,12 +70,6 @@
#define DET_STAT_CDP 2
#define DET_STAT_DCP 3
-/* IRQ enable-1 register */
-#define PWRSRC_IRQ_CFG_MASK (BIT(4)|BIT(3)|BIT(2))
-
-/* IRQ enable-6 register */
-#define BC12_IRQ_CFG_MASK BIT(1)
-
enum axp288_extcon_reg {
AXP288_PS_STAT_REG = 0x00,
AXP288_PS_BOOT_REASON_REG = 0x02,
@@ -84,8 +77,6 @@ enum axp288_extcon_reg {
AXP288_BC_VBUS_CNTL_REG = 0x2d,
AXP288_BC_USB_STAT_REG = 0x2e,
AXP288_BC_DET_STAT_REG = 0x2f,
- AXP288_PWRSRC_IRQ_CFG_REG = 0x40,
- AXP288_BC12_IRQ_CFG_REG = 0x45,
};
enum axp288_mux_select {
@@ -105,6 +96,7 @@ static const unsigned int axp288_extcon_cables[] = {
EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_CDP,
EXTCON_CHG_USB_DCP,
+ EXTCON_USB,
EXTCON_NONE,
};
@@ -112,11 +104,11 @@ struct axp288_extcon_info {
struct device *dev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
- struct axp288_extcon_pdata *pdata;
+ struct gpio_desc *gpio_mux_cntl;
int irq[EXTCON_IRQ_END];
struct extcon_dev *edev;
struct notifier_block extcon_nb;
- struct usb_phy *otg;
+ unsigned int previous_cable;
};
/* Power up/down reason string array */
@@ -156,10 +148,9 @@ static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
{
- static bool notify_otg, notify_charger;
- static unsigned int cable;
int ret, stat, cfg, pwr_stat;
u8 chrg_type;
+ unsigned int cable = info->previous_cable;
bool vbus_attach = false;
ret = regmap_read(info->regmap, AXP288_PS_STAT_REG, &pwr_stat);
@@ -168,9 +159,9 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
return ret;
}
- vbus_attach = (pwr_stat & PS_STAT_VBUS_PRESENT);
+ vbus_attach = (pwr_stat & PS_STAT_VBUS_VALID);
if (!vbus_attach)
- goto notify_otg;
+ goto no_vbus;
/* Check charger detection completion status */
ret = regmap_read(info->regmap, AXP288_BC_GLOBAL_REG, &cfg);
@@ -190,19 +181,14 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
switch (chrg_type) {
case DET_STAT_SDP:
dev_dbg(info->dev, "sdp cable is connected\n");
- notify_otg = true;
- notify_charger = true;
cable = EXTCON_CHG_USB_SDP;
break;
case DET_STAT_CDP:
dev_dbg(info->dev, "cdp cable is connected\n");
- notify_otg = true;
- notify_charger = true;
cable = EXTCON_CHG_USB_CDP;
break;
case DET_STAT_DCP:
dev_dbg(info->dev, "dcp cable is connected\n");
- notify_charger = true;
cable = EXTCON_CHG_USB_DCP;
break;
default:
@@ -210,27 +196,28 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
"disconnect or unknown or ID event\n");
}
-notify_otg:
- if (notify_otg) {
- /*
- * If VBUS is absent Connect D+/D- lines to PMIC for BC
- * detection. Else connect them to SOC for USB communication.
- */
- if (info->pdata->gpio_mux_cntl)
- gpiod_set_value(info->pdata->gpio_mux_cntl,
- vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
- : EXTCON_GPIO_MUX_SEL_PMIC);
-
- atomic_notifier_call_chain(&info->otg->notifier,
- vbus_attach ? USB_EVENT_VBUS : USB_EVENT_NONE, NULL);
- }
-
- if (notify_charger)
+no_vbus:
+ /*
+ * If VBUS is absent Connect D+/D- lines to PMIC for BC
+ * detection. Else connect them to SOC for USB communication.
+ */
+ if (info->gpio_mux_cntl)
+ gpiod_set_value(info->gpio_mux_cntl,
+ vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
+ : EXTCON_GPIO_MUX_SEL_PMIC);
+
+ extcon_set_state_sync(info->edev, info->previous_cable, false);
+ if (info->previous_cable == EXTCON_CHG_USB_SDP)
+ extcon_set_state_sync(info->edev, EXTCON_USB, false);
+
+ if (vbus_attach) {
extcon_set_state_sync(info->edev, cable, vbus_attach);
+ if (cable == EXTCON_CHG_USB_SDP)
+ extcon_set_state_sync(info->edev, EXTCON_USB,
+ vbus_attach);
- /* Clear the flags on disconnect event */
- if (!vbus_attach)
- notify_otg = notify_charger = false;
+ info->previous_cable = cable;
+ }
return 0;
@@ -253,15 +240,10 @@ static irqreturn_t axp288_extcon_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static void axp288_extcon_enable_irq(struct axp288_extcon_info *info)
+static void axp288_extcon_enable(struct axp288_extcon_info *info)
{
- /* Unmask VBUS interrupt */
- regmap_write(info->regmap, AXP288_PWRSRC_IRQ_CFG_REG,
- PWRSRC_IRQ_CFG_MASK);
regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
BC_GLOBAL_RUN, 0);
- /* Unmask the BC1.2 complete interrupts */
- regmap_write(info->regmap, AXP288_BC12_IRQ_CFG_REG, BC12_IRQ_CFG_MASK);
/* Enable the charger detection logic */
regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
BC_GLOBAL_RUN, BC_GLOBAL_RUN);
@@ -271,6 +253,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
{
struct axp288_extcon_info *info;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+ struct axp288_extcon_pdata *pdata = pdev->dev.platform_data;
int ret, i, pirq, gpio;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -280,15 +263,10 @@ static int axp288_extcon_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
- info->pdata = pdev->dev.platform_data;
-
- if (!info->pdata) {
- /* Try ACPI provided pdata via device properties */
- if (!device_property_present(&pdev->dev,
- "axp288_extcon_data\n"))
- dev_err(&pdev->dev, "failed to get platform data\n");
- return -ENODEV;
- }
+ info->previous_cable = EXTCON_NONE;
+ if (pdata)
+ info->gpio_mux_cntl = pdata->gpio_mux_cntl;
+
platform_set_drvdata(pdev, info);
axp288_extcon_log_rsi(info);
@@ -308,23 +286,16 @@ static int axp288_extcon_probe(struct platform_device *pdev)
return ret;
}
- /* Get otg transceiver phy */
- info->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
- if (IS_ERR(info->otg)) {
- dev_err(&pdev->dev, "failed to get otg transceiver\n");
- return PTR_ERR(info->otg);
- }
-
/* Set up gpio control for USB Mux */
- if (info->pdata->gpio_mux_cntl) {
- gpio = desc_to_gpio(info->pdata->gpio_mux_cntl);
+ if (info->gpio_mux_cntl) {
+ gpio = desc_to_gpio(info->gpio_mux_cntl);
ret = devm_gpio_request(&pdev->dev, gpio, "USB_MUX");
if (ret < 0) {
dev_err(&pdev->dev,
"failed to request the gpio=%d\n", gpio);
return ret;
}
- gpiod_direction_output(info->pdata->gpio_mux_cntl,
+ gpiod_direction_output(info->gpio_mux_cntl,
EXTCON_GPIO_MUX_SEL_PMIC);
}
@@ -349,14 +320,21 @@ static int axp288_extcon_probe(struct platform_device *pdev)
}
}
- /* Enable interrupts */
- axp288_extcon_enable_irq(info);
+ /* Start charger cable type detection */
+ axp288_extcon_enable(info);
return 0;
}
+static const struct platform_device_id axp288_extcon_table[] = {
+ { .name = "axp288_extcon" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, axp288_extcon_table);
+
static struct platform_driver axp288_extcon_driver = {
.probe = axp288_extcon_probe,
+ .id_table = axp288_extcon_table,
.driver = {
.name = "axp288_extcon",
},
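
The axp288 changes above drop the USB PHY notifier and the static notify_* flags in favour of a single previous_cable field: every detection event first retracts the last reported charger type, then reports the new one (plus EXTCON_USB for SDP). A hedged sketch of that bookkeeping, with an illustrative report_cable() helper that is not part of the driver:

#include <linux/extcon.h>

/* Illustrative helper, not from the driver. */
static void report_cable(struct extcon_dev *edev, unsigned int *prev_cable,
			 unsigned int cable, bool vbus_attached)
{
	/* Retract whatever was reported last time; a no-op for EXTCON_NONE. */
	extcon_set_state_sync(edev, *prev_cable, false);
	if (*prev_cable == EXTCON_CHG_USB_SDP)
		extcon_set_state_sync(edev, EXTCON_USB, false);

	if (vbus_attached) {
		extcon_set_state_sync(edev, cable, true);
		/* An SDP charger also implies a usable USB data connection. */
		if (cable == EXTCON_CHG_USB_SDP)
			extcon_set_state_sync(edev, EXTCON_USB, true);
		*prev_cable = cable;
	}
}
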
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
new file mode 100644
index 000000000000..a3131b036de6
--- /dev/null
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -0,0 +1,179 @@
+/*
+ * Intel INT3496 ACPI device extcon driver
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on android x86 kernel code which is:
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ * Author: David Cohen <david.a.cohen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/extcon.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define INT3496_GPIO_USB_ID 0
+#define INT3496_GPIO_VBUS_EN 1
+#define INT3496_GPIO_USB_MUX 2
+#define DEBOUNCE_TIME msecs_to_jiffies(50)
+
+struct int3496_data {
+ struct device *dev;
+ struct extcon_dev *edev;
+ struct delayed_work work;
+ struct gpio_desc *gpio_usb_id;
+ struct gpio_desc *gpio_vbus_en;
+ struct gpio_desc *gpio_usb_mux;
+ int usb_id_irq;
+};
+
+static const unsigned int int3496_cable[] = {
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+};
+
+static void int3496_do_usb_id(struct work_struct *work)
+{
+ struct int3496_data *data =
+ container_of(work, struct int3496_data, work.work);
+ int id = gpiod_get_value_cansleep(data->gpio_usb_id);
+
+ /* id == 1: PERIPHERAL, id == 0: HOST */
+ dev_dbg(data->dev, "Connected %s cable\n", id ? "PERIPHERAL" : "HOST");
+
+ /*
+ * Peripheral: set USB mux to peripheral and disable VBUS
+ * Host: set USB mux to host and enable VBUS
+ */
+ if (!IS_ERR(data->gpio_usb_mux))
+ gpiod_direction_output(data->gpio_usb_mux, id);
+
+ if (!IS_ERR(data->gpio_vbus_en))
+ gpiod_direction_output(data->gpio_vbus_en, !id);
+
+ extcon_set_state_sync(data->edev, EXTCON_USB_HOST, !id);
+}
+
+static irqreturn_t int3496_thread_isr(int irq, void *priv)
+{
+ struct int3496_data *data = priv;
+
+ /* Let the pin settle before processing it */
+ mod_delayed_work(system_wq, &data->work, DEBOUNCE_TIME);
+
+ return IRQ_HANDLED;
+}
+
+static int int3496_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct int3496_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+ INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);
+
+ data->gpio_usb_id = devm_gpiod_get_index(dev, "id",
+ INT3496_GPIO_USB_ID,
+ GPIOD_IN);
+ if (IS_ERR(data->gpio_usb_id)) {
+ ret = PTR_ERR(data->gpio_usb_id);
+ dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
+ return ret;
+ }
+
+ data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
+ if (data->usb_id_irq <= 0) {
+ dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
+ return -EINVAL;
+ }
+
+ data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en",
+ INT3496_GPIO_VBUS_EN,
+ GPIOD_ASIS);
+ if (IS_ERR(data->gpio_vbus_en))
+ dev_info(dev, "can't request VBUS EN GPIO\n");
+
+ data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux",
+ INT3496_GPIO_USB_MUX,
+ GPIOD_ASIS);
+ if (IS_ERR(data->gpio_usb_mux))
+ dev_info(dev, "can't request USB MUX GPIO\n");
+
+ /* register extcon device */
+ data->edev = devm_extcon_dev_allocate(dev, int3496_cable);
+ if (IS_ERR(data->edev))
+ return -ENOMEM;
+
+ ret = devm_extcon_dev_register(dev, data->edev);
+ if (ret < 0) {
+ dev_err(dev, "can't register extcon device: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, data->usb_id_irq,
+ NULL, int3496_thread_isr,
+ IRQF_SHARED | IRQF_ONESHOT |
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ dev_name(dev), data);
+ if (ret < 0) {
+ dev_err(dev, "can't request IRQ for USB ID GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /* queue initial processing of id-pin */
+ queue_delayed_work(system_wq, &data->work, 0);
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+}
+
+static int int3496_remove(struct platform_device *pdev)
+{
+ struct int3496_data *data = platform_get_drvdata(pdev);
+
+ devm_free_irq(&pdev->dev, data->usb_id_irq, data);
+ cancel_delayed_work_sync(&data->work);
+
+ return 0;
+}
+
+static struct acpi_device_id int3496_acpi_match[] = {
+ { "INT3496" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, int3496_acpi_match);
+
+static struct platform_driver int3496_driver = {
+ .driver = {
+ .name = "intel-int3496",
+ .acpi_match_table = int3496_acpi_match,
+ },
+ .probe = int3496_probe,
+ .remove = int3496_remove,
+};
+
+module_platform_driver(int3496_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Intel INT3496 ACPI device extcon driver");
+MODULE_LICENSE("GPL");
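
The new INT3496 driver debounces the ID pin in software: the threaded ISR only re-arms a single delayed work, so a bouncing pin collapses into one evaluation 50 ms after the last edge. A small sketch of that idiom with illustrative id_pin_* names (not the driver's own):

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define ID_DEBOUNCE	msecs_to_jiffies(50)

struct id_pin {
	struct delayed_work work;
	struct gpio_desc *gpiod;
};

static irqreturn_t id_pin_isr(int irq, void *priv)
{
	struct id_pin *pin = priv;

	/* Each edge just pushes the pending work further out. */
	mod_delayed_work(system_wq, &pin->work, ID_DEBOUNCE);
	return IRQ_HANDLED;
}

static void id_pin_work(struct work_struct *work)
{
	struct id_pin *pin = container_of(work, struct id_pin, work.work);
	int id = gpiod_get_value_cansleep(pin->gpiod);

	/* id == 0 means host on this hardware; switch the mux/VBUS and
	 * notify extcon here, as int3496_do_usb_id() does above.
	 */
	(void)id;
}
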
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index 12e26c4e7763..f6414b7fa5bc 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -531,8 +531,10 @@ static int max14577_parse_irq(struct max14577_muic_info *info, int irq_type)
case MAX14577_IRQ_INT1_ADC:
case MAX14577_IRQ_INT1_ADCLOW:
case MAX14577_IRQ_INT1_ADCERR:
- /* Handle all of accessory except for
- type of charger accessory */
+ /*
+ * Handle all of accessory except for
+ * type of charger accessory.
+ */
info->irq_adc = true;
return 1;
case MAX14577_IRQ_INT2_CHGTYP:
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 68dbcb814b2f..62163468f205 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -188,8 +188,10 @@ enum max77693_muic_acc_type {
MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE,
MAX77693_MUIC_ADC_OPEN,
- /* The below accessories have same ADC value so ADCLow and
- ADC1K bit is used to separate specific accessory */
+ /*
+ * The below accessories have same ADC value so ADCLow and
+ * ADC1K bit is used to separate specific accessory.
+ */
/* ADC|VBVolot|ADCLow|ADC1K| */
MAX77693_MUIC_GND_USB_HOST = 0x100, /* 0x0| 0| 0| 0| */
MAX77693_MUIC_GND_USB_HOST_VB = 0x104, /* 0x0| 1| 0| 0| */
@@ -970,8 +972,10 @@ static void max77693_muic_irq_work(struct work_struct *work)
case MAX77693_MUIC_IRQ_INT1_ADC_LOW:
case MAX77693_MUIC_IRQ_INT1_ADC_ERR:
case MAX77693_MUIC_IRQ_INT1_ADC1K:
- /* Handle all of accessory except for
- type of charger accessory */
+ /*
+ * Handle all of accessory except for
+ * type of charger accessory.
+ */
ret = max77693_muic_adc_handler(info);
break;
case MAX77693_MUIC_IRQ_INT2_CHGTYP:
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index 5d11fdf36e94..6e722d552cf1 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -97,8 +97,10 @@ enum max77843_muic_accessory_type {
MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE1,
MAX77843_MUIC_ADC_OPEN,
- /* The blow accessories should check
- not only ADC value but also ADC1K and VBVolt value. */
+ /*
+ * The below accessories should check
+ * not only ADC value but also ADC1K and VBVolt value.
+ */
/* Offset|ADC1K|VBVolt| */
MAX77843_MUIC_GND_USB_HOST = 0x100, /* 0x1| 0| 0| */
MAX77843_MUIC_GND_USB_HOST_VB = 0x101, /* 0x1| 0| 1| */
@@ -265,16 +267,20 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
/* Check GROUND accessory with charger cable */
if (adc == MAX77843_MUIC_ADC_GROUND) {
if (chg_type == MAX77843_MUIC_CHG_NONE) {
- /* The following state when charger cable is
+ /*
+ * The following state when charger cable is
* disconnected but the GROUND accessory still
- * connected */
+ * connected.
+ */
*attached = false;
cable_type = info->prev_chg_type;
info->prev_chg_type = MAX77843_MUIC_CHG_NONE;
} else {
- /* The following state when charger cable is
- * connected on the GROUND accessory */
+ /*
+ * The following state when charger cable is
+ * connected on the GROUND accessory.
+ */
*attached = true;
cable_type = MAX77843_MUIC_CHG_GND;
info->prev_chg_type = MAX77843_MUIC_CHG_GND;
@@ -299,11 +305,13 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
} else {
*attached = true;
- /* Offset|ADC1K|VBVolt|
+ /*
+ * Offset|ADC1K|VBVolt|
* 0x1| 0| 0| USB-HOST
* 0x1| 0| 1| USB-HOST with VB
* 0x1| 1| 0| MHL
- * 0x1| 1| 1| MHL with VB */
+ * 0x1| 1| 1| MHL with VB
+ */
/* Get ADC1K register bit */
gnd_type = (info->status[MAX77843_MUIC_STATUS1] &
MAX77843_MUIC_STATUS1_ADC1K_MASK);
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 634ba70782de..ca904e8b3235 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -62,7 +62,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) {
palmas_usb->linkstat = PALMAS_USB_STATE_VBUS;
extcon_set_state_sync(edev, EXTCON_USB, true);
- dev_info(palmas_usb->dev, "USB cable is attached\n");
+ dev_dbg(palmas_usb->dev, "USB cable is attached\n");
} else {
dev_dbg(palmas_usb->dev,
"Spurious connect event detected\n");
@@ -71,7 +71,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) {
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
extcon_set_state_sync(edev, EXTCON_USB, false);
- dev_info(palmas_usb->dev, "USB cable is detached\n");
+ dev_dbg(palmas_usb->dev, "USB cable is detached\n");
} else {
dev_dbg(palmas_usb->dev,
"Spurious disconnect event detected\n");
@@ -99,7 +99,7 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
- dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
+ dev_dbg(palmas_usb->dev, "USB-HOST cable is attached\n");
} else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) &&
(id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) {
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
@@ -107,17 +107,17 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
- dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+ dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
} else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) &&
(!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) {
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
- dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+ dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
} else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) &&
(id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
- dev_info(palmas_usb->dev, " USB-HOST cable is attached\n");
+ dev_dbg(palmas_usb->dev, " USB-HOST cable is attached\n");
}
return IRQ_HANDLED;
@@ -138,10 +138,10 @@ static void palmas_gpio_id_detect(struct work_struct *work)
if (id) {
extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
- dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+ dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
} else {
extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
- dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
+ dev_dbg(palmas_usb->dev, "USB-HOST cable is attached\n");
}
}
@@ -190,6 +190,11 @@ static int palmas_usb_probe(struct platform_device *pdev)
struct palmas_usb *palmas_usb;
int status;
+ if (!palmas) {
+ dev_err(&pdev->dev, "failed to get valid parent\n");
+ return -EINVAL;
+ }
+
palmas_usb = devm_kzalloc(&pdev->dev, sizeof(*palmas_usb), GFP_KERNEL);
if (!palmas_usb)
return -ENOMEM;
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index 174c388739ea..3e882aa107e8 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -142,8 +142,10 @@ enum rt8973a_muic_acc_type {
RT8973A_MUIC_ADC_UNKNOWN_ACC_5,
RT8973A_MUIC_ADC_OPEN = 0x1f,
- /* The below accessories has same ADC value (0x1f).
- So, Device type1 is used to separate specific accessory. */
+ /*
+ * The below accessories have the same ADC value (0x1f).
+ * So, Device type1 is used to separate specific accessory.
+ */
/* |---------|--ADC| */
/* | [7:5]|[4:0]| */
RT8973A_MUIC_ADC_USB = 0x3f, /* | 001|11111| */
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index b22325688503..106ef0297b53 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -135,8 +135,10 @@ enum sm5502_muic_acc_type {
SM5502_MUIC_ADC_AUDIO_TYPE1,
SM5502_MUIC_ADC_OPEN = 0x1f,
- /* The below accessories have same ADC value (0x1f or 0x1e).
- So, Device type1 is used to separate specific accessory. */
+ /*
+ * The below accessories have same ADC value (0x1f or 0x1e).
+ * So, Device type1 is used to separate specific accessory.
+ */
/* |---------|--ADC| */
/* | [7:5]|[4:0]| */
SM5502_MUIC_ADC_AUDIO_TYPE1_FULL_REMOTE = 0x3e, /* | 001|11110| */
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index d589c5feff3d..a5e1882b4ca6 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/acpi.h>
+#include <linux/pinctrl/consumer.h>
#define USB_GPIO_DEBOUNCE_MS 20 /* ms */
@@ -245,6 +246,9 @@ static int usb_extcon_suspend(struct device *dev)
if (info->vbus_gpiod)
disable_irq(info->vbus_irq);
+ if (!device_may_wakeup(dev))
+ pinctrl_pm_select_sleep_state(dev);
+
return ret;
}
@@ -253,6 +257,9 @@ static int usb_extcon_resume(struct device *dev)
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
+ if (!device_may_wakeup(dev))
+ pinctrl_pm_select_default_state(dev);
+
if (device_may_wakeup(dev)) {
if (info->id_gpiod) {
ret = disable_irq_wake(info->id_irq);
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 7c1e3a7b14e0..09ac5e70c2f3 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -30,11 +30,12 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/err.h>
-#include <linux/extcon.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include "extcon.h"
+
#define SUPPORTED_CABLE_MAX 32
#define CABLE_NAME_MAX 30
@@ -59,7 +60,7 @@ struct __extcon_info {
[EXTCON_USB_HOST] = {
.type = EXTCON_TYPE_USB,
.id = EXTCON_USB_HOST,
- .name = "USB_HOST",
+ .name = "USB-HOST",
},
/* Charging external connector */
@@ -98,6 +99,11 @@ struct __extcon_info {
.id = EXTCON_CHG_WPT,
.name = "WPT",
},
+ [EXTCON_CHG_USB_PD] = {
+ .type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
+ .id = EXTCON_CHG_USB_PD,
+ .name = "PD",
+ },
/* Jack external connector */
[EXTCON_JACK_MICROPHONE] = {
@@ -906,35 +912,16 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
unsigned long flags;
int ret, idx = -EINVAL;
- if (!nb)
+ if (!edev || !nb)
return -EINVAL;
- if (edev) {
- idx = find_cable_index_by_id(edev, id);
- if (idx < 0)
- return idx;
-
- spin_lock_irqsave(&edev->lock, flags);
- ret = raw_notifier_chain_register(&edev->nh[idx], nb);
- spin_unlock_irqrestore(&edev->lock, flags);
- } else {
- struct extcon_dev *extd;
-
- mutex_lock(&extcon_dev_list_lock);
- list_for_each_entry(extd, &extcon_dev_list, entry) {
- idx = find_cable_index_by_id(extd, id);
- if (idx >= 0)
- break;
- }
- mutex_unlock(&extcon_dev_list_lock);
+ idx = find_cable_index_by_id(edev, id);
+ if (idx < 0)
+ return idx;
- if (idx >= 0) {
- edev = extd;
- return extcon_register_notifier(extd, id, nb);
- } else {
- ret = -ENODEV;
- }
- }
+ spin_lock_irqsave(&edev->lock, flags);
+ ret = raw_notifier_chain_register(&edev->nh[idx], nb);
+ spin_unlock_irqrestore(&edev->lock, flags);
return ret;
}
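
With the fallback removed, extcon_register_notifier() now insists on a non-NULL edev (and notifier block) and returns -EINVAL otherwise, so consumers resolve their extcon device explicitly first. A hedged consumer-side sketch, with illustrative names and assuming the device references its provider through the usual "extcon" phandle:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/notifier.h>

static int usb_host_evt(struct notifier_block *nb, unsigned long state,
			void *ptr)
{
	/* state is non-zero while the USB-HOST cable is attached */
	return NOTIFY_OK;
}

static struct notifier_block usb_host_nb = {
	.notifier_call = usb_host_evt,
};

static int consumer_attach(struct device *dev)
{
	struct extcon_dev *edev;

	edev = extcon_get_edev_by_phandle(dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	return extcon_register_notifier(edev, EXTCON_USB_HOST, &usb_host_nb);
}
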
diff --git a/drivers/extcon/extcon.h b/drivers/extcon/extcon.h
new file mode 100644
index 000000000000..993ddccafe11
--- /dev/null
+++ b/drivers/extcon/extcon.h
@@ -0,0 +1,62 @@
+#ifndef __LINUX_EXTCON_INTERNAL_H__
+#define __LINUX_EXTCON_INTERNAL_H__
+
+#include <linux/extcon.h>
+
+/**
+ * struct extcon_dev - An extcon device represents one external connector.
+ * @name: The name of this extcon device. Parent device name is
+ * used if NULL.
+ * @supported_cable: Array of supported cable names ending with EXTCON_NONE.
+ * If supported_cable is NULL, cable name related APIs
+ * are disabled.
+ * @mutually_exclusive: Array of mutually exclusive set of cables that cannot
+ * be attached simultaneously. The array should be
+ * ending with NULL or be NULL (no mutually exclusive
+ * cables). For example, if it is { 0x7, 0x30, 0}, then,
+ * {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot
+ * be attached simulataneously. {0x7, 0} is equivalent to
+ * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
+ * can be no simultaneous connections.
+ * @dev: Device of this extcon.
+ * @state: Attach/detach state of this extcon. Do not provide at
+ * register-time.
+ * @nh: Notifier for the state change events from this extcon
+ * @entry: To support list of extcon devices so that users can
+ * search for extcon devices based on the extcon name.
+ * @lock:
+ * @max_supported: Internal value to store the number of cables.
+ * @extcon_dev_type: Device_type struct to provide attribute_groups
+ * customized for each extcon device.
+ * @cables: Sysfs subdirectories. Each represents one cable.
+ *
+ * In most cases, users only need to provide "User initializing data" of
+ * this struct when registering an extcon. In some exceptional cases,
+ * optional callbacks may be needed. However, the values in "internal data"
+ * are overwritten by register function.
+ */
+struct extcon_dev {
+ /* Optional user initializing data */
+ const char *name;
+ const unsigned int *supported_cable;
+ const u32 *mutually_exclusive;
+
+ /* Internal data. Please do not set. */
+ struct device dev;
+ struct raw_notifier_head *nh;
+ struct list_head entry;
+ int max_supported;
+ spinlock_t lock; /* could be called by irq handler */
+ u32 state;
+
+ /* /sys/class/extcon/.../cable.n/... */
+ struct device_type extcon_dev_type;
+ struct extcon_cable *cables;
+
+ /* /sys/class/extcon/.../mutually_exclusive/... */
+ struct attribute_group attr_g_muex;
+ struct attribute **attrs_muex;
+ struct device_attribute *d_attrs_muex;
+};
+
+#endif /* __LINUX_EXTCON_INTERNAL_H__ */
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index f0a69d3e60a5..86d2cb203533 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -25,16 +25,106 @@
#include <linux/of.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
static DEFINE_IDA(fpga_mgr_ida);
static struct class *fpga_mgr_class;
+/*
+ * Call the low level driver's write_init function. This will do the
+ * device-specific things to get the FPGA into the state where it is ready to
+ * receive an FPGA image. The low level driver only gets to see the first
+ * initial_header_size bytes in the buffer.
+ */
+static int fpga_mgr_write_init_buf(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ mgr->state = FPGA_MGR_STATE_WRITE_INIT;
+ if (!mgr->mops->initial_header_size)
+ ret = mgr->mops->write_init(mgr, info, NULL, 0);
+ else
+ ret = mgr->mops->write_init(
+ mgr, info, buf, min(mgr->mops->initial_header_size, count));
+
+ if (ret) {
+ dev_err(&mgr->dev, "Error preparing FPGA for writing\n");
+ mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fpga_mgr_write_init_sg(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ struct sg_table *sgt)
+{
+ struct sg_mapping_iter miter;
+ size_t len;
+ char *buf;
+ int ret;
+
+ if (!mgr->mops->initial_header_size)
+ return fpga_mgr_write_init_buf(mgr, info, NULL, 0);
+
+ /*
+ * First try to use miter to map the first fragment to access the
+ * header, this is the typical path.
+ */
+ sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
+ if (sg_miter_next(&miter) &&
+ miter.length >= mgr->mops->initial_header_size) {
+ ret = fpga_mgr_write_init_buf(mgr, info, miter.addr,
+ miter.length);
+ sg_miter_stop(&miter);
+ return ret;
+ }
+ sg_miter_stop(&miter);
+
+ /* Otherwise copy the fragments into temporary memory. */
+ buf = kmalloc(mgr->mops->initial_header_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = sg_copy_to_buffer(sgt->sgl, sgt->nents, buf,
+ mgr->mops->initial_header_size);
+ ret = fpga_mgr_write_init_buf(mgr, info, buf, len);
+
+ kfree(buf);
+
+ return ret;
+}
+
+/*
+ * After all the FPGA image has been written, do the device specific steps to
+ * finish and set the FPGA into operating mode.
+ */
+static int fpga_mgr_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ int ret;
+
+ mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE;
+ ret = mgr->mops->write_complete(mgr, info);
+ if (ret) {
+ dev_err(&mgr->dev, "Error after writing image data to FPGA\n");
+ mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
+ return ret;
+ }
+ mgr->state = FPGA_MGR_STATE_OPERATING;
+
+ return 0;
+}
+
/**
- * fpga_mgr_buf_load - load fpga from image in buffer
+ * fpga_mgr_buf_load_sg - load fpga from image in buffer from a scatter list
* @mgr: fpga manager
* @info: fpga image specific information
- * @buf: buffer contain fpga image
- * @count: byte count of buf
+ * @sgt: scatterlist table
*
* Step the low level fpga manager through the device-specific steps of getting
* an FPGA ready to be configured, writing the image to it, then doing whatever
@@ -42,54 +132,139 @@ static struct class *fpga_mgr_class;
* mgr pointer from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is
* not an error code.
*
+ * This is the preferred entry point for FPGA programming, it does not require
+ * any contiguous kernel memory.
+ *
* Return: 0 on success, negative error code otherwise.
*/
-int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
- const char *buf, size_t count)
+int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
+ struct sg_table *sgt)
{
- struct device *dev = &mgr->dev;
int ret;
- /*
- * Call the low level driver's write_init function. This will do the
- * device-specific things to get the FPGA into the state where it is
- * ready to receive an FPGA image. The low level driver only gets to
- * see the first initial_header_size bytes in the buffer.
- */
- mgr->state = FPGA_MGR_STATE_WRITE_INIT;
- ret = mgr->mops->write_init(mgr, info, buf,
- min(mgr->mops->initial_header_size, count));
+ ret = fpga_mgr_write_init_sg(mgr, info, sgt);
+ if (ret)
+ return ret;
+
+ /* Write the FPGA image to the FPGA. */
+ mgr->state = FPGA_MGR_STATE_WRITE;
+ if (mgr->mops->write_sg) {
+ ret = mgr->mops->write_sg(mgr, sgt);
+ } else {
+ struct sg_mapping_iter miter;
+
+ sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
+ while (sg_miter_next(&miter)) {
+ ret = mgr->mops->write(mgr, miter.addr, miter.length);
+ if (ret)
+ break;
+ }
+ sg_miter_stop(&miter);
+ }
+
if (ret) {
- dev_err(dev, "Error preparing FPGA for writing\n");
- mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR;
+ dev_err(&mgr->dev, "Error while writing image data to FPGA\n");
+ mgr->state = FPGA_MGR_STATE_WRITE_ERR;
return ret;
}
+ return fpga_mgr_write_complete(mgr, info);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_buf_load_sg);
+
+static int fpga_mgr_buf_load_mapped(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ ret = fpga_mgr_write_init_buf(mgr, info, buf, count);
+ if (ret)
+ return ret;
+
/*
* Write the FPGA image to the FPGA.
*/
mgr->state = FPGA_MGR_STATE_WRITE;
ret = mgr->mops->write(mgr, buf, count);
if (ret) {
- dev_err(dev, "Error while writing image data to FPGA\n");
+ dev_err(&mgr->dev, "Error while writing image data to FPGA\n");
mgr->state = FPGA_MGR_STATE_WRITE_ERR;
return ret;
}
+ return fpga_mgr_write_complete(mgr, info);
+}
+
+/**
+ * fpga_mgr_buf_load - load fpga from image in buffer
+ * @mgr: fpga manager
+ * @info: fpga image specific information
+ * @buf: buffer contain fpga image
+ * @count: byte count of buf
+ *
+ * Step the low level fpga manager through the device-specific steps of getting
+ * an FPGA ready to be configured, writing the image to it, then doing whatever
+ * post-configuration steps necessary. This code assumes the caller got the
+ * mgr pointer from of_fpga_mgr_get() and checked that it is not an error code.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct page **pages;
+ struct sg_table sgt;
+ const void *p;
+ int nr_pages;
+ int index;
+ int rc;
+
/*
- * After all the FPGA image has been written, do the device specific
- * steps to finish and set the FPGA into operating mode.
+ * This is just a fast path if the caller has already created a
+ * contiguous kernel buffer and the driver doesn't require SG, non-SG
+ * drivers will still work on the slow path.
*/
- mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE;
- ret = mgr->mops->write_complete(mgr, info);
- if (ret) {
- dev_err(dev, "Error after writing image data to FPGA\n");
- mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
- return ret;
+ if (mgr->mops->write)
+ return fpga_mgr_buf_load_mapped(mgr, info, buf, count);
+
+ /*
+ * Convert the linear kernel pointer into a sg_table of pages for use
+ * by the driver.
+ */
+ nr_pages = DIV_ROUND_UP((unsigned long)buf + count, PAGE_SIZE) -
+ (unsigned long)buf / PAGE_SIZE;
+ pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ p = buf - offset_in_page(buf);
+ for (index = 0; index < nr_pages; index++) {
+ if (is_vmalloc_addr(p))
+ pages[index] = vmalloc_to_page(p);
+ else
+ pages[index] = kmap_to_page((void *)p);
+ if (!pages[index]) {
+ kfree(pages);
+ return -EFAULT;
+ }
+ p += PAGE_SIZE;
}
- mgr->state = FPGA_MGR_STATE_OPERATING;
- return 0;
+ /*
+ * The temporary pages list is used to code share the merging algorithm
+ * in sg_alloc_table_from_pages
+ */
+ rc = sg_alloc_table_from_pages(&sgt, pages, index, offset_in_page(buf),
+ count, GFP_KERNEL);
+ kfree(pages);
+ if (rc)
+ return rc;
+
+ rc = fpga_mgr_buf_load_sg(mgr, info, &sgt);
+ sg_free_table(&sgt);
+
+ return rc;
}
EXPORT_SYMBOL_GPL(fpga_mgr_buf_load);
@@ -291,8 +466,9 @@ int fpga_mgr_register(struct device *dev, const char *name,
struct fpga_manager *mgr;
int id, ret;
- if (!mops || !mops->write_init || !mops->write ||
- !mops->write_complete || !mops->state) {
+ if (!mops || !mops->write_complete || !mops->state ||
+ !mops->write_init || (!mops->write && !mops->write_sg) ||
+ (mops->write && mops->write_sg)) {
dev_err(dev, "Attempt to register without fpga_manager_ops\n");
return -EINVAL;
}
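
For driver authors, the registration check above now requires exactly one of .write or .write_sg, and initial_header_size still decides how much of the image write_init() sees. A hedged sketch of a scatter-gather capable ops table, using demo_* placeholders rather than any real driver:

#include <linux/fpga/fpga-mgr.h>
#include <linux/scatterlist.h>

static enum fpga_mgr_states demo_state(struct fpga_manager *mgr)
{
	return FPGA_MGR_STATE_UNKNOWN;
}

static int demo_write_init(struct fpga_manager *mgr,
			   struct fpga_image_info *info,
			   const char *buf, size_t count)
{
	/* buf holds at most .initial_header_size bytes of the image here */
	return 0;
}

static int demo_write_sg(struct fpga_manager *mgr, struct sg_table *sgt)
{
	/* hand sgt to the device's DMA engine chunk by chunk */
	return 0;
}

static int demo_write_complete(struct fpga_manager *mgr,
			       struct fpga_image_info *info)
{
	return 0;
}

static const struct fpga_manager_ops demo_ops = {
	.initial_header_size	= 128,
	.state			= demo_state,
	.write_init		= demo_write_init,
	.write_sg		= demo_write_sg,	/* no .write */
	.write_complete		= demo_write_complete,
};
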
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 1812bf7614e1..34cb98139442 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -30,6 +30,7 @@
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/string.h>
+#include <linux/scatterlist.h>
/* Offsets into SLCR regmap */
@@ -80,6 +81,7 @@
/* FPGA init status */
#define STATUS_DMA_Q_F BIT(31)
+#define STATUS_DMA_Q_E BIT(30)
#define STATUS_PCFG_INIT_MASK BIT(4)
/* Interrupt Status/Mask Register Bit definitions */
@@ -89,7 +91,7 @@
#define IXR_D_P_DONE_MASK BIT(12)
/* FPGA programmed */
#define IXR_PCFG_DONE_MASK BIT(2)
-#define IXR_ERROR_FLAGS_MASK 0x00F0F860
+#define IXR_ERROR_FLAGS_MASK 0x00F0C860
#define IXR_ALL_MASK 0xF8F7F87F
/* Miscellaneous constant values */
@@ -98,12 +100,16 @@
#define DMA_INVALID_ADDRESS GENMASK(31, 0)
/* Used to unlock the dev */
#define UNLOCK_MASK 0x757bdf0d
-/* Timeout for DMA to complete */
-#define DMA_DONE_TIMEOUT msecs_to_jiffies(1000)
/* Timeout for polling reset bits */
#define INIT_POLL_TIMEOUT 2500000
/* Delay for polling reset bits */
#define INIT_POLL_DELAY 20
+/* Signal this is the last DMA transfer, wait for the AXI and PCAP before
+ * interrupting
+ */
+#define DMA_SRC_LAST_TRANSFER 1
+/* Timeout for DMA completion */
+#define DMA_TIMEOUT_MS 5000
/* Masks for controlling stuff in SLCR */
/* Disable all Level shifters */
@@ -124,6 +130,11 @@ struct zynq_fpga_priv {
void __iomem *io_base;
struct regmap *slcr;
+ spinlock_t dma_lock;
+ unsigned int dma_elm;
+ unsigned int dma_nelms;
+ struct scatterlist *cur_sg;
+
struct completion dma_done;
};
@@ -143,37 +154,104 @@ static inline u32 zynq_fpga_read(const struct zynq_fpga_priv *priv,
readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \
timeout_us)
-static void zynq_fpga_mask_irqs(struct zynq_fpga_priv *priv)
+/* Cause the specified irq mask bits to generate IRQs */
+static inline void zynq_fpga_set_irq(struct zynq_fpga_priv *priv, u32 enable)
{
- u32 intr_mask;
-
- intr_mask = zynq_fpga_read(priv, INT_MASK_OFFSET);
- zynq_fpga_write(priv, INT_MASK_OFFSET,
- intr_mask | IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
+ zynq_fpga_write(priv, INT_MASK_OFFSET, ~enable);
}
-static void zynq_fpga_unmask_irqs(struct zynq_fpga_priv *priv)
+/* Must be called with dma_lock held */
+static void zynq_step_dma(struct zynq_fpga_priv *priv)
{
- u32 intr_mask;
+ u32 addr;
+ u32 len;
+ bool first;
+
+ first = priv->dma_elm == 0;
+ while (priv->cur_sg) {
+ /* Feed the DMA queue until it is full. */
+ if (zynq_fpga_read(priv, STATUS_OFFSET) & STATUS_DMA_Q_F)
+ break;
+
+ addr = sg_dma_address(priv->cur_sg);
+ len = sg_dma_len(priv->cur_sg);
+ if (priv->dma_elm + 1 == priv->dma_nelms) {
+ /* The last transfer waits for the PCAP to finish too,
+ * notice this also changes the irq_mask to ignore
+ * IXR_DMA_DONE_MASK which ensures we do not trigger
+ * the completion too early.
+ */
+ addr |= DMA_SRC_LAST_TRANSFER;
+ priv->cur_sg = NULL;
+ } else {
+ priv->cur_sg = sg_next(priv->cur_sg);
+ priv->dma_elm++;
+ }
- intr_mask = zynq_fpga_read(priv, INT_MASK_OFFSET);
- zynq_fpga_write(priv, INT_MASK_OFFSET,
- intr_mask
- & ~(IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK));
+ zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, addr);
+ zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, DMA_INVALID_ADDRESS);
+ zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, len / 4);
+ zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);
+ }
+
+ /* Once the first transfer is queued we can turn on the ISR, future
+ * calls to zynq_step_dma will happen from the ISR context. The
+ * dma_lock spinlock guarantees this handover is done coherently, the
+ * ISR enable is put at the end to avoid another CPU spinning in the
+ * ISR on this lock.
+ */
+ if (first && priv->cur_sg) {
+ zynq_fpga_set_irq(priv,
+ IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
+ } else if (!priv->cur_sg) {
+ /* The last transfer changes to DMA & PCAP mode since we do
+ * not want to continue until everything has been flushed into
+ * the PCAP.
+ */
+ zynq_fpga_set_irq(priv,
+ IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK);
+ }
}
static irqreturn_t zynq_fpga_isr(int irq, void *data)
{
struct zynq_fpga_priv *priv = data;
+ u32 intr_status;
- /* disable DMA and error IRQs */
- zynq_fpga_mask_irqs(priv);
+ /* If anything other than DMA completion is reported stop and hand
+ * control back to zynq_fpga_ops_write, something went wrong,
+ * otherwise progress the DMA.
+ */
+ spin_lock(&priv->dma_lock);
+ intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
+ if (!(intr_status & IXR_ERROR_FLAGS_MASK) &&
+ (intr_status & IXR_DMA_DONE_MASK) && priv->cur_sg) {
+ zynq_fpga_write(priv, INT_STS_OFFSET, IXR_DMA_DONE_MASK);
+ zynq_step_dma(priv);
+ spin_unlock(&priv->dma_lock);
+ return IRQ_HANDLED;
+ }
+ spin_unlock(&priv->dma_lock);
+ zynq_fpga_set_irq(priv, 0);
complete(&priv->dma_done);
return IRQ_HANDLED;
}
+/* Sanity check the proposed bitstream. It must start with the sync word in
+ * the correct byte order, and be dword aligned. The input is a Xilinx .bin
+ * file with every 32 bit quantity swapped.
+ */
+static bool zynq_fpga_has_sync(const u8 *buf, size_t count)
+{
+ for (; count >= 4; buf += 4, count -= 4)
+ if (buf[0] == 0x66 && buf[1] == 0x55 && buf[2] == 0x99 &&
+ buf[3] == 0xaa)
+ return true;
+ return false;
+}
+
static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
struct fpga_image_info *info,
const char *buf, size_t count)
@@ -190,6 +268,13 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
/* don't globally reset PL if we're doing partial reconfig */
if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ if (!zynq_fpga_has_sync(buf, count)) {
+ dev_err(&mgr->dev,
+ "Invalid bitstream, could not find a sync word. Bitstream must be a byte swapped .bin file\n");
+ err = -EINVAL;
+ goto out_err;
+ }
+
/* assert AXI interface resets */
regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
FPGA_RST_ALL_MASK);
@@ -259,10 +344,11 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
zynq_fpga_write(priv, CTRL_OFFSET,
(CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK | ctrl));
- /* check that we have room in the command queue */
+ /* We expect that the command queue is empty right now. */
status = zynq_fpga_read(priv, STATUS_OFFSET);
- if (status & STATUS_DMA_Q_F) {
- dev_err(&mgr->dev, "DMA command queue full\n");
+ if ((status & STATUS_DMA_Q_F) ||
+ (status & STATUS_DMA_Q_E) != STATUS_DMA_Q_E) {
+ dev_err(&mgr->dev, "DMA command queue not right\n");
err = -EBUSY;
goto out_err;
}
@@ -281,26 +367,36 @@ out_err:
return err;
}
-static int zynq_fpga_ops_write(struct fpga_manager *mgr,
- const char *buf, size_t count)
+static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
{
struct zynq_fpga_priv *priv;
+ const char *why;
int err;
- char *kbuf;
- size_t in_count;
- dma_addr_t dma_addr;
- u32 transfer_length;
u32 intr_status;
+ unsigned long timeout;
+ unsigned long flags;
+ struct scatterlist *sg;
+ int i;
- in_count = count;
priv = mgr->priv;
- kbuf =
- dma_alloc_coherent(mgr->dev.parent, count, &dma_addr, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
+ /* The hardware can only DMA multiples of 4 bytes, and it requires the
+ * starting addresses to be aligned to 64 bits (UG585 pg 212).
+ */
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ if ((sg->offset % 8) || (sg->length % 4)) {
+ dev_err(&mgr->dev,
+ "Invalid bitstream, chunks must be aligned\n");
+ return -EINVAL;
+ }
+ }
- memcpy(kbuf, buf, count);
+ priv->dma_nelms =
+ dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ if (priv->dma_nelms == 0) {
+ dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
+ return -ENOMEM;
+ }
/* enable clock */
err = clk_enable(priv->clk);
@@ -308,38 +404,67 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr,
goto out_free;
zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
-
reinit_completion(&priv->dma_done);
- /* enable DMA and error IRQs */
- zynq_fpga_unmask_irqs(priv);
+ /* zynq_step_dma will turn on interrupts */
+ spin_lock_irqsave(&priv->dma_lock, flags);
+ priv->dma_elm = 0;
+ priv->cur_sg = sgt->sgl;
+ zynq_step_dma(priv);
+ spin_unlock_irqrestore(&priv->dma_lock, flags);
- /* the +1 in the src addr is used to hold off on DMA_DONE IRQ
- * until both AXI and PCAP are done ...
- */
- zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, (u32)(dma_addr) + 1);
- zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, (u32)DMA_INVALID_ADDRESS);
+ timeout = wait_for_completion_timeout(&priv->dma_done,
+ msecs_to_jiffies(DMA_TIMEOUT_MS));
- /* convert #bytes to #words */
- transfer_length = (count + 3) / 4;
+ spin_lock_irqsave(&priv->dma_lock, flags);
+ zynq_fpga_set_irq(priv, 0);
+ priv->cur_sg = NULL;
+ spin_unlock_irqrestore(&priv->dma_lock, flags);
- zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, transfer_length);
- zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);
+ intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
+ zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
- wait_for_completion(&priv->dma_done);
+ /* There doesn't seem to be a way to force-cancel an in-flight DMA, so
+ * if something went wrong we rely on the hardware having halted the
+ * DMA before we get here; if such a mechanism existed, we could also
+ * use wait_for_completion_interruptible.
+ */
- intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
- zynq_fpga_write(priv, INT_STS_OFFSET, intr_status);
+ if (intr_status & IXR_ERROR_FLAGS_MASK) {
+ why = "DMA reported error";
+ err = -EIO;
+ goto out_report;
+ }
- if (!((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
- dev_err(&mgr->dev, "Error configuring FPGA\n");
- err = -EFAULT;
+ if (priv->cur_sg ||
+ !((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
+ if (timeout == 0)
+ why = "DMA timed out";
+ else
+ why = "DMA did not complete";
+ err = -EIO;
+ goto out_report;
}
+ err = 0;
+ goto out_clk;
+
+out_report:
+ dev_err(&mgr->dev,
+ "%s: INT_STS:0x%x CTRL:0x%x LOCK:0x%x INT_MASK:0x%x STATUS:0x%x MCTRL:0x%x\n",
+ why,
+ intr_status,
+ zynq_fpga_read(priv, CTRL_OFFSET),
+ zynq_fpga_read(priv, LOCK_OFFSET),
+ zynq_fpga_read(priv, INT_MASK_OFFSET),
+ zynq_fpga_read(priv, STATUS_OFFSET),
+ zynq_fpga_read(priv, MCTRL_OFFSET));
+
+out_clk:
clk_disable(priv->clk);
out_free:
- dma_free_coherent(mgr->dev.parent, count, kbuf, dma_addr);
+ dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
return err;
}
@@ -400,9 +525,10 @@ static enum fpga_mgr_states zynq_fpga_ops_state(struct fpga_manager *mgr)
}
static const struct fpga_manager_ops zynq_fpga_ops = {
+ .initial_header_size = 128,
.state = zynq_fpga_ops_state,
.write_init = zynq_fpga_ops_write_init,
- .write = zynq_fpga_ops_write,
+ .write_sg = zynq_fpga_ops_write,
.write_complete = zynq_fpga_ops_write_complete,
};
@@ -416,6 +542,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ spin_lock_init(&priv->dma_lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->io_base = devm_ioremap_resource(dev, res);
@@ -452,7 +579,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
/* unlock the device */
zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);
- zynq_fpga_write(priv, INT_MASK_OFFSET, 0xFFFFFFFF);
+ zynq_fpga_set_irq(priv, 0);
zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
priv);
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
new file mode 100644
index 000000000000..04c1a0efa7a7
--- /dev/null
+++ b/drivers/fsi/Kconfig
@@ -0,0 +1,12 @@
+#
+# FSI subsystem
+#
+
+menu "FSI support"
+
+config FSI
+ tristate "FSI support"
+ ---help---
+ FSI - the FRU Support Interface - is a simple bus for low-level
+ access to POWER-based hardware.
+endmenu
diff --git a/drivers/fsi/Makefile b/drivers/fsi/Makefile
new file mode 100644
index 000000000000..db0e5e7c1655
--- /dev/null
+++ b/drivers/fsi/Makefile
@@ -0,0 +1,2 @@
+
+obj-$(CONFIG_FSI) += fsi-core.o
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
new file mode 100644
index 000000000000..3d55bd547178
--- /dev/null
+++ b/drivers/fsi/fsi-core.c
@@ -0,0 +1,59 @@
+/*
+ * FSI core driver
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fsi.h>
+#include <linux/module.h>
+
+/* FSI core & Linux bus type definitions */
+
+static int fsi_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct fsi_device *fsi_dev = to_fsi_dev(dev);
+ struct fsi_driver *fsi_drv = to_fsi_drv(drv);
+ const struct fsi_device_id *id;
+
+ if (!fsi_drv->id_table)
+ return 0;
+
+ for (id = fsi_drv->id_table; id->engine_type; id++) {
+ if (id->engine_type != fsi_dev->engine_type)
+ continue;
+ if (id->version == FSI_VERSION_ANY ||
+ id->version == fsi_dev->version)
+ return 1;
+ }
+
+ return 0;
+}
+
+struct bus_type fsi_bus_type = {
+ .name = "fsi",
+ .match = fsi_bus_match,
+};
+EXPORT_SYMBOL_GPL(fsi_bus_type);
+
+static int fsi_init(void)
+{
+ return bus_register(&fsi_bus_type);
+}
+
+static void fsi_exit(void)
+{
+ bus_unregister(&fsi_bus_type);
+}
+
+module_init(fsi_init);
+module_exit(fsi_exit);
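As a hedged sketch (not part of this patch), a hypothetical engine driver would bind to this bus by advertising an id table for fsi_bus_match() above to walk. The fsi_device_id fields follow what that function dereferences; the embedded device_driver member of struct fsi_driver is assumed here to be named drv:

/* Hypothetical example: match engine type 0x1c, any version. */
static const struct fsi_device_id example_ids[] = {
	{ .engine_type = 0x1c, .version = FSI_VERSION_ANY },
	{ 0 },
};

static struct fsi_driver example_fsi_driver = {
	.id_table = example_ids,
	.drv = {
		.name = "fsi-example",
		.bus  = &fsi_bus_type,
	},
};

/* Registered at module init with driver_register(&example_fsi_driver.drv). */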
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 5fb4c6d9209b..81a80c82f1bd 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -47,12 +47,8 @@ void vmbus_setevent(struct vmbus_channel *channel)
* For channels marked as in "low latency" mode
* bypass the monitor page mechanism.
*/
- if ((channel->offermsg.monitor_allocated) &&
- (!channel->low_latency)) {
- /* Each u32 represents 32 channels */
- sync_set_bit(channel->offermsg.child_relid & 31,
- (unsigned long *) vmbus_connection.send_int_page +
- (channel->offermsg.child_relid >> 5));
+ if (channel->offermsg.monitor_allocated && !channel->low_latency) {
+ vmbus_send_interrupt(channel->offermsg.child_relid);
/* Get the child to parent monitor page */
monitorpage = vmbus_connection.monitor_pages[1];
@@ -157,6 +153,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
}
init_completion(&open_info->waitevent);
+ open_info->waiting_channel = newchannel;
open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
@@ -181,7 +178,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
ret = vmbus_post_msg(open_msg,
- sizeof(struct vmbus_channel_open_channel));
+ sizeof(struct vmbus_channel_open_channel), true);
if (ret != 0) {
err = ret;
@@ -194,6 +191,11 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
list_del(&open_info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ if (newchannel->rescind) {
+ err = -ENODEV;
+ goto error_free_gpadl;
+ }
+
if (open_info->response.open_result.status) {
err = -EAGAIN;
goto error_free_gpadl;
@@ -233,7 +235,7 @@ int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
conn_msg.guest_endpoint_id = *shv_guest_servie_id;
conn_msg.host_service_id = *shv_host_servie_id;
- return vmbus_post_msg(&conn_msg, sizeof(conn_msg));
+ return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
@@ -405,6 +407,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
return ret;
init_completion(&msginfo->waitevent);
+ msginfo->waiting_channel = channel;
gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
@@ -419,7 +422,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
- sizeof(*msginfo));
+ sizeof(*msginfo), true);
if (ret != 0)
goto cleanup;
@@ -433,14 +436,19 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
gpadl_body->gpadl = next_gpadl_handle;
ret = vmbus_post_msg(gpadl_body,
- submsginfo->msgsize -
- sizeof(*submsginfo));
+ submsginfo->msgsize - sizeof(*submsginfo),
+ true);
if (ret != 0)
goto cleanup;
}
wait_for_completion(&msginfo->waitevent);
+ if (channel->rescind) {
+ ret = -ENODEV;
+ goto cleanup;
+ }
+
/* At this point, we received the gpadl created msg */
*gpadl_handle = gpadlmsg->gpadl;
@@ -474,6 +482,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
return -ENOMEM;
init_completion(&info->waitevent);
+ info->waiting_channel = channel;
msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
@@ -485,14 +494,19 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
list_add_tail(&info->msglistentry,
&vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
- ret = vmbus_post_msg(msg,
- sizeof(struct vmbus_channel_gpadl_teardown));
+ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
+ true);
if (ret)
goto post_msg_err;
wait_for_completion(&info->waitevent);
+ if (channel->rescind) {
+ ret = -ENODEV;
+ goto post_msg_err;
+ }
+
post_msg_err:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&info->msglistentry);
@@ -516,7 +530,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
int ret;
/*
- * process_chn_event(), running in the tasklet, can race
+ * vmbus_on_event(), running in the tasklet, can race
* with vmbus_close_internal() in the case of SMP guest, e.g., when
* the former is accessing channel->inbound.ring_buffer, the latter
* could be freeing the ring_buffer pages.
@@ -557,7 +571,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
msg->child_relid = channel->offermsg.child_relid;
- ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
+ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
+ true);
if (ret) {
pr_err("Close failed: close post msg return is %d\n", ret);
@@ -628,15 +643,14 @@ void vmbus_close(struct vmbus_channel *channel)
EXPORT_SYMBOL_GPL(vmbus_close);
int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
- u32 bufferlen, u64 requestid,
- enum vmbus_packet_type type, u32 flags, bool kick_q)
+ u32 bufferlen, u64 requestid,
+ enum vmbus_packet_type type, u32 flags)
{
struct vmpacket_descriptor desc;
u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool lock = channel->acquire_ring_lock;
int num_vecs = ((bufferlen != 0) ? 3 : 1);
@@ -655,9 +669,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, num_vecs,
- lock, kick_q);
-
+ return hv_ringbuffer_write(channel, bufferlist, num_vecs);
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);
@@ -680,7 +692,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
enum vmbus_packet_type type, u32 flags)
{
return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
- type, flags, true);
+ type, flags);
}
EXPORT_SYMBOL(vmbus_sendpacket);
@@ -692,11 +704,9 @@ EXPORT_SYMBOL(vmbus_sendpacket);
* explicitly.
*/
int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount, void *buffer, u32 bufferlen,
- u64 requestid,
- u32 flags,
- bool kick_q)
+ struct hv_page_buffer pagebuffers[],
+ u32 pagecount, void *buffer, u32 bufferlen,
+ u64 requestid, u32 flags)
{
int i;
struct vmbus_channel_packet_page_buffer desc;
@@ -705,12 +715,10 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool lock = channel->acquire_ring_lock;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;
-
/*
* Adjust the size down since vmbus_channel_packet_page_buffer is the
* largest size we support
@@ -742,8 +750,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, 3,
- lock, kick_q);
+ return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
@@ -757,9 +764,10 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u64 requestid)
{
u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
+
return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
- buffer, bufferlen, requestid,
- flags, true);
+ buffer, bufferlen,
+ requestid, flags);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
@@ -778,7 +786,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool lock = channel->acquire_ring_lock;
packetlen = desc_size + bufferlen;
packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -798,8 +805,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, 3,
- lock, true);
+ return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
@@ -817,7 +823,6 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool lock = channel->acquire_ring_lock;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -856,8 +861,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, 3,
- lock, true);
+ return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 26b419203f16..f33465d78a02 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -31,6 +31,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hyperv.h>
+#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
@@ -147,6 +148,29 @@ static const struct {
{ HV_RDV_GUID },
};
+/*
+ * The rescinded channel may be blocked waiting for a response from the host;
+ * take care of that.
+ */
+static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
+{
+ struct vmbus_channel_msginfo *msginfo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+
+ list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
+ msglistentry) {
+
+ if (msginfo->waiting_channel == channel) {
+ complete(&msginfo->waitevent);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+}
+
static bool is_unsupported_vmbus_devs(const uuid_le *guid)
{
int i;
@@ -180,33 +204,34 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
* @buf: Raw buffer channel data
*
* @icmsghdrp is of type &struct icmsg_hdr.
- * @negop is of type &struct icmsg_negotiate.
* Set up and fill in default negotiate response message.
*
- * The fw_version specifies the framework version that
- * we can support and srv_version specifies the service
- * version we can support.
+ * The fw_version and fw_vercnt specify the framework versions that
+ * we can support.
+ *
+ * The srv_version and srv_vercnt specify the service
+ * versions we can support.
+ *
+ * Versions are given in decreasing order.
+ *
+ * nego_fw_version and nego_srv_version store the selected protocol versions.
*
* Mainly used by Hyper-V drivers.
*/
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
- struct icmsg_negotiate *negop, u8 *buf,
- int fw_version, int srv_version)
+ u8 *buf, const int *fw_version, int fw_vercnt,
+ const int *srv_version, int srv_vercnt,
+ int *nego_fw_version, int *nego_srv_version)
{
int icframe_major, icframe_minor;
int icmsg_major, icmsg_minor;
int fw_major, fw_minor;
int srv_major, srv_minor;
- int i;
+ int i, j;
bool found_match = false;
+ struct icmsg_negotiate *negop;
icmsghdrp->icmsgsize = 0x10;
- fw_major = (fw_version >> 16);
- fw_minor = (fw_version & 0xFFFF);
-
- srv_major = (srv_version >> 16);
- srv_minor = (srv_version & 0xFFFF);
-
negop = (struct icmsg_negotiate *)&buf[
sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
@@ -222,13 +247,22 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
* support.
*/
- for (i = 0; i < negop->icframe_vercnt; i++) {
- if ((negop->icversion_data[i].major == fw_major) &&
- (negop->icversion_data[i].minor == fw_minor)) {
- icframe_major = negop->icversion_data[i].major;
- icframe_minor = negop->icversion_data[i].minor;
- found_match = true;
+ for (i = 0; i < fw_vercnt; i++) {
+ fw_major = (fw_version[i] >> 16);
+ fw_minor = (fw_version[i] & 0xFFFF);
+
+ for (j = 0; j < negop->icframe_vercnt; j++) {
+ if ((negop->icversion_data[j].major == fw_major) &&
+ (negop->icversion_data[j].minor == fw_minor)) {
+ icframe_major = negop->icversion_data[j].major;
+ icframe_minor = negop->icversion_data[j].minor;
+ found_match = true;
+ break;
+ }
}
+
+ if (found_match)
+ break;
}
if (!found_match)
@@ -236,14 +270,26 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
found_match = false;
- for (i = negop->icframe_vercnt;
- (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
- if ((negop->icversion_data[i].major == srv_major) &&
- (negop->icversion_data[i].minor == srv_minor)) {
- icmsg_major = negop->icversion_data[i].major;
- icmsg_minor = negop->icversion_data[i].minor;
- found_match = true;
+ for (i = 0; i < srv_vercnt; i++) {
+ srv_major = (srv_version[i] >> 16);
+ srv_minor = (srv_version[i] & 0xFFFF);
+
+ for (j = negop->icframe_vercnt;
+ (j < negop->icframe_vercnt + negop->icmsg_vercnt);
+ j++) {
+
+ if ((negop->icversion_data[j].major == srv_major) &&
+ (negop->icversion_data[j].minor == srv_minor)) {
+
+ icmsg_major = negop->icversion_data[j].major;
+ icmsg_minor = negop->icversion_data[j].minor;
+ found_match = true;
+ break;
+ }
}
+
+ if (found_match)
+ break;
}
/*
@@ -260,6 +306,12 @@ fw_error:
negop->icmsg_vercnt = 1;
}
+ if (nego_fw_version)
+ *nego_fw_version = (icframe_major << 16) | icframe_minor;
+
+ if (nego_srv_version)
+ *nego_srv_version = (icmsg_major << 16) | icmsg_minor;
+
negop->icversion_data[0].major = icframe_major;
negop->icversion_data[0].minor = icframe_minor;
negop->icversion_data[1].major = icmsg_major;
@@ -280,13 +332,15 @@ static struct vmbus_channel *alloc_channel(void)
if (!channel)
return NULL;
- channel->acquire_ring_lock = true;
spin_lock_init(&channel->inbound_lock);
spin_lock_init(&channel->lock);
INIT_LIST_HEAD(&channel->sc_list);
INIT_LIST_HEAD(&channel->percpu_list);
+ tasklet_init(&channel->callback_event,
+ vmbus_on_event, (unsigned long)channel);
+
return channel;
}
@@ -295,15 +349,17 @@ static struct vmbus_channel *alloc_channel(void)
*/
static void free_channel(struct vmbus_channel *channel)
{
+ tasklet_kill(&channel->callback_event);
kfree(channel);
}
static void percpu_channel_enq(void *arg)
{
struct vmbus_channel *channel = arg;
- int cpu = smp_processor_id();
+ struct hv_per_cpu_context *hv_cpu
+ = this_cpu_ptr(hv_context.cpu_context);
- list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
+ list_add_tail(&channel->percpu_list, &hv_cpu->chan_list);
}
static void percpu_channel_deq(void *arg)
@@ -321,24 +377,21 @@ static void vmbus_release_relid(u32 relid)
memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
msg.child_relid = relid;
msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
- vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
+ vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
+ true);
}
void hv_event_tasklet_disable(struct vmbus_channel *channel)
{
- struct tasklet_struct *tasklet;
- tasklet = hv_context.event_dpc[channel->target_cpu];
- tasklet_disable(tasklet);
+ tasklet_disable(&channel->callback_event);
}
void hv_event_tasklet_enable(struct vmbus_channel *channel)
{
- struct tasklet_struct *tasklet;
- tasklet = hv_context.event_dpc[channel->target_cpu];
- tasklet_enable(tasklet);
+ tasklet_enable(&channel->callback_event);
/* In case there is any pending event */
- tasklet_schedule(tasklet);
+ tasklet_schedule(&channel->callback_event);
}
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
@@ -673,9 +726,12 @@ static void vmbus_wait_for_unload(void)
break;
for_each_online_cpu(cpu) {
- page_addr = hv_context.synic_message_page[cpu];
- msg = (struct hv_message *)page_addr +
- VMBUS_MESSAGE_SINT;
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ page_addr = hv_cpu->synic_message_page;
+ msg = (struct hv_message *)page_addr
+ + VMBUS_MESSAGE_SINT;
message_type = READ_ONCE(msg->header.message_type);
if (message_type == HVMSG_NONE)
@@ -699,7 +755,10 @@ static void vmbus_wait_for_unload(void)
* messages after we reconnect.
*/
for_each_online_cpu(cpu) {
- page_addr = hv_context.synic_message_page[cpu];
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ page_addr = hv_cpu->synic_message_page;
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
msg->header.message_type = HVMSG_NONE;
}
@@ -728,7 +787,8 @@ void vmbus_initiate_unload(bool crash)
init_completion(&vmbus_connection.unload_event);
memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
hdr.msgtype = CHANNELMSG_UNLOAD;
- vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
+ vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
+ !crash);
/*
* vmbus_initiate_unload() is also called on crash and the crash can be
@@ -759,13 +819,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
}
/*
- * By default we setup state to enable batched
- * reading. A specific service can choose to
- * disable this prior to opening the channel.
- */
- newchannel->batched_reading = true;
-
- /*
* Setup state for signalling the host.
*/
newchannel->sig_event = (struct hv_input_signal_event *)
@@ -823,6 +876,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
channel->rescind = true;
spin_unlock_irqrestore(&channel->lock, flags);
+ vmbus_rescind_cleanup(channel);
+
if (channel->device_obj) {
if (channel->chn_rescind_callback) {
channel->chn_rescind_callback(channel);
@@ -1116,8 +1171,8 @@ int vmbus_request_offers(void)
msg->msgtype = CHANNELMSG_REQUESTOFFERS;
- ret = vmbus_post_msg(msg,
- sizeof(struct vmbus_channel_message_header));
+ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
+ true);
if (ret != 0) {
pr_err("Unable to request offers - %d\n", ret);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 6ce8b874e833..a8366fec1458 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -93,12 +93,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
* all the CPUs. This is needed for kexec to work correctly where
* the CPU attempting to connect may not be CPU 0.
*/
- if (version >= VERSION_WIN8_1) {
- msg->target_vcpu = hv_context.vp_index[get_cpu()];
- put_cpu();
- } else {
+ if (version >= VERSION_WIN8_1)
+ msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
+ else
msg->target_vcpu = 0;
- }
/*
* Add to list before we send the request since we may
@@ -111,7 +109,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
ret = vmbus_post_msg(msg,
- sizeof(struct vmbus_channel_initiate_contact));
+ sizeof(struct vmbus_channel_initiate_contact),
+ true);
if (ret != 0) {
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
@@ -220,11 +219,8 @@ int vmbus_connect(void)
goto cleanup;
vmbus_proto_version = version;
- pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n",
- host_info_eax, host_info_ebx >> 16,
- host_info_ebx & 0xFFFF, host_info_ecx,
- host_info_edx >> 24, host_info_edx & 0xFFFFFF,
- version >> 16, version & 0xFFFF);
+ pr_info("Vmbus version:%d.%d\n",
+ version >> 16, version & 0xFFFF);
kfree(msginfo);
return 0;
@@ -264,29 +260,6 @@ void vmbus_disconnect(void)
}
/*
- * Map the given relid to the corresponding channel based on the
- * per-cpu list of channels that have been affinitized to this CPU.
- * This will be used in the channel callback path as we can do this
- * mapping in a lock-free fashion.
- */
-static struct vmbus_channel *pcpu_relid2channel(u32 relid)
-{
- struct vmbus_channel *channel;
- struct vmbus_channel *found_channel = NULL;
- int cpu = smp_processor_id();
- struct list_head *pcpu_head = &hv_context.percpu_list[cpu];
-
- list_for_each_entry(channel, pcpu_head, percpu_list) {
- if (channel->offermsg.child_relid == relid) {
- found_channel = channel;
- break;
- }
- }
-
- return found_channel;
-}
-
-/*
* relid2channel - Get the channel object given its
* child relative id (ie channel id)
*/
@@ -322,23 +295,12 @@ struct vmbus_channel *relid2channel(u32 relid)
}
/*
- * process_chn_event - Process a channel event notification
+ * vmbus_on_event - Process a channel event notification
*/
-static void process_chn_event(u32 relid)
+void vmbus_on_event(unsigned long data)
{
- struct vmbus_channel *channel;
- void *arg;
- bool read_state;
- u32 bytes_to_read;
-
- /*
- * Find the channel based on this relid and invokes the
- * channel callback to process the event
- */
- channel = pcpu_relid2channel(relid);
-
- if (!channel)
- return;
+ struct vmbus_channel *channel = (void *) data;
+ void (*callback_fn)(void *);
/*
* A channel once created is persistent even when there
@@ -348,10 +310,13 @@ static void process_chn_event(u32 relid)
* Thus, checking and invoking the driver specific callback takes
* care of orderly unloading of the driver.
*/
+ callback_fn = READ_ONCE(channel->onchannel_callback);
+ if (unlikely(callback_fn == NULL))
+ return;
- if (channel->onchannel_callback != NULL) {
- arg = channel->channel_callback_context;
- read_state = channel->batched_reading;
+ (*callback_fn)(channel->channel_callback_context);
+
+ if (channel->callback_mode == HV_CALL_BATCHED) {
/*
* This callback reads the messages sent by the host.
* We can optimize host to guest signaling by ensuring:
@@ -363,71 +328,10 @@ static void process_chn_event(u32 relid)
* state is set we check to see if additional packets are
* available to read. In this case we repeat the process.
*/
+ if (hv_end_read(&channel->inbound) != 0) {
+ hv_begin_read(&channel->inbound);
- do {
- if (read_state)
- hv_begin_read(&channel->inbound);
- channel->onchannel_callback(arg);
- if (read_state)
- bytes_to_read = hv_end_read(&channel->inbound);
- else
- bytes_to_read = 0;
- } while (read_state && (bytes_to_read != 0));
- }
-}
-
-/*
- * vmbus_on_event - Handler for events
- */
-void vmbus_on_event(unsigned long data)
-{
- u32 dword;
- u32 maxdword;
- int bit;
- u32 relid;
- u32 *recv_int_page = NULL;
- void *page_addr;
- int cpu = smp_processor_id();
- union hv_synic_event_flags *event;
-
- if (vmbus_proto_version < VERSION_WIN8) {
- maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
- recv_int_page = vmbus_connection.recv_int_page;
- } else {
- /*
- * When the host is win8 and beyond, the event page
- * can be directly checked to get the id of the channel
- * that has the interrupt pending.
- */
- maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
- page_addr = hv_context.synic_event_page[cpu];
- event = (union hv_synic_event_flags *)page_addr +
- VMBUS_MESSAGE_SINT;
- recv_int_page = event->flags32;
- }
-
-
-
- /* Check events */
- if (!recv_int_page)
- return;
- for (dword = 0; dword < maxdword; dword++) {
- if (!recv_int_page[dword])
- continue;
- for (bit = 0; bit < 32; bit++) {
- if (sync_test_and_clear_bit(bit,
- (unsigned long *)&recv_int_page[dword])) {
- relid = (dword << 5) + bit;
-
- if (relid == 0)
- /*
- * Special case - vmbus
- * channel protocol msg
- */
- continue;
-
- process_chn_event(relid);
- }
+ tasklet_schedule(&channel->callback_event);
}
}
}
@@ -435,7 +339,7 @@ void vmbus_on_event(unsigned long data)
/*
* vmbus_post_msg - Send a msg on the vmbus's message connection
*/
-int vmbus_post_msg(void *buffer, size_t buflen)
+int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
{
union hv_connection_id conn_id;
int ret = 0;
@@ -450,7 +354,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
* insufficient resources. Retry the operation a couple of
* times before giving up.
*/
- while (retries < 20) {
+ while (retries < 100) {
ret = hv_post_message(conn_id, 1, buffer, buflen);
switch (ret) {
@@ -473,8 +377,14 @@ int vmbus_post_msg(void *buffer, size_t buflen)
}
retries++;
- udelay(usec);
- if (usec < 2048)
+ if (can_sleep && usec > 1000)
+ msleep(usec / 1000);
+ else if (usec < MAX_UDELAY_MS * 1000)
+ udelay(usec);
+ else
+ mdelay(usec / 1000);
+
+ if (usec < 256000)
usec *= 2;
}
return ret;
@@ -487,12 +397,8 @@ void vmbus_set_event(struct vmbus_channel *channel)
{
u32 child_relid = channel->offermsg.child_relid;
- if (!channel->is_dedicated_interrupt) {
- /* Each u32 represents 32 channels */
- sync_set_bit(child_relid & 31,
- (unsigned long *)vmbus_connection.send_int_page +
- (child_relid >> 5));
- }
+ if (!channel->is_dedicated_interrupt)
+ vmbus_send_interrupt(child_relid);
hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index b44b32f21e61..665a64f1611e 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -36,7 +36,6 @@
/* The one and only */
struct hv_context hv_context = {
.synic_initialized = false,
- .hypercall_page = NULL,
};
#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
@@ -44,276 +43,20 @@ struct hv_context hv_context = {
#define HV_MIN_DELTA_TICKS 1
/*
- * query_hypervisor_info - Get version info of the windows hypervisor
- */
-unsigned int host_info_eax;
-unsigned int host_info_ebx;
-unsigned int host_info_ecx;
-unsigned int host_info_edx;
-
-static int query_hypervisor_info(void)
-{
- unsigned int eax;
- unsigned int ebx;
- unsigned int ecx;
- unsigned int edx;
- unsigned int max_leaf;
- unsigned int op;
-
- /*
- * Its assumed that this is called after confirming that Viridian
- * is present. Query id and revision.
- */
- eax = 0;
- ebx = 0;
- ecx = 0;
- edx = 0;
- op = HVCPUID_VENDOR_MAXFUNCTION;
- cpuid(op, &eax, &ebx, &ecx, &edx);
-
- max_leaf = eax;
-
- if (max_leaf >= HVCPUID_VERSION) {
- eax = 0;
- ebx = 0;
- ecx = 0;
- edx = 0;
- op = HVCPUID_VERSION;
- cpuid(op, &eax, &ebx, &ecx, &edx);
- host_info_eax = eax;
- host_info_ebx = ebx;
- host_info_ecx = ecx;
- host_info_edx = edx;
- }
- return max_leaf;
-}
-
-/*
- * hv_do_hypercall- Invoke the specified hypercall
- */
-u64 hv_do_hypercall(u64 control, void *input, void *output)
-{
- u64 input_address = (input) ? virt_to_phys(input) : 0;
- u64 output_address = (output) ? virt_to_phys(output) : 0;
- void *hypercall_page = hv_context.hypercall_page;
-#ifdef CONFIG_X86_64
- u64 hv_status = 0;
-
- if (!hypercall_page)
- return (u64)ULLONG_MAX;
-
- __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
- __asm__ __volatile__("call *%3" : "=a" (hv_status) :
- "c" (control), "d" (input_address),
- "m" (hypercall_page));
-
- return hv_status;
-
-#else
-
- u32 control_hi = control >> 32;
- u32 control_lo = control & 0xFFFFFFFF;
- u32 hv_status_hi = 1;
- u32 hv_status_lo = 1;
- u32 input_address_hi = input_address >> 32;
- u32 input_address_lo = input_address & 0xFFFFFFFF;
- u32 output_address_hi = output_address >> 32;
- u32 output_address_lo = output_address & 0xFFFFFFFF;
-
- if (!hypercall_page)
- return (u64)ULLONG_MAX;
-
- __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
- "=a"(hv_status_lo) : "d" (control_hi),
- "a" (control_lo), "b" (input_address_hi),
- "c" (input_address_lo), "D"(output_address_hi),
- "S"(output_address_lo), "m" (hypercall_page));
-
- return hv_status_lo | ((u64)hv_status_hi << 32);
-#endif /* !x86_64 */
-}
-EXPORT_SYMBOL_GPL(hv_do_hypercall);
-
-#ifdef CONFIG_X86_64
-static u64 read_hv_clock_tsc(struct clocksource *arg)
-{
- u64 current_tick;
- struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;
-
- if (tsc_pg->tsc_sequence != 0) {
- /*
- * Use the tsc page to compute the value.
- */
-
- while (1) {
- u64 tmp;
- u32 sequence = tsc_pg->tsc_sequence;
- u64 cur_tsc;
- u64 scale = tsc_pg->tsc_scale;
- s64 offset = tsc_pg->tsc_offset;
-
- rdtscll(cur_tsc);
- /* current_tick = ((cur_tsc *scale) >> 64) + offset */
- asm("mulq %3"
- : "=d" (current_tick), "=a" (tmp)
- : "a" (cur_tsc), "r" (scale));
-
- current_tick += offset;
- if (tsc_pg->tsc_sequence == sequence)
- return current_tick;
-
- if (tsc_pg->tsc_sequence != 0)
- continue;
- /*
- * Fallback using MSR method.
- */
- break;
- }
- }
- rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
- return current_tick;
-}
-
-static struct clocksource hyperv_cs_tsc = {
- .name = "hyperv_clocksource_tsc_page",
- .rating = 425,
- .read = read_hv_clock_tsc,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-#endif
-
-
-/*
* hv_init - Main initialization routine.
*
* This routine must be called before any other routines in here are called
*/
int hv_init(void)
{
- int max_leaf;
- union hv_x64_msr_hypercall_contents hypercall_msr;
- void *virtaddr = NULL;
-
- memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
- memset(hv_context.synic_message_page, 0,
- sizeof(void *) * NR_CPUS);
- memset(hv_context.post_msg_page, 0,
- sizeof(void *) * NR_CPUS);
- memset(hv_context.vp_index, 0,
- sizeof(int) * NR_CPUS);
- memset(hv_context.event_dpc, 0,
- sizeof(void *) * NR_CPUS);
- memset(hv_context.msg_dpc, 0,
- sizeof(void *) * NR_CPUS);
- memset(hv_context.clk_evt, 0,
- sizeof(void *) * NR_CPUS);
-
- max_leaf = query_hypervisor_info();
+ if (!hv_is_hypercall_page_setup())
+ return -ENOTSUPP;
- /*
- * Write our OS ID.
- */
- hv_context.guestid = generate_guest_id(0, LINUX_VERSION_CODE, 0);
- wrmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);
-
- /* See if the hypercall page is already set */
- rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
-
- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
-
- if (!virtaddr)
- goto cleanup;
-
- hypercall_msr.enable = 1;
-
- hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
- wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
-
- /* Confirm that hypercall page did get setup. */
- hypercall_msr.as_uint64 = 0;
- rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
-
- if (!hypercall_msr.enable)
- goto cleanup;
-
- hv_context.hypercall_page = virtaddr;
-
-#ifdef CONFIG_X86_64
- if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
- union hv_x64_msr_hypercall_contents tsc_msr;
- void *va_tsc;
-
- va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
- if (!va_tsc)
- goto cleanup;
- hv_context.tsc_page = va_tsc;
-
- rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
+ hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
+ if (!hv_context.cpu_context)
+ return -ENOMEM;
- tsc_msr.enable = 1;
- tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc);
-
- wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
- clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
- }
-#endif
return 0;
-
-cleanup:
- if (virtaddr) {
- if (hypercall_msr.enable) {
- hypercall_msr.as_uint64 = 0;
- wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
- }
-
- vfree(virtaddr);
- }
-
- return -ENOTSUPP;
-}
-
-/*
- * hv_cleanup - Cleanup routine.
- *
- * This routine is called normally during driver unloading or exiting.
- */
-void hv_cleanup(bool crash)
-{
- union hv_x64_msr_hypercall_contents hypercall_msr;
-
- /* Reset our OS id */
- wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
-
- if (hv_context.hypercall_page) {
- hypercall_msr.as_uint64 = 0;
- wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
- if (!crash)
- vfree(hv_context.hypercall_page);
- hv_context.hypercall_page = NULL;
- }
-
-#ifdef CONFIG_X86_64
- /*
- * Cleanup the TSC page based CS.
- */
- if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
- /*
- * Crash can happen in an interrupt context and unregistering
- * a clocksource is impossible and redundant in this case.
- */
- if (!oops_in_progress) {
- clocksource_change_rating(&hyperv_cs_tsc, 10);
- clocksource_unregister(&hyperv_cs_tsc);
- }
-
- hypercall_msr.as_uint64 = 0;
- wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
- if (!crash)
- vfree(hv_context.tsc_page);
- hv_context.tsc_page = NULL;
- }
-#endif
}
/*
@@ -325,25 +68,24 @@ int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,
void *payload, size_t payload_size)
{
-
struct hv_input_post_message *aligned_msg;
+ struct hv_per_cpu_context *hv_cpu;
u64 status;
if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
return -EMSGSIZE;
- aligned_msg = (struct hv_input_post_message *)
- hv_context.post_msg_page[get_cpu()];
-
+ hv_cpu = get_cpu_ptr(hv_context.cpu_context);
+ aligned_msg = hv_cpu->post_msg_page;
aligned_msg->connectionid = connection_id;
aligned_msg->reserved = 0;
aligned_msg->message_type = message_type;
aligned_msg->payload_size = payload_size;
memcpy((void *)aligned_msg->payload, payload, payload_size);
+ put_cpu_ptr(hv_cpu);
status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
- put_cpu();
return status & 0xFFFF;
}
@@ -354,16 +96,16 @@ static int hv_ce_set_next_event(unsigned long delta,
WARN_ON(!clockevent_state_oneshot(evt));
- rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
+ hv_get_current_tick(current_tick);
current_tick += delta;
- wrmsrl(HV_X64_MSR_STIMER0_COUNT, current_tick);
+ hv_init_timer(HV_X64_MSR_STIMER0_COUNT, current_tick);
return 0;
}
static int hv_ce_shutdown(struct clock_event_device *evt)
{
- wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
- wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);
+ hv_init_timer(HV_X64_MSR_STIMER0_COUNT, 0);
+ hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, 0);
return 0;
}
@@ -375,7 +117,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
timer_cfg.enable = 1;
timer_cfg.auto_enable = 1;
timer_cfg.sintx = VMBUS_MESSAGE_SINT;
- wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
+ hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
return 0;
}
@@ -400,8 +142,6 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
int hv_synic_alloc(void)
{
- size_t size = sizeof(struct tasklet_struct);
- size_t ced_size = sizeof(struct clock_event_device);
int cpu;
hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
@@ -411,52 +151,42 @@ int hv_synic_alloc(void)
goto err;
}
- for_each_online_cpu(cpu) {
- hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
- if (hv_context.event_dpc[cpu] == NULL) {
- pr_err("Unable to allocate event dpc\n");
- goto err;
- }
- tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
+ for_each_present_cpu(cpu) {
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
- hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
- if (hv_context.msg_dpc[cpu] == NULL) {
- pr_err("Unable to allocate event dpc\n");
- goto err;
- }
- tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu);
+ memset(hv_cpu, 0, sizeof(*hv_cpu));
+ tasklet_init(&hv_cpu->msg_dpc,
+ vmbus_on_msg_dpc, (unsigned long) hv_cpu);
- hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
- if (hv_context.clk_evt[cpu] == NULL) {
+ hv_cpu->clk_evt = kzalloc(sizeof(struct clock_event_device),
+ GFP_KERNEL);
+ if (hv_cpu->clk_evt == NULL) {
pr_err("Unable to allocate clock event device\n");
goto err;
}
+ hv_init_clockevent_device(hv_cpu->clk_evt, cpu);
- hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);
-
- hv_context.synic_message_page[cpu] =
+ hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
-
- if (hv_context.synic_message_page[cpu] == NULL) {
+ if (hv_cpu->synic_message_page == NULL) {
pr_err("Unable to allocate SYNIC message page\n");
goto err;
}
- hv_context.synic_event_page[cpu] =
- (void *)get_zeroed_page(GFP_ATOMIC);
-
- if (hv_context.synic_event_page[cpu] == NULL) {
+ hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (hv_cpu->synic_event_page == NULL) {
pr_err("Unable to allocate SYNIC event page\n");
goto err;
}
- hv_context.post_msg_page[cpu] =
- (void *)get_zeroed_page(GFP_ATOMIC);
-
- if (hv_context.post_msg_page[cpu] == NULL) {
+ hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (hv_cpu->post_msg_page == NULL) {
pr_err("Unable to allocate post msg page\n");
goto err;
}
+
+ INIT_LIST_HEAD(&hv_cpu->chan_list);
}
return 0;
@@ -464,26 +194,24 @@ err:
return -ENOMEM;
}
-static void hv_synic_free_cpu(int cpu)
-{
- kfree(hv_context.event_dpc[cpu]);
- kfree(hv_context.msg_dpc[cpu]);
- kfree(hv_context.clk_evt[cpu]);
- if (hv_context.synic_event_page[cpu])
- free_page((unsigned long)hv_context.synic_event_page[cpu]);
- if (hv_context.synic_message_page[cpu])
- free_page((unsigned long)hv_context.synic_message_page[cpu]);
- if (hv_context.post_msg_page[cpu])
- free_page((unsigned long)hv_context.post_msg_page[cpu]);
-}
void hv_synic_free(void)
{
int cpu;
+ for_each_present_cpu(cpu) {
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ if (hv_cpu->synic_event_page)
+ free_page((unsigned long)hv_cpu->synic_event_page);
+ if (hv_cpu->synic_message_page)
+ free_page((unsigned long)hv_cpu->synic_message_page);
+ if (hv_cpu->post_msg_page)
+ free_page((unsigned long)hv_cpu->post_msg_page);
+ }
+
kfree(hv_context.hv_numa_map);
- for_each_online_cpu(cpu)
- hv_synic_free_cpu(cpu);
}
/*
@@ -493,54 +221,49 @@ void hv_synic_free(void)
* retrieve the initialized message and event pages. Otherwise, we create and
* initialize the message and event pages.
*/
-void hv_synic_init(void *arg)
+int hv_synic_init(unsigned int cpu)
{
- u64 version;
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_simp simp;
union hv_synic_siefp siefp;
union hv_synic_sint shared_sint;
union hv_synic_scontrol sctrl;
u64 vp_index;
- int cpu = smp_processor_id();
-
- if (!hv_context.hypercall_page)
- return;
-
- /* Check the version */
- rdmsrl(HV_X64_MSR_SVERSION, version);
-
/* Setup the Synic's message page */
- rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
+ hv_get_simp(simp.as_uint64);
simp.simp_enabled = 1;
- simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
+ simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
>> PAGE_SHIFT;
- wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
+ hv_set_simp(simp.as_uint64);
/* Setup the Synic's event page */
- rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
+ hv_get_siefp(siefp.as_uint64);
siefp.siefp_enabled = 1;
- siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
+ siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
>> PAGE_SHIFT;
- wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
+ hv_set_siefp(siefp.as_uint64);
/* Setup the shared SINT. */
- rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+ hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
+ shared_sint.as_uint64);
shared_sint.as_uint64 = 0;
shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
shared_sint.masked = false;
shared_sint.auto_eoi = true;
- wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+ hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
+ shared_sint.as_uint64);
/* Enable the global synic bit */
- rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
+ hv_get_synic_state(sctrl.as_uint64);
sctrl.enable = 1;
- wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
+ hv_set_synic_state(sctrl.as_uint64);
hv_context.synic_initialized = true;
@@ -549,20 +272,18 @@ void hv_synic_init(void *arg)
* of cpuid and Linux' notion of cpuid.
* This array will be indexed using Linux cpuid.
*/
- rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
+ hv_get_vp_index(vp_index);
hv_context.vp_index[cpu] = (u32)vp_index;
- INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);
-
/*
* Register the per-cpu clockevent source.
*/
if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
- clockevents_config_and_register(hv_context.clk_evt[cpu],
+ clockevents_config_and_register(hv_cpu->clk_evt,
HV_TIMER_FREQUENCY,
HV_MIN_DELTA_TICKS,
HV_MAX_MAX_DELTA_TICKS);
- return;
+ return 0;
}
/*
@@ -575,52 +296,94 @@ void hv_synic_clockevents_cleanup(void)
if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
return;
- for_each_present_cpu(cpu)
- clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
+ for_each_present_cpu(cpu) {
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ clockevents_unbind_device(hv_cpu->clk_evt, cpu);
+ }
}
/*
* hv_synic_cleanup - Cleanup routine for hv_synic_init().
*/
-void hv_synic_cleanup(void *arg)
+int hv_synic_cleanup(unsigned int cpu)
{
union hv_synic_sint shared_sint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
union hv_synic_scontrol sctrl;
- int cpu = smp_processor_id();
+ struct vmbus_channel *channel, *sc;
+ bool channel_found = false;
+ unsigned long flags;
if (!hv_context.synic_initialized)
- return;
+ return -EFAULT;
+
+ /*
+ * Search for channels which are bound to the CPU we're about to
+ * clean up. If we find one and vmbus is still connected, we need to
+ * fail; this effectively prevents CPU offlining. There is no way
+ * to re-bind channels to different CPUs for now.
+ */
+ mutex_lock(&vmbus_connection.channel_mutex);
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ if (channel->target_cpu == cpu) {
+ channel_found = true;
+ break;
+ }
+ spin_lock_irqsave(&channel->lock, flags);
+ list_for_each_entry(sc, &channel->sc_list, sc_list) {
+ if (sc->target_cpu == cpu) {
+ channel_found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&channel->lock, flags);
+ if (channel_found)
+ break;
+ }
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ if (channel_found && vmbus_connection.conn_state == CONNECTED)
+ return -EBUSY;
/* Turn off clockevent device */
if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
- clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
- hv_ce_shutdown(hv_context.clk_evt[cpu]);
+ struct hv_per_cpu_context *hv_cpu
+ = this_cpu_ptr(hv_context.cpu_context);
+
+ clockevents_unbind_device(hv_cpu->clk_evt, cpu);
+ hv_ce_shutdown(hv_cpu->clk_evt);
+ put_cpu_ptr(hv_cpu);
}
- rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+ hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
+ shared_sint.as_uint64);
shared_sint.masked = 1;
/* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
- wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+ hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
+ shared_sint.as_uint64);
- rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
+ hv_get_simp(simp.as_uint64);
simp.simp_enabled = 0;
simp.base_simp_gpa = 0;
- wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
+ hv_set_simp(simp.as_uint64);
- rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
+ hv_get_siefp(siefp.as_uint64);
siefp.siefp_enabled = 0;
siefp.base_siefp_gpa = 0;
- wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
+ hv_set_siefp(siefp.as_uint64);
/* Disable the global synic bit */
- rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
+ hv_get_synic_state(sctrl.as_uint64);
sctrl.enable = 0;
- wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
+ hv_set_synic_state(sctrl.as_uint64);
+
+ return 0;
}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 14c3dc4bd23c..5fd03e59cee5 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -587,6 +587,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
spin_lock_irqsave(&dm_device.ha_lock, flags);
dm_device.num_pages_onlined += mem->nr_pages;
spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+ /* Fall through */
case MEM_CANCEL_ONLINE:
if (dm_device.ha_waiting) {
dm_device.ha_waiting = false;
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 8b2ba98831ec..9aee6014339d 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -31,6 +31,16 @@
#define WIN8_SRV_MINOR 1
#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
+#define FCOPY_VER_COUNT 1
+static const int fcopy_versions[] = {
+ WIN8_SRV_VERSION
+};
+
+#define FW_VER_COUNT 1
+static const int fw_versions[] = {
+ UTIL_FW_VERSION
+};
+
/*
* Global state maintained for transaction that is being processed.
* For a class of integration services, including the "file copy service",
@@ -61,6 +71,7 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
static const char fcopy_devname[] = "vmbus/hv_fcopy";
static u8 *recv_buffer;
static struct hvutil_transport *hvt;
+static struct completion release_event;
/*
* This state maintains the version number registered by the daemon.
*/
@@ -227,8 +238,6 @@ void hv_fcopy_onchannelcallback(void *context)
u64 requestid;
struct hv_fcopy_hdr *fcopy_msg;
struct icmsg_hdr *icmsghdr;
- struct icmsg_negotiate *negop = NULL;
- int util_fw_version;
int fcopy_srv_version;
if (fcopy_transaction.state > HVUTIL_READY)
@@ -242,10 +251,15 @@ void hv_fcopy_onchannelcallback(void *context)
icmsghdr = (struct icmsg_hdr *)&recv_buffer[
sizeof(struct vmbuspipe_hdr)];
if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- util_fw_version = UTIL_FW_VERSION;
- fcopy_srv_version = WIN8_SRV_VERSION;
- vmbus_prep_negotiate_resp(icmsghdr, negop, recv_buffer,
- util_fw_version, fcopy_srv_version);
+ if (vmbus_prep_negotiate_resp(icmsghdr, recv_buffer,
+ fw_versions, FW_VER_COUNT,
+ fcopy_versions, FCOPY_VER_COUNT,
+ NULL, &fcopy_srv_version)) {
+
+ pr_info("FCopy IC version %d.%d\n",
+ fcopy_srv_version >> 16,
+ fcopy_srv_version & 0xFFFF);
+ }
} else {
fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[
sizeof(struct vmbuspipe_hdr) +
@@ -317,6 +331,7 @@ static void fcopy_on_reset(void)
if (cancel_delayed_work_sync(&fcopy_timeout_work))
fcopy_respond_to_host(HV_E_FAIL);
+ complete(&release_event);
}
int hv_fcopy_init(struct hv_util_service *srv)
@@ -324,6 +339,7 @@ int hv_fcopy_init(struct hv_util_service *srv)
recv_buffer = srv->recv_buffer;
fcopy_transaction.recv_channel = srv->channel;
+ init_completion(&release_event);
/*
* When this driver loads, the user level daemon that
* processes the host requests may not yet be running.
@@ -345,4 +361,5 @@ void hv_fcopy_deinit(void)
fcopy_transaction.state = HVUTIL_DEVICE_DYING;
cancel_delayed_work_sync(&fcopy_timeout_work);
hvutil_transport_destroy(hvt);
+ wait_for_completion(&release_event);
}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 5e1fdc8d32ab..de263712e247 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -46,6 +46,19 @@
#define WIN8_SRV_MINOR 0
#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
+#define KVP_VER_COUNT 3
+static const int kvp_versions[] = {
+ WIN8_SRV_VERSION,
+ WIN7_SRV_VERSION,
+ WS2008_SRV_VERSION
+};
+
+#define FW_VER_COUNT 2
+static const int fw_versions[] = {
+ UTIL_FW_VERSION,
+ UTIL_WS2K8_FW_VERSION
+};
+
/*
* Global state maintained for transaction that is being processed. For a class
* of integration services, including the "KVP service", the specified protocol
@@ -88,6 +101,7 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
static const char kvp_devname[] = "vmbus/hv_kvp";
static u8 *recv_buffer;
static struct hvutil_transport *hvt;
+static struct completion release_event;
/*
* Register the kernel component with the user-level daemon.
* As part of this registration, pass the LIC version number.
@@ -609,8 +623,6 @@ void hv_kvp_onchannelcallback(void *context)
struct hv_kvp_msg *kvp_msg;
struct icmsg_hdr *icmsghdrp;
- struct icmsg_negotiate *negop = NULL;
- int util_fw_version;
int kvp_srv_version;
static enum {NEGO_NOT_STARTED,
NEGO_IN_PROGRESS,
@@ -639,28 +651,14 @@ void hv_kvp_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- /*
- * Based on the host, select appropriate
- * framework and service versions we will
- * negotiate.
- */
- switch (vmbus_proto_version) {
- case (VERSION_WS2008):
- util_fw_version = UTIL_WS2K8_FW_VERSION;
- kvp_srv_version = WS2008_SRV_VERSION;
- break;
- case (VERSION_WIN7):
- util_fw_version = UTIL_FW_VERSION;
- kvp_srv_version = WIN7_SRV_VERSION;
- break;
- default:
- util_fw_version = UTIL_FW_VERSION;
- kvp_srv_version = WIN8_SRV_VERSION;
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ recv_buffer, fw_versions, FW_VER_COUNT,
+ kvp_versions, KVP_VER_COUNT,
+ NULL, &kvp_srv_version)) {
+ pr_info("KVP IC version %d.%d\n",
+ kvp_srv_version >> 16,
+ kvp_srv_version & 0xFFFF);
}
- vmbus_prep_negotiate_resp(icmsghdrp, negop,
- recv_buffer, util_fw_version,
- kvp_srv_version);
-
} else {
kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
sizeof(struct vmbuspipe_hdr) +
@@ -716,6 +714,7 @@ static void kvp_on_reset(void)
if (cancel_delayed_work_sync(&kvp_timeout_work))
kvp_respond_to_host(NULL, HV_E_FAIL);
kvp_transaction.state = HVUTIL_DEVICE_INIT;
+ complete(&release_event);
}
int
@@ -724,6 +723,7 @@ hv_kvp_init(struct hv_util_service *srv)
recv_buffer = srv->recv_buffer;
kvp_transaction.recv_channel = srv->channel;
+ init_completion(&release_event);
/*
* When this driver loads, the user level daemon that
* processes the host requests may not yet be running.
@@ -747,4 +747,5 @@ void hv_kvp_deinit(void)
cancel_delayed_work_sync(&kvp_timeout_work);
cancel_work_sync(&kvp_sendkey_work);
hvutil_transport_destroy(hvt);
+ wait_for_completion(&release_event);
}
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index eee238cc60bd..bcc03f0748d6 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -31,6 +31,16 @@
#define VSS_MINOR 0
#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
+#define VSS_VER_COUNT 1
+static const int vss_versions[] = {
+ VSS_VERSION
+};
+
+#define FW_VER_COUNT 1
+static const int fw_versions[] = {
+ UTIL_FW_VERSION
+};
+
/*
* Timeout values are based on expectations from the host
*/
@@ -69,6 +79,7 @@ static int dm_reg_value;
static const char vss_devname[] = "vmbus/hv_vss";
static __u8 *recv_buffer;
static struct hvutil_transport *hvt;
+static struct completion release_event;
static void vss_timeout_func(struct work_struct *dummy);
static void vss_handle_request(struct work_struct *dummy);
@@ -293,10 +304,9 @@ void hv_vss_onchannelcallback(void *context)
u32 recvlen;
u64 requestid;
struct hv_vss_msg *vss_msg;
-
+ int vss_srv_version;
struct icmsg_hdr *icmsghdrp;
- struct icmsg_negotiate *negop = NULL;
if (vss_transaction.state > HVUTIL_READY)
return;
@@ -309,9 +319,15 @@ void hv_vss_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- vmbus_prep_negotiate_resp(icmsghdrp, negop,
- recv_buffer, UTIL_FW_VERSION,
- VSS_VERSION);
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ recv_buffer, fw_versions, FW_VER_COUNT,
+ vss_versions, VSS_VER_COUNT,
+ NULL, &vss_srv_version)) {
+
+ pr_info("VSS IC version %d.%d\n",
+ vss_srv_version >> 16,
+ vss_srv_version & 0xFFFF);
+ }
} else {
vss_msg = (struct hv_vss_msg *)&recv_buffer[
sizeof(struct vmbuspipe_hdr) +
@@ -345,11 +361,13 @@ static void vss_on_reset(void)
if (cancel_delayed_work_sync(&vss_timeout_work))
vss_respond_to_host(HV_E_FAIL);
vss_transaction.state = HVUTIL_DEVICE_INIT;
+ complete(&release_event);
}
int
hv_vss_init(struct hv_util_service *srv)
{
+ init_completion(&release_event);
if (vmbus_proto_version < VERSION_WIN8_1) {
pr_warn("Integration service 'Backup (volume snapshot)'"
" not supported on this host version.\n");
@@ -382,4 +400,5 @@ void hv_vss_deinit(void)
cancel_delayed_work_sync(&vss_timeout_work);
cancel_work_sync(&vss_handle_request_work);
hvutil_transport_destroy(hvt);
+ wait_for_completion(&release_event);
}
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index e7707747f56d..3042eaa13062 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -27,6 +27,9 @@
#include <linux/sysctl.h>
#include <linux/reboot.h>
#include <linux/hyperv.h>
+#include <linux/clockchips.h>
+#include <linux/ptp_clock_kernel.h>
+#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
@@ -57,7 +60,31 @@
static int sd_srv_version;
static int ts_srv_version;
static int hb_srv_version;
-static int util_fw_version;
+
+#define SD_VER_COUNT 2
+static const int sd_versions[] = {
+ SD_VERSION,
+ SD_VERSION_1
+};
+
+#define TS_VER_COUNT 3
+static const int ts_versions[] = {
+ TS_VERSION,
+ TS_VERSION_3,
+ TS_VERSION_1
+};
+
+#define HB_VER_COUNT 2
+static const int hb_versions[] = {
+ HB_VERSION,
+ HB_VERSION_1
+};
+
+#define FW_VER_COUNT 2
+static const int fw_versions[] = {
+ UTIL_FW_VERSION,
+ UTIL_WS2K8_FW_VERSION
+};
static void shutdown_onchannelcallback(void *context);
static struct hv_util_service util_shutdown = {
@@ -118,7 +145,6 @@ static void shutdown_onchannelcallback(void *context)
struct shutdown_msg_data *shutdown_msg;
struct icmsg_hdr *icmsghdrp;
- struct icmsg_negotiate *negop = NULL;
vmbus_recvpacket(channel, shut_txf_buf,
PAGE_SIZE, &recvlen, &requestid);
@@ -128,9 +154,14 @@ static void shutdown_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- vmbus_prep_negotiate_resp(icmsghdrp, negop,
- shut_txf_buf, util_fw_version,
- sd_srv_version);
+ if (vmbus_prep_negotiate_resp(icmsghdrp, shut_txf_buf,
+ fw_versions, FW_VER_COUNT,
+ sd_versions, SD_VER_COUNT,
+ NULL, &sd_srv_version)) {
+ pr_info("Shutdown IC version %d.%d\n",
+ sd_srv_version >> 16,
+ sd_srv_version & 0xFFFF);
+ }
} else {
shutdown_msg =
(struct shutdown_msg_data *)&shut_txf_buf[
@@ -181,31 +212,17 @@ struct adj_time_work {
static void hv_set_host_time(struct work_struct *work)
{
- struct adj_time_work *wrk;
- s64 host_tns;
- u64 newtime;
- struct timespec host_ts;
+ struct adj_time_work *wrk;
+ struct timespec64 host_ts;
+ u64 reftime, newtime;
wrk = container_of(work, struct adj_time_work, work);
- newtime = wrk->host_time;
- if (ts_srv_version > TS_VERSION_3) {
- /*
- * Some latency has been introduced since Hyper-V generated
- * its time sample. Take that latency into account before
- * using TSC reference time sample from Hyper-V.
- *
- * This sample is given by TimeSync v4 and above hosts.
- */
- u64 current_tick;
-
- rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
- newtime += (current_tick - wrk->ref_time);
- }
- host_tns = (newtime - WLTIMEDELTA) * 100;
- host_ts = ns_to_timespec(host_tns);
+ reftime = hyperv_cs->read(hyperv_cs);
+ newtime = wrk->host_time + (reftime - wrk->ref_time);
+ host_ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
- do_settimeofday(&host_ts);
+ do_settimeofday64(&host_ts);
}
/*
@@ -222,22 +239,60 @@ static void hv_set_host_time(struct work_struct *work)
* to discipline the clock.
*/
static struct adj_time_work wrk;
-static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 flags)
+
+/*
+ * The last time sample received from the host. The PTP device responds to
+ * requests using this data and the current partition-wide time reference
+ * count.
+ */
+static struct {
+ u64 host_time;
+ u64 ref_time;
+ struct system_time_snapshot snap;
+ spinlock_t lock;
+} host_ts;
+
+static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
{
+ unsigned long flags;
+ u64 cur_reftime;
/*
* This check is safe since we are executing in the
- * interrupt context and time synch messages arre always
+ * interrupt context and time synch messages are always
* delivered on the same CPU.
*/
- if (work_pending(&wrk.work))
- return;
-
- wrk.host_time = hosttime;
- wrk.ref_time = reftime;
- wrk.flags = flags;
- if ((flags & (ICTIMESYNCFLAG_SYNC | ICTIMESYNCFLAG_SAMPLE)) != 0) {
+ if (adj_flags & ICTIMESYNCFLAG_SYNC) {
+ /* Queue a job to do do_settimeofday64() */
+ if (work_pending(&wrk.work))
+ return;
+
+ wrk.host_time = hosttime;
+ wrk.ref_time = reftime;
+ wrk.flags = adj_flags;
schedule_work(&wrk.work);
+ } else {
+ /*
+ * Save the adjusted time sample from the host and the snapshot
+ * of the current system time for PTP device.
+ */
+ spin_lock_irqsave(&host_ts.lock, flags);
+
+ cur_reftime = hyperv_cs->read(hyperv_cs);
+ host_ts.host_time = hosttime;
+ host_ts.ref_time = cur_reftime;
+ ktime_get_snapshot(&host_ts.snap);
+
+ /*
+ * TimeSync v4 messages contain reference time (guest's Hyper-V
+ * clocksource read when the time sample was generated), we can
+ * improve the precision by adding the delta between now and the
+ * time of generation.
+ */
+ if (ts_srv_version > TS_VERSION_3)
+ host_ts.host_time += (cur_reftime - reftime);
+
+ spin_unlock_irqrestore(&host_ts.lock, flags);
}
}
@@ -253,7 +308,6 @@ static void timesync_onchannelcallback(void *context)
struct ictimesync_data *timedatap;
struct ictimesync_ref_data *refdata;
u8 *time_txf_buf = util_timesynch.recv_buffer;
- struct icmsg_negotiate *negop = NULL;
vmbus_recvpacket(channel, time_txf_buf,
PAGE_SIZE, &recvlen, &requestid);
@@ -263,12 +317,14 @@ static void timesync_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- vmbus_prep_negotiate_resp(icmsghdrp, negop,
- time_txf_buf,
- util_fw_version,
- ts_srv_version);
- pr_info("Using TimeSync version %d.%d\n",
- ts_srv_version >> 16, ts_srv_version & 0xFFFF);
+ if (vmbus_prep_negotiate_resp(icmsghdrp, time_txf_buf,
+ fw_versions, FW_VER_COUNT,
+ ts_versions, TS_VER_COUNT,
+ NULL, &ts_srv_version)) {
+ pr_info("TimeSync IC version %d.%d\n",
+ ts_srv_version >> 16,
+ ts_srv_version & 0xFFFF);
+ }
} else {
if (ts_srv_version > TS_VERSION_3) {
refdata = (struct ictimesync_ref_data *)
@@ -312,7 +368,6 @@ static void heartbeat_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct heartbeat_msg_data *heartbeat_msg;
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
- struct icmsg_negotiate *negop = NULL;
while (1) {
@@ -326,9 +381,16 @@ static void heartbeat_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- vmbus_prep_negotiate_resp(icmsghdrp, negop,
- hbeat_txf_buf, util_fw_version,
- hb_srv_version);
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ hbeat_txf_buf,
+ fw_versions, FW_VER_COUNT,
+ hb_versions, HB_VER_COUNT,
+ NULL, &hb_srv_version)) {
+
+ pr_info("Heartbeat IC version %d.%d\n",
+ hb_srv_version >> 16,
+ hb_srv_version & 0xFFFF);
+ }
} else {
heartbeat_msg =
(struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -373,38 +435,10 @@ static int util_probe(struct hv_device *dev,
* Turn off batched reading for all util drivers before we open the
* channel.
*/
-
- set_channel_read_state(dev->channel, false);
+ set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
hv_set_drvdata(dev, srv);
- /*
- * Based on the host; initialize the framework and
- * service version numbers we will negotiate.
- */
- switch (vmbus_proto_version) {
- case (VERSION_WS2008):
- util_fw_version = UTIL_WS2K8_FW_VERSION;
- sd_srv_version = SD_VERSION_1;
- ts_srv_version = TS_VERSION_1;
- hb_srv_version = HB_VERSION_1;
- break;
- case VERSION_WIN7:
- case VERSION_WIN8:
- case VERSION_WIN8_1:
- util_fw_version = UTIL_FW_VERSION;
- sd_srv_version = SD_VERSION;
- ts_srv_version = TS_VERSION_3;
- hb_srv_version = HB_VERSION;
- break;
- case VERSION_WIN10:
- default:
- util_fw_version = UTIL_FW_VERSION;
- sd_srv_version = SD_VERSION;
- ts_srv_version = TS_VERSION;
- hb_srv_version = HB_VERSION;
- }
-
ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
srv->util_cb, dev->channel);
if (ret)
@@ -470,14 +504,113 @@ static struct hv_driver util_drv = {
.remove = util_remove,
};
+static int hv_ptp_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ return -EOPNOTSUPP;
+}
+static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+ unsigned long flags;
+ u64 newtime, reftime;
+
+ spin_lock_irqsave(&host_ts.lock, flags);
+ reftime = hyperv_cs->read(hyperv_cs);
+ newtime = host_ts.host_time + (reftime - host_ts.ref_time);
+ *ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
+ spin_unlock_irqrestore(&host_ts.lock, flags);
+
+ return 0;
+}
+
+static int hv_ptp_get_syncdevicetime(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ system->cs = hyperv_cs;
+ system->cycles = host_ts.ref_time;
+ *device = ns_to_ktime((host_ts.host_time - WLTIMEDELTA) * 100);
+
+ return 0;
+}
+
+static int hv_ptp_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *xtstamp)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&host_ts.lock, flags);
+
+ /*
+ * host_ts contains the last time sample from the host and the snapshot
+ * of system time. We don't need to calculate the time delta between
+ * the reception and now as get_device_system_crosststamp() does the
+ * required interpolation.
+ */
+ ret = get_device_system_crosststamp(hv_ptp_get_syncdevicetime,
+ NULL, &host_ts.snap, xtstamp);
+
+ spin_unlock_irqrestore(&host_ts.lock, flags);
+
+ return ret;
+}
+
+static struct ptp_clock_info ptp_hyperv_info = {
+ .name = "hyperv",
+ .enable = hv_ptp_enable,
+ .adjtime = hv_ptp_adjtime,
+ .adjfreq = hv_ptp_adjfreq,
+ .gettime64 = hv_ptp_gettime,
+ .getcrosststamp = hv_ptp_getcrosststamp,
+ .settime64 = hv_ptp_settime,
+ .owner = THIS_MODULE,
+};
+
+static struct ptp_clock *hv_ptp_clock;
+
static int hv_timesync_init(struct hv_util_service *srv)
{
+ /* TimeSync requires Hyper-V clocksource. */
+ if (!hyperv_cs)
+ return -ENODEV;
+
INIT_WORK(&wrk.work, hv_set_host_time);
+
+ /*
+ * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
+ * disabled but the driver is still useful without the PTP device
+ * as it still handles the ICTIMESYNCFLAG_SYNC case.
+ */
+ hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
+ if (IS_ERR_OR_NULL(hv_ptp_clock)) {
+ pr_err("cannot register PTP clock: %ld\n",
+ PTR_ERR(hv_ptp_clock));
+ hv_ptp_clock = NULL;
+ }
+
return 0;
}
static void hv_timesync_deinit(void)
{
+ if (hv_ptp_clock)
+ ptp_clock_unregister(hv_ptp_clock);
cancel_work_sync(&wrk.work);
}
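
Both hv_set_host_time() and hv_ptp_gettime() above extrapolate the last host time sample with a fresh read of the Hyper-V reference counter and then convert the result, which is in 100 ns units, to a timespec64. A minimal sketch of that conversion, assuming (as its use in this file suggests) that WLTIMEDELTA is the offset in 100 ns units between the Windows epoch (1601) and the Unix epoch (1970):

/* Extrapolate the last (host_time, ref_time) sample to "now_ref". */
static u64 extrapolate_host_time(u64 sample_host, u64 sample_ref, u64 now_ref)
{
	return sample_host + (now_ref - sample_ref);
}

/* Convert a Hyper-V time value (100 ns units since 1601) to a timespec64. */
static struct timespec64 host_time_to_ts64(u64 host_time)
{
	return ns_to_timespec64((host_time - WLTIMEDELTA) * 100);
}
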
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 0675b395ce5c..884f83bba1ab 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -29,6 +29,7 @@
#include <asm/sync_bitops.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
+#include <linux/interrupt.h>
/*
* Timeout for services such as KVP and fcopy.
@@ -40,95 +41,9 @@
*/
#define HV_UTIL_NEGO_TIMEOUT 55
-/*
- * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
- * is set by CPUID(HVCPUID_VERSION_FEATURES).
- */
-enum hv_cpuid_function {
- HVCPUID_VERSION_FEATURES = 0x00000001,
- HVCPUID_VENDOR_MAXFUNCTION = 0x40000000,
- HVCPUID_INTERFACE = 0x40000001,
-
- /*
- * The remaining functions depend on the value of
- * HVCPUID_INTERFACE
- */
- HVCPUID_VERSION = 0x40000002,
- HVCPUID_FEATURES = 0x40000003,
- HVCPUID_ENLIGHTENMENT_INFO = 0x40000004,
- HVCPUID_IMPLEMENTATION_LIMITS = 0x40000005,
-};
-
-#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE 0x400
-
-#define HV_X64_MSR_CRASH_P0 0x40000100
-#define HV_X64_MSR_CRASH_P1 0x40000101
-#define HV_X64_MSR_CRASH_P2 0x40000102
-#define HV_X64_MSR_CRASH_P3 0x40000103
-#define HV_X64_MSR_CRASH_P4 0x40000104
-#define HV_X64_MSR_CRASH_CTL 0x40000105
-
-#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)
-
-/* Define version of the synthetic interrupt controller. */
-#define HV_SYNIC_VERSION (1)
-
-#define HV_ANY_VP (0xFFFFFFFF)
-
/* Define synthetic interrupt controller flag constants. */
#define HV_EVENT_FLAGS_COUNT (256 * 8)
-#define HV_EVENT_FLAGS_BYTE_COUNT (256)
-#define HV_EVENT_FLAGS_DWORD_COUNT (256 / sizeof(u32))
-
-/* Define invalid partition identifier. */
-#define HV_PARTITION_ID_INVALID ((u64)0x0)
-
-/* Define port type. */
-enum hv_port_type {
- HVPORT_MSG = 1,
- HVPORT_EVENT = 2,
- HVPORT_MONITOR = 3
-};
-
-/* Define port information structure. */
-struct hv_port_info {
- enum hv_port_type port_type;
- u32 padding;
- union {
- struct {
- u32 target_sint;
- u32 target_vp;
- u64 rsvdz;
- } message_port_info;
- struct {
- u32 target_sint;
- u32 target_vp;
- u16 base_flag_number;
- u16 flag_count;
- u32 rsvdz;
- } event_port_info;
- struct {
- u64 monitor_address;
- u64 rsvdz;
- } monitor_port_info;
- };
-};
-
-struct hv_connection_info {
- enum hv_port_type port_type;
- u32 padding;
- union {
- struct {
- u64 rsvdz;
- } message_connection_info;
- struct {
- u64 rsvdz;
- } event_connection_info;
- struct {
- u64 monitor_address;
- } monitor_connection_info;
- };
-};
+#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long))
/*
* Timer configuration register.
@@ -146,18 +61,10 @@ union hv_timer_config {
};
};
-/* Define the number of message buffers associated with each port. */
-#define HV_PORT_MESSAGE_BUFFER_COUNT (16)
/* Define the synthetic interrupt controller event flags format. */
union hv_synic_event_flags {
- u8 flags8[HV_EVENT_FLAGS_BYTE_COUNT];
- u32 flags32[HV_EVENT_FLAGS_DWORD_COUNT];
-};
-
-/* Define the synthetic interrupt flags page layout. */
-struct hv_synic_event_flags_page {
- union hv_synic_event_flags sintevent_flags[HV_SYNIC_SINT_COUNT];
+ unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
};
/* Define SynIC control register. */
@@ -261,6 +168,8 @@ struct hv_monitor_page {
u8 rsvdz4[1984];
};
+#define HV_HYPERCALL_PARAM_ALIGN sizeof(u64)
+
/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
union hv_connection_id connectionid;
@@ -270,56 +179,6 @@ struct hv_input_post_message {
u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};
-/*
- * Versioning definitions used for guests reporting themselves to the
- * hypervisor, and visa versa.
- */
-
-/* Version info reported by guest OS's */
-enum hv_guest_os_vendor {
- HVGUESTOS_VENDOR_MICROSOFT = 0x0001
-};
-
-enum hv_guest_os_microsoft_ids {
- HVGUESTOS_MICROSOFT_UNDEFINED = 0x00,
- HVGUESTOS_MICROSOFT_MSDOS = 0x01,
- HVGUESTOS_MICROSOFT_WINDOWS3X = 0x02,
- HVGUESTOS_MICROSOFT_WINDOWS9X = 0x03,
- HVGUESTOS_MICROSOFT_WINDOWSNT = 0x04,
- HVGUESTOS_MICROSOFT_WINDOWSCE = 0x05
-};
-
-/*
- * Declare the MSR used to identify the guest OS.
- */
-#define HV_X64_MSR_GUEST_OS_ID 0x40000000
-
-union hv_x64_msr_guest_os_id_contents {
- u64 as_uint64;
- struct {
- u64 build_number:16;
- u64 service_version:8; /* Service Pack, etc. */
- u64 minor_version:8;
- u64 major_version:8;
- u64 os_id:8; /* enum hv_guest_os_microsoft_ids (if Vendor=MS) */
- u64 vendor_id:16; /* enum hv_guest_os_vendor */
- };
-};
-
-/*
- * Declare the MSR used to setup pages used to communicate with the hypervisor.
- */
-#define HV_X64_MSR_HYPERCALL 0x40000001
-
-union hv_x64_msr_hypercall_contents {
- u64 as_uint64;
- struct {
- u64 enable:1;
- u64 reserved:11;
- u64 guest_physical_address:52;
- };
-};
-
enum {
VMBUS_MESSAGE_CONNECTION_ID = 1,
@@ -331,111 +190,44 @@ enum {
VMBUS_MESSAGE_SINT = 2,
};
-/* #defines */
-
-#define HV_PRESENT_BIT 0x80000000
-
-/*
- * The guest OS needs to register the guest ID with the hypervisor.
- * The guest ID is a 64 bit entity and the structure of this ID is
- * specified in the Hyper-V specification:
- *
- * http://msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
- *
- * While the current guideline does not specify how Linux guest ID(s)
- * need to be generated, our plan is to publish the guidelines for
- * Linux and other guest operating systems that currently are hosted
- * on Hyper-V. The implementation here conforms to this yet
- * unpublished guidelines.
- *
- *
- * Bit(s)
- * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
- * 62:56 - Os Type; Linux is 0x100
- * 55:48 - Distro specific identification
- * 47:16 - Linux kernel version number
- * 15:0 - Distro specific identification
- *
- *
- */
-
-#define HV_LINUX_VENDOR_ID 0x8100
-
/*
- * Generate the guest ID based on the guideline described above.
+ * Per cpu state for channel handling
*/
+struct hv_per_cpu_context {
+ void *synic_message_page;
+ void *synic_event_page;
+ /*
+ * buffer to post messages to the host.
+ */
+ void *post_msg_page;
-static inline __u64 generate_guest_id(__u8 d_info1, __u32 kernel_version,
- __u16 d_info2)
-{
- __u64 guest_id = 0;
-
- guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
- guest_id |= (((__u64)(d_info1)) << 48);
- guest_id |= (((__u64)(kernel_version)) << 16);
- guest_id |= ((__u64)(d_info2));
-
- return guest_id;
-}
-
-
-#define HV_CPU_POWER_MANAGEMENT (1 << 0)
-#define HV_RECOMMENDATIONS_MAX 4
-
-#define HV_X64_MAX 5
-#define HV_CAPS_MAX 8
-
-
-#define HV_HYPERCALL_PARAM_ALIGN sizeof(u64)
-
-
-/* Service definitions */
-
-#define HV_SERVICE_PARENT_PORT (0)
-#define HV_SERVICE_PARENT_CONNECTION (0)
-
-#define HV_SERVICE_CONNECT_RESPONSE_SUCCESS (0)
-#define HV_SERVICE_CONNECT_RESPONSE_INVALID_PARAMETER (1)
-#define HV_SERVICE_CONNECT_RESPONSE_UNKNOWN_SERVICE (2)
-#define HV_SERVICE_CONNECT_RESPONSE_CONNECTION_REJECTED (3)
-
-#define HV_SERVICE_CONNECT_REQUEST_MESSAGE_ID (1)
-#define HV_SERVICE_CONNECT_RESPONSE_MESSAGE_ID (2)
-#define HV_SERVICE_DISCONNECT_REQUEST_MESSAGE_ID (3)
-#define HV_SERVICE_DISCONNECT_RESPONSE_MESSAGE_ID (4)
-#define HV_SERVICE_MAX_MESSAGE_ID (4)
-
-#define HV_SERVICE_PROTOCOL_VERSION (0x0010)
-#define HV_CONNECT_PAYLOAD_BYTE_COUNT 64
-
-/* #define VMBUS_REVISION_NUMBER 6 */
-
-/* Our local vmbus's port and connection id. Anything >0 is fine */
-/* #define VMBUS_PORT_ID 11 */
+ /*
+ * Starting with win8, we can take channel interrupts on any CPU;
+ * we will manage the tasklet that handles events messages on a per CPU
+ * basis.
+ */
+ struct tasklet_struct msg_dpc;
-/* 628180B8-308D-4c5e-B7DB-1BEB62E62EF4 */
-static const uuid_le VMBUS_SERVICE_ID = {
- .b = {
- 0xb8, 0x80, 0x81, 0x62, 0x8d, 0x30, 0x5e, 0x4c,
- 0xb7, 0xdb, 0x1b, 0xeb, 0x62, 0xe6, 0x2e, 0xf4
- },
+ /*
+ * To optimize the mapping of relid to channel, maintain
+ * per-cpu list of the channels based on their CPU affinity.
+ */
+ struct list_head chan_list;
+ struct clock_event_device *clk_evt;
};
-
-
struct hv_context {
/* We only support running on top of Hyper-V
* So at this point this really can only contain the Hyper-V ID
*/
u64 guestid;
- void *hypercall_page;
void *tsc_page;
bool synic_initialized;
- void *synic_message_page[NR_CPUS];
- void *synic_event_page[NR_CPUS];
+ struct hv_per_cpu_context __percpu *cpu_context;
+
/*
* Hypervisor's notion of virtual processor ID is different from
* Linux' notion of CPU ID. This information can only be retrieved
@@ -446,26 +238,7 @@ struct hv_context {
* Linux cpuid 'a'.
*/
u32 vp_index[NR_CPUS];
- /*
- * Starting with win8, we can take channel interrupts on any CPU;
- * we will manage the tasklet that handles events messages on a per CPU
- * basis.
- */
- struct tasklet_struct *event_dpc[NR_CPUS];
- struct tasklet_struct *msg_dpc[NR_CPUS];
- /*
- * To optimize the mapping of relid to channel, maintain
- * per-cpu list of the channels based on their CPU affinity.
- */
- struct list_head percpu_list[NR_CPUS];
- /*
- * buffer to post messages to the host.
- */
- void *post_msg_page[NR_CPUS];
- /*
- * Support PV clockevent device.
- */
- struct clock_event_device *clk_evt[NR_CPUS];
+
/*
* To manage allocations in a NUMA node.
* Array indexed by numa node ID.
@@ -475,14 +248,6 @@ struct hv_context {
extern struct hv_context hv_context;
-struct ms_hyperv_tsc_page {
- volatile u32 tsc_sequence;
- u32 reserved1;
- volatile u64 tsc_scale;
- volatile s64 tsc_offset;
- u64 reserved2[509];
-};
-
struct hv_ring_buffer_debug_info {
u32 current_interrupt_mask;
u32 current_read_index;
@@ -495,8 +260,6 @@ struct hv_ring_buffer_debug_info {
extern int hv_init(void);
-extern void hv_cleanup(bool crash);
-
extern int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,
void *payload, size_t payload_size);
@@ -505,20 +268,12 @@ extern int hv_synic_alloc(void);
extern void hv_synic_free(void);
-extern void hv_synic_init(void *irqarg);
+extern int hv_synic_init(unsigned int cpu);
-extern void hv_synic_cleanup(void *arg);
+extern int hv_synic_cleanup(unsigned int cpu);
extern void hv_synic_clockevents_cleanup(void);
-/*
- * Host version information.
- */
-extern unsigned int host_info_eax;
-extern unsigned int host_info_ebx;
-extern unsigned int host_info_ecx;
-extern unsigned int host_info_edx;
-
/* Interface */
@@ -528,20 +283,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct vmbus_channel *channel,
- struct kvec *kv_list,
- u32 kv_count, bool lock,
- bool kick_q);
+ const struct kvec *kv_list, u32 kv_count);
int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
u64 *requestid, bool raw);
-void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
- struct hv_ring_buffer_debug_info *debug_info);
-
-void hv_begin_read(struct hv_ring_buffer_info *rbi);
-
-u32 hv_end_read(struct hv_ring_buffer_info *rbi);
+void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+ struct hv_ring_buffer_debug_info *debug_info);
/*
* Maximum channels is determined by the size of the interrupt page
@@ -608,6 +357,11 @@ struct vmbus_msginfo {
extern struct vmbus_connection vmbus_connection;
+static inline void vmbus_send_interrupt(u32 relid)
+{
+ sync_set_bit(relid, vmbus_connection.send_int_page);
+}
+
enum vmbus_message_handler_type {
/* The related handler can sleep. */
VMHT_BLOCKING = 0,
@@ -625,41 +379,6 @@ struct vmbus_channel_message_table_entry {
extern struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT];
-/* Free the message slot and signal end-of-message if required */
-static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
-{
- /*
- * On crash we're reading some other CPU's message page and we need
- * to be careful: this other CPU may already had cleared the header
- * and the host may already had delivered some other message there.
- * In case we blindly write msg->header.message_type we're going
- * to lose it. We can still lose a message of the same type but
- * we count on the fact that there can only be one
- * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
- * on crash.
- */
- if (cmpxchg(&msg->header.message_type, old_msg_type,
- HVMSG_NONE) != old_msg_type)
- return;
-
- /*
- * Make sure the write to MessageType (ie set to
- * HVMSG_NONE) happens before we read the
- * MessagePending and EOMing. Otherwise, the EOMing
- * will not deliver any more messages since there is
- * no empty slot
- */
- mb();
-
- if (msg->header.message_flags.msg_pending) {
- /*
- * This will cause message queue rescan to
- * possibly deliver another msg from the
- * hypervisor
- */
- wrmsrl(HV_X64_MSR_EOM, 0);
- }
-}
/* General vmbus interface */
@@ -670,10 +389,6 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
-/* static void */
-/* VmbusChildDeviceDestroy( */
-/* struct hv_device *); */
-
struct vmbus_channel *relid2channel(u32 relid);
void vmbus_free_channels(void);
@@ -683,7 +398,7 @@ void vmbus_free_channels(void);
int vmbus_connect(void);
void vmbus_disconnect(void);
-int vmbus_post_msg(void *buffer, size_t buflen);
+int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);
void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);
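
The new struct hv_per_cpu_context replaces the NR_CPUS-sized arrays with a percpu allocation reached through hv_context.cpu_context. A minimal sketch of the two access patterns used by the vmbus_drv.c hunks further down (this_cpu_ptr() from interrupt context, per_cpu_ptr() when walking CPUs); the helper names here are illustrative only:

#include <linux/percpu.h>
#include <linux/interrupt.h>

/* From the ISR path: the context of the CPU we are running on. */
static void sched_local_msg_dpc(void)
{
	struct hv_per_cpu_context *hv_cpu =
		this_cpu_ptr(hv_context.cpu_context);

	tasklet_schedule(&hv_cpu->msg_dpc);
}

/* From process context, e.g. module exit: every online CPU's context. */
static void kill_all_msg_dpcs(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu =
			per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_kill(&hv_cpu->msg_dpc);
	}
}
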
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 308dbda700eb..87799e81af97 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -32,26 +32,6 @@
#include "hyperv_vmbus.h"
-void hv_begin_read(struct hv_ring_buffer_info *rbi)
-{
- rbi->ring_buffer->interrupt_mask = 1;
- virt_mb();
-}
-
-u32 hv_end_read(struct hv_ring_buffer_info *rbi)
-{
-
- rbi->ring_buffer->interrupt_mask = 0;
- virt_mb();
-
- /*
- * Now check to see if the ring buffer is still empty.
- * If it is not, we raced and we need to process new
- * incoming messages.
- */
- return hv_get_bytes_to_read(rbi);
-}
-
/*
* When we write to the ring buffer, check if the host needs to
* be signaled. Here is the details of this protocol:
@@ -77,8 +57,7 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
* host logic is fixed.
*/
-static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
- bool kick_q)
+static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rbi = &channel->outbound;
@@ -117,11 +96,9 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
/* Get the next read location for the specified ring buffer. */
static inline u32
-hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
+hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
{
- u32 next = ring_info->ring_buffer->read_index;
-
- return next;
+ return ring_info->ring_buffer->read_index;
}
/*
@@ -129,13 +106,14 @@ hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
* This allows the caller to skip.
*/
static inline u32
-hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
- u32 offset)
+hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
+ u32 offset)
{
u32 next = ring_info->ring_buffer->read_index;
next += offset;
- next %= ring_info->ring_datasize;
+ if (next >= ring_info->ring_datasize)
+ next -= ring_info->ring_datasize;
return next;
}
@@ -151,7 +129,7 @@ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
/* Get the size of the ring buffer. */
static inline u32
-hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
+hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
return ring_info->ring_datasize;
}
@@ -168,7 +146,7 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
* Assume there is enough room. Handles wrap-around in src case only!!
*/
static u32 hv_copyfrom_ringbuffer(
- struct hv_ring_buffer_info *ring_info,
+ const struct hv_ring_buffer_info *ring_info,
void *dest,
u32 destlen,
u32 start_read_offset)
@@ -179,7 +157,8 @@ static u32 hv_copyfrom_ringbuffer(
memcpy(dest, ring_buffer + start_read_offset, destlen);
start_read_offset += destlen;
- start_read_offset %= ring_buffer_size;
+ if (start_read_offset >= ring_buffer_size)
+ start_read_offset -= ring_buffer_size;
return start_read_offset;
}
@@ -192,7 +171,7 @@ static u32 hv_copyfrom_ringbuffer(
static u32 hv_copyto_ringbuffer(
struct hv_ring_buffer_info *ring_info,
u32 start_write_offset,
- void *src,
+ const void *src,
u32 srclen)
{
void *ring_buffer = hv_get_ring_buffer(ring_info);
@@ -201,14 +180,15 @@ static u32 hv_copyto_ringbuffer(
memcpy(ring_buffer + start_write_offset, src, srclen);
start_write_offset += srclen;
- start_write_offset %= ring_buffer_size;
+ if (start_write_offset >= ring_buffer_size)
+ start_write_offset -= ring_buffer_size;
return start_write_offset;
}
/* Get various debug metrics for the specified ring buffer. */
-void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
- struct hv_ring_buffer_debug_info *debug_info)
+void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+ struct hv_ring_buffer_debug_info *debug_info)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
@@ -285,8 +265,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
- struct kvec *kv_list, u32 kv_count, bool lock,
- bool kick_q)
+ const struct kvec *kv_list, u32 kv_count)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -298,13 +277,15 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
unsigned long flags = 0;
struct hv_ring_buffer_info *outring_info = &channel->outbound;
+ if (channel->rescind)
+ return -ENODEV;
+
for (i = 0; i < kv_count; i++)
totalbytes_towrite += kv_list[i].iov_len;
totalbytes_towrite += sizeof(u64);
- if (lock)
- spin_lock_irqsave(&outring_info->ring_lock, flags);
+ spin_lock_irqsave(&outring_info->ring_lock, flags);
bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
@@ -314,8 +295,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
* is empty since the read index == write index.
*/
if (bytes_avail_towrite <= totalbytes_towrite) {
- if (lock)
- spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return -EAGAIN;
}
@@ -346,10 +326,13 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
hv_set_next_write_location(outring_info, next_write_location);
- if (lock)
- spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+ hv_signal_on_write(old_write, channel);
+
+ if (channel->rescind)
+ return -ENODEV;
- hv_signal_on_write(old_write, channel, kick_q);
return 0;
}
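
The ring_buffer.c hunks replace the "% ring_datasize" wrap-around with a conditional subtraction. That is equivalent here because the starting offset is already below the ring size and each advance is smaller than the ring size, so the sum can overshoot the end by at most one ring length; it also avoids a division on the hot path. A minimal sketch of the pattern, assuming len < ring_size:

static u32 ring_advance(u32 offset, u32 len, u32 ring_size)
{
	offset += len;
	if (offset >= ring_size)
		offset -= ring_size;

	return offset;
}
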
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 230c62e7f567..f7f6b9144b07 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -54,31 +54,7 @@ static struct acpi_device *hv_acpi_dev;
static struct completion probe_event;
-
-static void hyperv_report_panic(struct pt_regs *regs)
-{
- static bool panic_reported;
-
- /*
- * We prefer to report panic on 'die' chain as we have proper
- * registers to report, but if we miss it (e.g. on BUG()) we need
- * to report it on 'panic'.
- */
- if (panic_reported)
- return;
- panic_reported = true;
-
- wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
- wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
- wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
- wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
- wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);
-
- /*
- * Let Hyper-V know there is crash data available
- */
- wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
-}
+static int hyperv_cpuhp_online;
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
void *args)
@@ -859,9 +835,10 @@ static void vmbus_onmessage_work(struct work_struct *work)
kfree(ctx);
}
-static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
+static void hv_process_timer_expiration(struct hv_message *msg,
+ struct hv_per_cpu_context *hv_cpu)
{
- struct clock_event_device *dev = hv_context.clk_evt[cpu];
+ struct clock_event_device *dev = hv_cpu->clk_evt;
if (dev->event_handler)
dev->event_handler(dev);
@@ -871,8 +848,8 @@ static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
void vmbus_on_msg_dpc(unsigned long data)
{
- int cpu = smp_processor_id();
- void *page_addr = hv_context.synic_message_page[cpu];
+ struct hv_per_cpu_context *hv_cpu = (void *)data;
+ void *page_addr = hv_cpu->synic_message_page;
struct hv_message *msg = (struct hv_message *)page_addr +
VMBUS_MESSAGE_SINT;
struct vmbus_channel_message_header *hdr;
@@ -908,16 +885,88 @@ msg_handled:
vmbus_signal_eom(msg, message_type);
}
+
+/*
+ * Direct callback for channels using other deferred processing
+ */
+static void vmbus_channel_isr(struct vmbus_channel *channel)
+{
+ void (*callback_fn)(void *);
+
+ callback_fn = READ_ONCE(channel->onchannel_callback);
+ if (likely(callback_fn != NULL))
+ (*callback_fn)(channel->channel_callback_context);
+}
+
+/*
+ * Schedule all channels with events pending
+ */
+static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
+{
+ unsigned long *recv_int_page;
+ u32 maxbits, relid;
+
+ if (vmbus_proto_version < VERSION_WIN8) {
+ maxbits = MAX_NUM_CHANNELS_SUPPORTED;
+ recv_int_page = vmbus_connection.recv_int_page;
+ } else {
+ /*
+ * When the host is win8 and beyond, the event page
+ * can be directly checked to get the id of the channel
+ * that has the interrupt pending.
+ */
+ void *page_addr = hv_cpu->synic_event_page;
+ union hv_synic_event_flags *event
+ = (union hv_synic_event_flags *)page_addr +
+ VMBUS_MESSAGE_SINT;
+
+ maxbits = HV_EVENT_FLAGS_COUNT;
+ recv_int_page = event->flags;
+ }
+
+ if (unlikely(!recv_int_page))
+ return;
+
+ for_each_set_bit(relid, recv_int_page, maxbits) {
+ struct vmbus_channel *channel;
+
+ if (!sync_test_and_clear_bit(relid, recv_int_page))
+ continue;
+
+ /* Special case - vmbus channel protocol msg */
+ if (relid == 0)
+ continue;
+
+ /* Find channel based on relid */
+ list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) {
+ if (channel->offermsg.child_relid != relid)
+ continue;
+
+ switch (channel->callback_mode) {
+ case HV_CALL_ISR:
+ vmbus_channel_isr(channel);
+ break;
+
+ case HV_CALL_BATCHED:
+ hv_begin_read(&channel->inbound);
+ /* fallthrough */
+ case HV_CALL_DIRECT:
+ tasklet_schedule(&channel->callback_event);
+ }
+ }
+ }
+}
+
static void vmbus_isr(void)
{
- int cpu = smp_processor_id();
- void *page_addr;
+ struct hv_per_cpu_context *hv_cpu
+ = this_cpu_ptr(hv_context.cpu_context);
+ void *page_addr = hv_cpu->synic_event_page;
struct hv_message *msg;
union hv_synic_event_flags *event;
bool handled = false;
- page_addr = hv_context.synic_event_page[cpu];
- if (page_addr == NULL)
+ if (unlikely(page_addr == NULL))
return;
event = (union hv_synic_event_flags *)page_addr +
@@ -932,10 +981,8 @@ static void vmbus_isr(void)
(vmbus_proto_version == VERSION_WIN7)) {
/* Since we are a child, we only need to check bit 0 */
- if (sync_test_and_clear_bit(0,
- (unsigned long *) &event->flags32[0])) {
+ if (sync_test_and_clear_bit(0, event->flags))
handled = true;
- }
} else {
/*
* Our host is win8 or above. The signaling mechanism
@@ -947,18 +994,17 @@ static void vmbus_isr(void)
}
if (handled)
- tasklet_schedule(hv_context.event_dpc[cpu]);
-
+ vmbus_chan_sched(hv_cpu);
- page_addr = hv_context.synic_message_page[cpu];
+ page_addr = hv_cpu->synic_message_page;
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
/* Check if there are actual msgs to be processed */
if (msg->header.message_type != HVMSG_NONE) {
if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
- hv_process_timer_expiration(msg, cpu);
+ hv_process_timer_expiration(msg, hv_cpu);
else
- tasklet_schedule(hv_context.msg_dpc[cpu]);
+ tasklet_schedule(&hv_cpu->msg_dpc);
}
add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
@@ -986,7 +1032,7 @@ static int vmbus_bus_init(void)
ret = bus_register(&hv_bus);
if (ret)
- goto err_cleanup;
+ return ret;
hv_setup_vmbus_irq(vmbus_isr);
@@ -997,14 +1043,16 @@ static int vmbus_bus_init(void)
* Initialize the per-cpu interrupt state and
* connect to the host.
*/
- on_each_cpu(hv_synic_init, NULL, 1);
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv:online",
+ hv_synic_init, hv_synic_cleanup);
+ if (ret < 0)
+ goto err_alloc;
+ hyperv_cpuhp_online = ret;
+
ret = vmbus_connect();
if (ret)
goto err_connect;
- if (vmbus_proto_version > VERSION_WIN7)
- cpu_hotplug_disable();
-
/*
* Only register if the crash MSRs are available
*/
@@ -1019,16 +1067,13 @@ static int vmbus_bus_init(void)
return 0;
err_connect:
- on_each_cpu(hv_synic_cleanup, NULL, 1);
+ cpuhp_remove_state(hyperv_cpuhp_online);
err_alloc:
hv_synic_free();
hv_remove_vmbus_irq();
bus_unregister(&hv_bus);
-err_cleanup:
- hv_cleanup(false);
-
return ret;
}
@@ -1478,13 +1523,13 @@ static struct acpi_driver vmbus_acpi_driver = {
static void hv_kexec_handler(void)
{
- int cpu;
-
hv_synic_clockevents_cleanup();
vmbus_initiate_unload(false);
- for_each_online_cpu(cpu)
- smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
- hv_cleanup(false);
+ vmbus_connection.conn_state = DISCONNECTED;
+ /* Make sure conn_state is set as hv_synic_cleanup checks for it */
+ mb();
+ cpuhp_remove_state(hyperv_cpuhp_online);
+ hyperv_cleanup();
};
static void hv_crash_handler(struct pt_regs *regs)
@@ -1495,8 +1540,9 @@ static void hv_crash_handler(struct pt_regs *regs)
* doing the cleanup for current CPU only. This should be sufficient
* for kdump.
*/
- hv_synic_cleanup(NULL);
- hv_cleanup(true);
+ vmbus_connection.conn_state = DISCONNECTED;
+ hv_synic_cleanup(smp_processor_id());
+ hyperv_cleanup();
};
static int __init hv_acpi_init(void)
@@ -1547,24 +1593,24 @@ static void __exit vmbus_exit(void)
hv_synic_clockevents_cleanup();
vmbus_disconnect();
hv_remove_vmbus_irq();
- for_each_online_cpu(cpu)
- tasklet_kill(hv_context.msg_dpc[cpu]);
+ for_each_online_cpu(cpu) {
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ tasklet_kill(&hv_cpu->msg_dpc);
+ }
vmbus_free_channels();
+
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
unregister_die_notifier(&hyperv_die_block);
atomic_notifier_chain_unregister(&panic_notifier_list,
&hyperv_panic_block);
}
bus_unregister(&hv_bus);
- hv_cleanup(false);
- for_each_online_cpu(cpu) {
- tasklet_kill(hv_context.event_dpc[cpu]);
- smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
- }
+
+ cpuhp_remove_state(hyperv_cpuhp_online);
hv_synic_free();
acpi_bus_unregister_driver(&vmbus_acpi_driver);
- if (vmbus_proto_version > VERSION_WIN7)
- cpu_hotplug_enable();
}
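
The vmbus_drv.c hunk moves per-CPU SynIC setup and teardown from on_each_cpu() calls to the CPU hotplug state machine: hv_synic_init() now runs whenever a CPU comes online (including CPUs already online at registration time) and hv_synic_cleanup() when one goes offline or the state is removed. A generic sketch of that pattern, not specific to Hyper-V:

#include <linux/cpuhotplug.h>

static int example_cpu_online(unsigned int cpu)
{
	/* per-CPU initialization */
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	/* per-CPU cleanup */
	return 0;
}

static int example_hp_state;

static int example_register(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_cpu_online, example_cpu_offline);
	if (ret < 0)
		return ret;

	example_hp_state = ret;		/* dynamic state id, needed for removal */
	return 0;
}

static void example_unregister(void)
{
	cpuhp_remove_state(example_hp_state);
}
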
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 17741969026e..26cfac3e6de7 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -242,6 +242,7 @@ static void *etm_setup_aux(int event_cpu, void **pages,
if (!sink_ops(sink)->alloc_buffer)
goto err;
+ cpu = cpumask_first(mask);
/* Get the AUX specific data from the sink buffer */
event_data->snk_config =
sink_ops(sink)->alloc_buffer(sink, cpu, pages,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 031480f2c34d..d1340fb4e457 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -216,10 +216,14 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
goto out;
/* Go from generic option to ETMv4 specifics */
- if (attr->config & BIT(ETM_OPT_CYCACC))
- config->cfg |= ETMv4_MODE_CYCACC;
+ if (attr->config & BIT(ETM_OPT_CYCACC)) {
+ config->cfg |= BIT(4);
+ /* TRM: Must program this for cycacc to work */
+ config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
+ }
if (attr->config & BIT(ETM_OPT_TS))
- config->cfg |= ETMv4_MODE_TIMESTAMP;
+ /* bit[11], Global timestamp tracing bit */
+ config->cfg |= BIT(11);
out:
return ret;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index ba8d3f86de21..b3b5ea7b7fb3 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -146,6 +146,7 @@
#define ETM_ARCH_V4 0x40
#define ETMv4_SYNC_MASK 0x1F
#define ETM_CYC_THRESHOLD_MASK 0xFFF
+#define ETM_CYC_THRESHOLD_DEFAULT 0x100
#define ETMv4_EVENT_MASK 0xFF
#define ETM_CNTR_MAX_VAL 0xFFFF
#define ETM_TRACEID_MASK 0x3f
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index e4c55c5f9988..93fc26f01bab 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -356,7 +356,7 @@ static void stm_generic_unlink(struct stm_data *stm_data,
if (!drvdata || !drvdata->csdev)
return;
- stm_disable(drvdata->csdev, NULL);
+ coresight_disable(drvdata->csdev);
}
static phys_addr_t
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index a579a0f25840..22c1aeeb6421 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/ti-aemif.h>
#define TA_SHIFT 2
#define RHOLD_SHIFT 4
@@ -335,6 +336,8 @@ static int aemif_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct device_node *child_np;
struct aemif_device *aemif;
+ struct aemif_platform_data *pdata;
+ struct of_dev_auxdata *dev_lookup;
if (np == NULL)
return 0;
@@ -343,6 +346,9 @@ static int aemif_probe(struct platform_device *pdev)
if (!aemif)
return -ENOMEM;
+ pdata = dev_get_platdata(&pdev->dev);
+ dev_lookup = pdata ? pdata->dev_lookup : NULL;
+
platform_set_drvdata(pdev, aemif);
aemif->clk = devm_clk_get(dev, NULL);
@@ -390,7 +396,7 @@ static int aemif_probe(struct platform_device *pdev)
* parameters are set.
*/
for_each_available_child_of_node(np, child_np) {
- ret = of_platform_populate(child_np, NULL, NULL, dev);
+ ret = of_platform_populate(child_np, NULL, dev_lookup, dev);
if (ret < 0)
goto error;
}
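
The ti-aemif change lets board code hand an of_dev_auxdata lookup table through platform data, so child devices populated from the device tree can still receive legacy platform data. A hedged sketch of what a caller might provide; the compatible string, address and device name in the OF_DEV_AUXDATA entry are made-up examples:

#include <linux/of_platform.h>
#include <linux/platform_data/ti-aemif.h>

static struct of_dev_auxdata aemif_auxdata_lookup[] = {
	/* hypothetical child device needing legacy platform data */
	OF_DEV_AUXDATA("ti,davinci-nand", 0x02000000, "davinci-nand.0", NULL),
	{ /* sentinel */ },
};

static struct aemif_platform_data aemif_pdata = {
	.dev_lookup = aemif_auxdata_lookup,
};
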
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 64971baf11fa..c290990d73ed 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -474,11 +474,15 @@ config SRAM
bool "Generic on-chip SRAM driver"
depends on HAS_IOMEM
select GENERIC_ALLOCATOR
+ select SRAM_EXEC if ARM
help
This driver allows you to declare a memory region to be managed by
the genalloc API. It is supposed to be used for small on-chip SRAM
areas found on many SoCs.
+config SRAM_EXEC
+ bool
+
config VEXPRESS_SYSCFG
bool "Versatile Express System Configuration driver"
depends on VEXPRESS_CONFIG
@@ -487,6 +491,7 @@ config VEXPRESS_SYSCFG
ARM Ltd. Versatile Express uses specialised platform configuration
bus. System Configuration interface is one of the possible means
of generating transactions on this bus.
+
config PANEL
tristate "Parallel port LCD/Keypad Panel support"
depends on PARPORT
@@ -494,14 +499,14 @@ config PANEL
Say Y here if you have an HD44780 or KS-0074 LCD connected to your
parallel port. This driver also features 4 and 6-key keypads. The LCD
is accessible through the /dev/lcd char device (10, 156), and the
- keypad through /dev/keypad (10, 185). Both require misc device to be
- enabled. This code can either be compiled as a module, or linked into
- the kernel and started at boot. If you don't understand what all this
- is about, say N.
+ keypad through /dev/keypad (10, 185). This code can either be
+ compiled as a module, or linked into the kernel and started at boot.
+ If you don't understand what all this is about, say N.
+
+if PANEL
config PANEL_PARPORT
int "Default parallel port number (0=LPT1)"
- depends on PANEL
range 0 255
default "0"
---help---
@@ -513,7 +518,6 @@ config PANEL_PARPORT
config PANEL_PROFILE
int "Default panel profile (0-5, 0=custom)"
- depends on PANEL
range 0 5
default "5"
---help---
@@ -534,7 +538,7 @@ config PANEL_PROFILE
for experts.
config PANEL_KEYPAD
- depends on PANEL && PANEL_PROFILE="0"
+ depends on PANEL_PROFILE="0"
int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
range 0 3
default 0
@@ -551,7 +555,7 @@ config PANEL_KEYPAD
supports simultaneous keys pressed when the keypad supports them.
config PANEL_LCD
- depends on PANEL && PANEL_PROFILE="0"
+ depends on PANEL_PROFILE="0"
int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)"
range 0 5
default 0
@@ -574,7 +578,7 @@ config PANEL_LCD
that those values changed from the 2.4 driver for better consistency.
config PANEL_LCD_HEIGHT
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
int "Number of lines on the LCD (1-2)"
range 1 2
default 2
@@ -583,7 +587,7 @@ config PANEL_LCD_HEIGHT
It can either be 1 or 2.
config PANEL_LCD_WIDTH
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
int "Number of characters per line on the LCD (1-40)"
range 1 40
default 40
@@ -592,7 +596,7 @@ config PANEL_LCD_WIDTH
Common values are 16,20,24,40.
config PANEL_LCD_BWIDTH
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
int "Internal LCD line width (1-40, 40 by default)"
range 1 40
default 40
@@ -608,7 +612,7 @@ config PANEL_LCD_BWIDTH
If you don't know, put '40' here.
config PANEL_LCD_HWIDTH
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
int "Hardware LCD line width (1-64, 64 by default)"
range 1 64
default 64
@@ -622,7 +626,7 @@ config PANEL_LCD_HWIDTH
64 here for a 2x40.
config PANEL_LCD_CHARSET
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
int "LCD character set (0=normal, 1=KS0074)"
range 0 1
default 0
@@ -638,7 +642,7 @@ config PANEL_LCD_CHARSET
If you don't know, use the normal one (0).
config PANEL_LCD_PROTO
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
int "LCD communication mode (0=parallel 8 bits, 1=serial)"
range 0 1
default 0
@@ -651,7 +655,7 @@ config PANEL_LCD_PROTO
parallel LCD, and 1 for a serial LCD.
config PANEL_LCD_PIN_E
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
range -17 17
default 14
@@ -666,7 +670,7 @@ config PANEL_LCD_PIN_E
Default for the 'E' pin in custom profile is '14' (AUTOFEED).
config PANEL_LCD_PIN_RS
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
range -17 17
default 17
@@ -681,7 +685,7 @@ config PANEL_LCD_PIN_RS
Default for the 'RS' pin in custom profile is '17' (SELECT IN).
config PANEL_LCD_PIN_RW
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
range -17 17
default 16
@@ -696,7 +700,7 @@ config PANEL_LCD_PIN_RW
Default for the 'RW' pin in custom profile is '16' (INIT).
config PANEL_LCD_PIN_SCL
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
range -17 17
default 1
@@ -711,7 +715,7 @@ config PANEL_LCD_PIN_SCL
Default for the 'SCL' pin in custom profile is '1' (STROBE).
config PANEL_LCD_PIN_SDA
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
range -17 17
default 2
@@ -726,7 +730,7 @@ config PANEL_LCD_PIN_SDA
Default for the 'SDA' pin in custom profile is '2' (D0).
config PANEL_LCD_PIN_BL
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
range -17 17
default 0
@@ -741,7 +745,6 @@ config PANEL_LCD_PIN_BL
Default for the 'BL' pin in custom profile is '0' (uncontrolled).
config PANEL_CHANGE_MESSAGE
- depends on PANEL
bool "Change LCD initialization message ?"
default "n"
---help---
@@ -754,7 +757,7 @@ config PANEL_CHANGE_MESSAGE
say 'N' and keep the default message with the version.
config PANEL_BOOT_MESSAGE
- depends on PANEL && PANEL_CHANGE_MESSAGE="y"
+ depends on PANEL_CHANGE_MESSAGE="y"
string "New initialization message"
default ""
---help---
@@ -766,6 +769,8 @@ config PANEL_BOOT_MESSAGE
An empty message will only clear the display at driver init time. Any other
printf()-formatted message is valid with newline and escape codes.
+endif # PANEL
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 31983366090a..7a3ea89339b4 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
+obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index c4e41c26649e..de58762097c4 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -100,4 +100,14 @@ config EEPROM_DIGSY_MTC_CFG
If unsure, say N.
+config EEPROM_IDT_89HPESX
+ tristate "IDT 89HPESx PCIe-swtiches EEPROM / CSR support"
+ depends on I2C && SYSFS
+ help
+ Enable this driver to get read/write access to EEPROM / CSRs
+ over the IDT PCIe-switch i2c-slave interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called idt_89hpesx.
+
endmenu
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index fc1e81d29267..90a52624ddeb 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_EEPROM_MAX6875) += max6875.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
+obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
new file mode 100644
index 000000000000..4a22a1d99395
--- /dev/null
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -0,0 +1,1581 @@
+/*
+ * This file is provided under a GPLv2 license. When using or
+ * redistributing this file, you may do so under that license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, it can be found <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * IDT PCIe-switch NTB Linux driver
+ *
+ * Contact Information:
+ * Serge Semin <fancer.lancer@gmail.com>, <Sergey.Semin@t-platforms.ru>
+ */
+/*
+ * NOTE of the IDT 89HPESx SMBus-slave interface driver
+ * This driver is primarily developed to provide access to the EEPROM device of
+ * IDT PCIe-switches. IDT provides a simple SMBus interface to perform IO
+ * operations from/to the EEPROM, which is located on the private (so-called
+ * Master) SMBus of the switches. Using that interface, the driver creates a
+ * simple binary sysfs-file in the device directory:
+ * /sys/bus/i2c/devices/<bus>-<devaddr>/eeprom
+ * If the read-only flag is specified in the dts-node of the device description,
+ * user-space applications won't be able to write to the EEPROM sysfs-node.
+ * Additionally, the IDT 89HPESx SMBus interface has the ability to read/write
+ * device CSRs. This driver exposes a debugfs-file to perform simple IO
+ * operations using that ability, for basic debug purposes only. In particular,
+ * the following file is created in a dedicated debugfs-directory:
+ * /sys/kernel/debug/idt_csr/
+ * Format of the debugfs-node is:
+ * $ cat /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>;
+ * <CSR address>:<CSR value>
+ * So reading the content of the file gives the current CSR address and its value.
+ * If a user-space application wishes to change the current CSR address,
+ * it can just write a proper value to the sysfs-file:
+ * $ echo "<CSR address>" > /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>
+ * If it wants to change the CSR value as well, the format of the write
+ * operation is:
+ * $ echo "<CSR address>:<CSR value>" > \
+ * /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>;
+ * The CSR address and value can be given in hexadecimal, decimal or octal format.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/debugfs.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/i2c.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+
+#define IDT_NAME "89hpesx"
+#define IDT_89HPESX_DESC "IDT 89HPESx SMBus-slave interface driver"
+#define IDT_89HPESX_VER "1.0"
+
+MODULE_DESCRIPTION(IDT_89HPESX_DESC);
+MODULE_VERSION(IDT_89HPESX_VER);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("T-platforms");
+
+/*
+ * csr_dbgdir - CSR read/write operations Debugfs directory
+ */
+static struct dentry *csr_dbgdir;
+
+/*
+ * struct idt_89hpesx_dev - IDT 89HPESx device data structure
+ * @eesize: Size of EEPROM in bytes (calculated from "idt,eecompatible")
+ * @eero: EEPROM Read-only flag
+ * @eeaddr: EEPROM custom address
+ *
+ * @inieecmd: Initial cmd value for EEPROM read/write operations
+ * @inicsrcmd: Initial cmd value for CSR read/write operations
+ * @iniccode: Initial command code value for IO-operations
+ *
+ * @csr: CSR address to perform read operation
+ *
+ * @smb_write: SMBus write method
+ * @smb_read: SMBus read method
+ * @smb_mtx: SMBus mutex
+ *
+ * @client: i2c client used to perform IO operations
+ *
+ * @ee_file: EEPROM read/write sysfs-file
+ * @csr_file: CSR read/write debugfs-node
+ */
+struct idt_smb_seq;
+struct idt_89hpesx_dev {
+ u32 eesize;
+ bool eero;
+ u8 eeaddr;
+
+ u8 inieecmd;
+ u8 inicsrcmd;
+ u8 iniccode;
+
+ u16 csr;
+
+ int (*smb_write)(struct idt_89hpesx_dev *, const struct idt_smb_seq *);
+ int (*smb_read)(struct idt_89hpesx_dev *, struct idt_smb_seq *);
+ struct mutex smb_mtx;
+
+ struct i2c_client *client;
+
+ struct bin_attribute *ee_file;
+ struct dentry *csr_dir;
+ struct dentry *csr_file;
+};
+
+/*
+ * struct idt_smb_seq - sequence of data to be read/written from/to IDT 89HPESx
+ * @ccode: SMBus command code
+ * @bytecnt: Byte count of operation
+ * @data: Data to be written
+ */
+struct idt_smb_seq {
+ u8 ccode;
+ u8 bytecnt;
+ u8 *data;
+};
+
+/*
+ * struct idt_eeprom_seq - sequence of data to be read/written from/to EEPROM
+ * @cmd: Transaction CMD
+ * @eeaddr: EEPROM custom address
+ * @memaddr: Internal memory address of EEPROM
+ * @data: Data to be written at the memory address
+ */
+struct idt_eeprom_seq {
+ u8 cmd;
+ u8 eeaddr;
+ u16 memaddr;
+ u8 data;
+} __packed;
+
+/*
+ * struct idt_csr_seq - sequence of data to be read/written from/to CSR
+ * @cmd: Transaction CMD
+ * @csraddr: Internal IDT device CSR address
+ * @data: Data to be read/written from/to the CSR address
+ */
+struct idt_csr_seq {
+ u8 cmd;
+ u16 csraddr;
+ u32 data;
+} __packed;
+
+/*
+ * SMBus command code macros
+ * @CCODE_END: Indicates the end of transaction
+ * @CCODE_START: Indicates the start of transaction
+ * @CCODE_CSR: CSR read/write transaction
+ * @CCODE_EEPROM: EEPROM read/write transaction
+ * @CCODE_BYTE: Supplied data has BYTE length
+ * @CCODE_WORD: Supplied data has WORD length
+ * @CCODE_BLOCK: Supplied data has variable length passed in the bytecnt
+ * byte immediately following the CCODE byte
+ */
+#define CCODE_END ((u8)0x01)
+#define CCODE_START ((u8)0x02)
+#define CCODE_CSR ((u8)0x00)
+#define CCODE_EEPROM ((u8)0x04)
+#define CCODE_BYTE ((u8)0x00)
+#define CCODE_WORD ((u8)0x20)
+#define CCODE_BLOCK ((u8)0x40)
+#define CCODE_PEC ((u8)0x80)
+
+/*
+ * EEPROM command macros
+ * @EEPROM_OP_WRITE: EEPROM write operation
+ * @EEPROM_OP_READ: EEPROM read operation
+ * @EEPROM_USA: Use specified address of EEPROM
+ * @EEPROM_NAERR: EEPROM device is not ready to respond
+ * @EEPROM_LAERR: EEPROM arbitration loss error
+ * @EEPROM_MSS: EEPROM misplace start & stop bits error
+ * @EEPROM_WR_CNT: Bytes count to perform write operation
+ * @EEPROM_WRRD_CNT: Bytes count to write before reading
+ * @EEPROM_RD_CNT: Bytes count to perform read operation
+ * @EEPROM_DEF_SIZE: Fallback size of EEPROM
+ * @EEPROM_DEF_ADDR: Default EEPROM address
+ * @EEPROM_TOUT: Timeout before retrying a read operation if the EEPROM is busy
+ */
+#define EEPROM_OP_WRITE ((u8)0x00)
+#define EEPROM_OP_READ ((u8)0x01)
+#define EEPROM_USA ((u8)0x02)
+#define EEPROM_NAERR ((u8)0x08)
+#define EEPROM_LAERR ((u8)0x10)
+#define EEPROM_MSS ((u8)0x20)
+#define EEPROM_WR_CNT ((u8)5)
+#define EEPROM_WRRD_CNT ((u8)4)
+#define EEPROM_RD_CNT ((u8)5)
+#define EEPROM_DEF_SIZE ((u16)4096)
+#define EEPROM_DEF_ADDR ((u8)0x50)
+#define EEPROM_TOUT (100)
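+
+/*
+ * Note for reference: the counts above follow the packed struct idt_eeprom_seq
+ * layout - cmd (1) + eeaddr (1) + memaddr (2) + data (1) bytes give the
+ * EEPROM_WR_CNT/EEPROM_RD_CNT of 5, while EEPROM_WRRD_CNT of 4 covers only the
+ * addressing part sent before a read.
+ */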
+
+/*
+ * CSR command macros
+ * @CSR_DWE: Enable all four bytes of the operation
+ * @CSR_OP_WRITE: CSR write operation
+ * @CSR_OP_READ: CSR read operation
+ * @CSR_RERR: Read operation error
+ * @CSR_WERR: Write operation error
+ * @CSR_WR_CNT: Bytes count to perform write operation
+ * @CSR_WRRD_CNT: Bytes count to write before reading
+ * @CSR_RD_CNT: Bytes count to perform read operation
+ * @CSR_MAX: Maximum CSR address
+ * @CSR_DEF: Default CSR address
+ * @CSR_REAL_ADDR: CSR real unshifted address
+ */
+#define CSR_DWE ((u8)0x0F)
+#define CSR_OP_WRITE ((u8)0x00)
+#define CSR_OP_READ ((u8)0x10)
+#define CSR_RERR ((u8)0x40)
+#define CSR_WERR ((u8)0x80)
+#define CSR_WR_CNT ((u8)7)
+#define CSR_WRRD_CNT ((u8)3)
+#define CSR_RD_CNT ((u8)7)
+#define CSR_MAX ((u32)0x3FFFF)
+#define CSR_DEF ((u16)0x0000)
+#define CSR_REAL_ADDR(val) ((unsigned int)val << 2)
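+
+/*
+ * Note for reference: CSR_WR_CNT of 7 matches the packed struct idt_csr_seq
+ * (cmd + csraddr + data), while CSR_WRRD_CNT of 3 sends only cmd + csraddr
+ * before a read. The driver keeps CSR addresses shifted right by two bits in
+ * a u16, and CSR_REAL_ADDR() restores the DWORD-aligned address, e.g.
+ * CSR_REAL_ADDR(0x0001) == 0x00004.
+ */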
+
+/*
+ * IDT 89HPESx basic register
+ * @IDT_VIDDID_CSR: PCIe VID and DID of IDT 89HPESx
+ * @IDT_VID_MASK: Mask of VID
+ */
+#define IDT_VIDDID_CSR ((u32)0x0000)
+#define IDT_VID_MASK ((u32)0xFFFF)
+
+/*
+ * IDT 89HPESx can send a NACK when a new command is sent before the previous
+ * one has finished execution. In this case the driver retries the operation
+ * a certain number of times.
+ * @RETRY_CNT: Number of retries before giving up and failing
+ * @idt_smb_safe: Generate a retry loop around the corresponding SMBus method
+ */
+#define RETRY_CNT (128)
+#define idt_smb_safe(ops, args...) ({ \
+ int __retry = RETRY_CNT; \
+ s32 __sts; \
+ do { \
+ __sts = i2c_smbus_ ## ops ## _data(args); \
+ } while (__retry-- && __sts < 0); \
+ __sts; \
+})
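+
+/*
+ * For instance, idt_smb_safe(write_byte, client, ccode, value) expands to a
+ * retry loop around i2c_smbus_write_byte_data(client, ccode, value), and
+ * idt_smb_safe(read_word, client, ccode) wraps i2c_smbus_read_word_data().
+ */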
+
+/*===========================================================================
+ * i2c bus level IO-operations
+ *===========================================================================
+ */
+
+/*
+ * idt_smb_write_byte() - SMBus write method when only the I2C_SMBUS_BYTE_DATA
+ * operation is available
+ * @pdev: Pointer to the driver data
+ * @seq: Sequence of data to be written
+ */
+static int idt_smb_write_byte(struct idt_89hpesx_dev *pdev,
+ const struct idt_smb_seq *seq)
+{
+ s32 sts;
+ u8 ccode;
+ int idx;
+
+ /* Loop over the supplied data sending byte one-by-one */
+ for (idx = 0; idx < seq->bytecnt; idx++) {
+ /* Collect the command code byte */
+ ccode = seq->ccode | CCODE_BYTE;
+ if (idx == 0)
+ ccode |= CCODE_START;
+ if (idx == seq->bytecnt - 1)
+ ccode |= CCODE_END;
+
+ /* Send data to the device */
+ sts = idt_smb_safe(write_byte, pdev->client, ccode,
+ seq->data[idx]);
+ if (sts != 0)
+ return (int)sts;
+ }
+
+ return 0;
+}
+
+/*
+ * idt_smb_read_byte() - SMBus read method when only the I2C_SMBUS_BYTE_DATA
+ * operation is available
+ * @pdev: Pointer to the driver data
+ * @seq: Buffer to read data to
+ */
+static int idt_smb_read_byte(struct idt_89hpesx_dev *pdev,
+ struct idt_smb_seq *seq)
+{
+ s32 sts;
+ u8 ccode;
+ int idx;
+
+ /* Loop over the supplied buffer receiving byte one-by-one */
+ for (idx = 0; idx < seq->bytecnt; idx++) {
+ /* Collect the command code byte */
+ ccode = seq->ccode | CCODE_BYTE;
+ if (idx == 0)
+ ccode |= CCODE_START;
+ if (idx == seq->bytecnt - 1)
+ ccode |= CCODE_END;
+
+ /* Read data from the device */
+ sts = idt_smb_safe(read_byte, pdev->client, ccode);
+ if (sts < 0)
+ return (int)sts;
+
+ seq->data[idx] = (u8)sts;
+ }
+
+ return 0;
+}
+
+/*
+ * idt_smb_write_word() - SMBus write method when I2C_SMBUS_BYTE_DATA and
+ * I2C_FUNC_SMBUS_WORD_DATA operations are available
+ * @pdev: Pointer to the driver data
+ * @seq: Sequence of data to be written
+ */
+static int idt_smb_write_word(struct idt_89hpesx_dev *pdev,
+ const struct idt_smb_seq *seq)
+{
+ s32 sts;
+ u8 ccode;
+ int idx, evencnt;
+
+ /* Calculate the even count of data to send */
+ evencnt = seq->bytecnt - (seq->bytecnt % 2);
+
+ /* Loop over the supplied data sending two bytes at a time */
+ for (idx = 0; idx < evencnt; idx += 2) {
+ /* Collect the command code byte */
+ ccode = seq->ccode | CCODE_WORD;
+ if (idx == 0)
+ ccode |= CCODE_START;
+ if (idx == evencnt - 2)
+ ccode |= CCODE_END;
+
+ /* Send word data to the device */
+ sts = idt_smb_safe(write_word, pdev->client, ccode,
+ *(u16 *)&seq->data[idx]);
+ if (sts != 0)
+ return (int)sts;
+ }
+
+ /* If there is an odd number of bytes then send just the last byte */
+ if (seq->bytecnt != evencnt) {
+ /* Collect the command code byte */
+ ccode = seq->ccode | CCODE_BYTE | CCODE_END;
+ if (idx == 0)
+ ccode |= CCODE_START;
+
+ /* Send byte data to the device */
+ sts = idt_smb_safe(write_byte, pdev->client, ccode,
+ seq->data[idx]);
+ if (sts != 0)
+ return (int)sts;
+ }
+
+ return 0;
+}
+
+/*
+ * idt_smb_read_word() - SMBus read method when I2C_SMBUS_BYTE_DATA and
+ * I2C_FUNC_SMBUS_WORD_DATA operations are available
+ * @pdev: Pointer to the driver data
+ * @seq: Buffer to read data to
+ */
+static int idt_smb_read_word(struct idt_89hpesx_dev *pdev,
+ struct idt_smb_seq *seq)
+{
+ s32 sts;
+ u8 ccode;
+ int idx, evencnt;
+
+ /* Calculate the even count of data to send */
+ evencnt = seq->bytecnt - (seq->bytecnt % 2);
+
+ /* Loop over the supplied data reading two bytes at a time */
+ for (idx = 0; idx < evencnt; idx += 2) {
+ /* Collect the command code byte */
+ ccode = seq->ccode | CCODE_WORD;
+ if (idx == 0)
+ ccode |= CCODE_START;
+ if (idx == evencnt - 2)
+ ccode |= CCODE_END;
+
+ /* Read word data from the device */
+ sts = idt_smb_safe(read_word, pdev->client, ccode);
+ if (sts < 0)
+ return (int)sts;
+
+ *(u16 *)&seq->data[idx] = (u16)sts;
+ }
+
+ /* If there is an odd number of bytes then receive just the last byte */
+ if (seq->bytecnt != evencnt) {
+ /* Collect the command code byte */
+ ccode = seq->ccode | CCODE_BYTE | CCODE_END;
+ if (idx == 0)
+ ccode |= CCODE_START;
+
+ /* Read last data byte from the device */
+ sts = idt_smb_safe(read_byte, pdev->client, ccode);
+ if (sts < 0)
+ return (int)sts;
+
+ seq->data[idx] = (u8)sts;
+ }
+
+ return 0;
+}
+
+/*
+ * idt_smb_write_block() - SMBus write method when I2C_SMBUS_BLOCK_DATA
+ * operation is available
+ * @pdev: Pointer to the driver data
+ * @seq: Sequence of data to be written
+ */
+static int idt_smb_write_block(struct idt_89hpesx_dev *pdev,
+ const struct idt_smb_seq *seq)
+{
+ u8 ccode;
+
+ /* Return error if too much data passed to send */
+ if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+
+ /* Collect the command code byte */
+ ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
+
+ /* Send block of data to the device */
+ return idt_smb_safe(write_block, pdev->client, ccode, seq->bytecnt,
+ seq->data);
+}
+
+/*
+ * idt_smb_read_block() - SMBus read method when I2C_SMBUS_BLOCK_DATA
+ * operation is available
+ * @pdev: Pointer to the driver data
+ * @seq: Buffer to read data to
+ */
+static int idt_smb_read_block(struct idt_89hpesx_dev *pdev,
+ struct idt_smb_seq *seq)
+{
+ s32 sts;
+ u8 ccode;
+
+ /* Return error if too much data is requested to be read */
+ if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+
+ /* Collect the command code byte */
+ ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
+
+ /* Read block of data from the device */
+ sts = idt_smb_safe(read_block, pdev->client, ccode, seq->data);
+ if (sts != seq->bytecnt)
+ return (sts < 0 ? sts : -ENODATA);
+
+ return 0;
+}
+
+/*
+ * idt_smb_write_i2c_block() - SMBus write method when I2C_SMBUS_I2C_BLOCK_DATA
+ * operation is available
+ * @pdev: Pointer to the driver data
+ * @seq: Sequence of data to be written
+ *
+ * NOTE It's the usual SMBus block write operation, except that the actual data
+ * length is sent as the first byte of data
+ */
+static int idt_smb_write_i2c_block(struct idt_89hpesx_dev *pdev,
+ const struct idt_smb_seq *seq)
+{
+ u8 ccode, buf[I2C_SMBUS_BLOCK_MAX + 1];
+
+ /* Return error if too much data passed to send */
+ if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+
+ /* Collect the data to send. Length byte must be added prior the data */
+ buf[0] = seq->bytecnt;
+ memcpy(&buf[1], seq->data, seq->bytecnt);
+
+ /* Collect the command code byte */
+ ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
+
+ /* Send length and block of data to the device */
+ return idt_smb_safe(write_i2c_block, pdev->client, ccode,
+ seq->bytecnt + 1, buf);
+}
+
+/*
+ * idt_smb_read_i2c_block() - SMBus read method when I2C_SMBUS_I2C_BLOCK_DATA
+ * operation is available
+ * @pdev: Pointer to the driver data
+ * @seq: Buffer to read data to
+ *
+ * NOTE It's the usual SMBus block read operation, except that the actual data
+ * length is retrieved as the first byte of data
+ */
+static int idt_smb_read_i2c_block(struct idt_89hpesx_dev *pdev,
+ struct idt_smb_seq *seq)
+{
+ u8 ccode, buf[I2C_SMBUS_BLOCK_MAX + 1];
+ s32 sts;
+
+ /* Return error if too much data is requested to be read */
+ if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+
+ /* Collect the command code byte */
+ ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
+
+ /* Read length and block of data from the device */
+ sts = idt_smb_safe(read_i2c_block, pdev->client, ccode,
+ seq->bytecnt + 1, buf);
+ if (sts != seq->bytecnt + 1)
+ return (sts < 0 ? sts : -ENODATA);
+ if (buf[0] != seq->bytecnt)
+ return -ENODATA;
+
+ /* Copy retrieved data to the output data buffer */
+ memcpy(seq->data, &buf[1], seq->bytecnt);
+
+ return 0;
+}
+
+/*===========================================================================
+ * EEPROM IO-operations
+ *===========================================================================
+ */
+
+/*
+ * idt_eeprom_read_byte() - read just one byte from EEPROM
+ * @pdev: Pointer to the driver data
+ * @memaddr: EEPROM memory address to read from
+ * @data: Buffer to store the byte read from EEPROM
+ */
+static int idt_eeprom_read_byte(struct idt_89hpesx_dev *pdev, u16 memaddr,
+ u8 *data)
+{
+ struct device *dev = &pdev->client->dev;
+ struct idt_eeprom_seq eeseq;
+ struct idt_smb_seq smbseq;
+ int ret, retry;
+
+ /* Initialize SMBus sequence fields */
+ smbseq.ccode = pdev->iniccode | CCODE_EEPROM;
+ smbseq.data = (u8 *)&eeseq;
+
+ /*
+ * Sometimes the EEPROM may respond with a NACK if it's busy with a previous
+ * operation, so we need to make a few attempts at the read cycle
+ */
+ retry = RETRY_CNT;
+ do {
+ /* Send EEPROM memory address to read data from */
+ smbseq.bytecnt = EEPROM_WRRD_CNT;
+ eeseq.cmd = pdev->inieecmd | EEPROM_OP_READ;
+ eeseq.eeaddr = pdev->eeaddr;
+ eeseq.memaddr = cpu_to_le16(memaddr);
+ ret = pdev->smb_write(pdev, &smbseq);
+ if (ret != 0) {
+ dev_err(dev, "Failed to init eeprom addr 0x%02hhx",
+ memaddr);
+ break;
+ }
+
+ /* Perform read operation */
+ smbseq.bytecnt = EEPROM_RD_CNT;
+ ret = pdev->smb_read(pdev, &smbseq);
+ if (ret != 0) {
+ dev_err(dev, "Failed to read eeprom data 0x%02hhx",
+ memaddr);
+ break;
+ }
+
+ /* Restart read operation if the device is busy */
+ if (retry && (eeseq.cmd & EEPROM_NAERR)) {
+ dev_dbg(dev, "EEPROM busy, retry reading after %d ms",
+ EEPROM_TOUT);
+ msleep(EEPROM_TOUT);
+ continue;
+ }
+
+ /* Check whether IDT successfully read data from EEPROM */
+ if (eeseq.cmd & (EEPROM_NAERR | EEPROM_LAERR | EEPROM_MSS)) {
+ dev_err(dev,
+ "Communication with eeprom failed, cmd 0x%hhx",
+ eeseq.cmd);
+ ret = -EREMOTEIO;
+ break;
+ }
+
+ /* Save retrieved data and exit the loop */
+ *data = eeseq.data;
+ break;
+ } while (retry--);
+
+ /* Return the status of operation */
+ return ret;
+}
+
+/*
+ * idt_eeprom_write() - EEPROM write operation
+ * @pdev: Pointer to the driver data
+ * @memaddr: Start EEPROM memory address
+ * @len: Length of data to be written
+ * @data: Data to be written to EEPROM
+ */
+static int idt_eeprom_write(struct idt_89hpesx_dev *pdev, u16 memaddr, u16 len,
+ const u8 *data)
+{
+ struct device *dev = &pdev->client->dev;
+ struct idt_eeprom_seq eeseq;
+ struct idt_smb_seq smbseq;
+ int ret;
+ u16 idx;
+
+ /* Initialize SMBus sequence fields */
+ smbseq.ccode = pdev->iniccode | CCODE_EEPROM;
+ smbseq.data = (u8 *)&eeseq;
+
+ /* Send data byte-by-byte, checking if it is successfully written */
+ for (idx = 0; idx < len; idx++, memaddr++) {
+ /* Lock IDT SMBus device */
+ mutex_lock(&pdev->smb_mtx);
+
+ /* Perform write operation */
+ smbseq.bytecnt = EEPROM_WR_CNT;
+ eeseq.cmd = pdev->inieecmd | EEPROM_OP_WRITE;
+ eeseq.eeaddr = pdev->eeaddr;
+ eeseq.memaddr = cpu_to_le16(memaddr);
+ eeseq.data = data[idx];
+ ret = pdev->smb_write(pdev, &smbseq);
+ if (ret != 0) {
+ dev_err(dev,
+ "Failed to write 0x%04hx:0x%02hhx to eeprom",
+ memaddr, data[idx]);
+ goto err_mutex_unlock;
+ }
+
+ /*
+ * Check whether the data is successfully written by reading
+ * from the same EEPROM memory address.
+ */
+ eeseq.data = ~data[idx];
+ ret = idt_eeprom_read_byte(pdev, memaddr, &eeseq.data);
+ if (ret != 0)
+ goto err_mutex_unlock;
+
+ /* Check whether the read byte is the same as written one */
+ if (eeseq.data != data[idx]) {
+ dev_err(dev, "Values don't match 0x%02hhx != 0x%02hhx",
+ eeseq.data, data[idx]);
+ ret = -EREMOTEIO;
+ goto err_mutex_unlock;
+ }
+
+ /* Unlock IDT SMBus device */
+err_mutex_unlock:
+ mutex_unlock(&pdev->smb_mtx);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * idt_eeprom_read() - EEPROM read operation
+ * @pdev: Pointer to the driver data
+ * @memaddr: Start EEPROM memory address
+ * @len: Length of data to read
+ * @buf: Buffer to read data to
+ */
+static int idt_eeprom_read(struct idt_89hpesx_dev *pdev, u16 memaddr, u16 len,
+ u8 *buf)
+{
+ int ret;
+ u16 idx;
+
+ /* Read data byte-by-byte, retrying if it wasn't successful */
+ for (idx = 0; idx < len; idx++, memaddr++) {
+ /* Lock IDT SMBus device */
+ mutex_lock(&pdev->smb_mtx);
+
+ /* Just read the byte to the buffer */
+ ret = idt_eeprom_read_byte(pdev, memaddr, &buf[idx]);
+
+ /* Unlock IDT SMBus device */
+ mutex_unlock(&pdev->smb_mtx);
+
+ /* Return error if read operation failed */
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*===========================================================================
+ * CSR IO-operations
+ *===========================================================================
+ */
+
+/*
+ * idt_csr_write() - CSR write operation
+ * @pdev: Pointer to the driver data
+ * @csraddr: CSR address (without the two LS bits)
+ * @data: Data to be written to CSR
+ */
+static int idt_csr_write(struct idt_89hpesx_dev *pdev, u16 csraddr,
+ const u32 data)
+{
+ struct device *dev = &pdev->client->dev;
+ struct idt_csr_seq csrseq;
+ struct idt_smb_seq smbseq;
+ int ret;
+
+ /* Initialize SMBus sequence fields */
+ smbseq.ccode = pdev->iniccode | CCODE_CSR;
+ smbseq.data = (u8 *)&csrseq;
+
+ /* Lock IDT SMBus device */
+ mutex_lock(&pdev->smb_mtx);
+
+ /* Perform write operation */
+ smbseq.bytecnt = CSR_WR_CNT;
+ csrseq.cmd = pdev->inicsrcmd | CSR_OP_WRITE;
+ csrseq.csraddr = cpu_to_le16(csraddr);
+ csrseq.data = cpu_to_le32(data);
+ ret = pdev->smb_write(pdev, &smbseq);
+ if (ret != 0) {
+ dev_err(dev, "Failed to write 0x%04x: 0x%04x to csr",
+ CSR_REAL_ADDR(csraddr), data);
+ goto err_mutex_unlock;
+ }
+
+ /* Send CSR address to read data from */
+ smbseq.bytecnt = CSR_WRRD_CNT;
+ csrseq.cmd = pdev->inicsrcmd | CSR_OP_READ;
+ ret = pdev->smb_write(pdev, &smbseq);
+ if (ret != 0) {
+ dev_err(dev, "Failed to init csr address 0x%04x",
+ CSR_REAL_ADDR(csraddr));
+ goto err_mutex_unlock;
+ }
+
+ /* Perform read operation */
+ smbseq.bytecnt = CSR_RD_CNT;
+ ret = pdev->smb_read(pdev, &smbseq);
+ if (ret != 0) {
+ dev_err(dev, "Failed to read csr 0x%04x",
+ CSR_REAL_ADDR(csraddr));
+ goto err_mutex_unlock;
+ }
+
+ /* Check whether IDT successfully retrieved CSR data */
+ if (csrseq.cmd & (CSR_RERR | CSR_WERR)) {
+ dev_err(dev, "IDT failed to perform CSR r/w");
+ ret = -EREMOTEIO;
+ goto err_mutex_unlock;
+ }
+
+ /* Unlock IDT SMBus device */
+err_mutex_unlock:
+ mutex_unlock(&pdev->smb_mtx);
+
+ return ret;
+}
+
+/*
+ * idt_csr_read() - CSR read operation
+ * @pdev: Pointer to the driver data
+ * @csraddr: CSR address (without the two LS bits)
+ * @data: Buffer to store the data read from the CSR
+ */
+static int idt_csr_read(struct idt_89hpesx_dev *pdev, u16 csraddr, u32 *data)
+{
+ struct device *dev = &pdev->client->dev;
+ struct idt_csr_seq csrseq;
+ struct idt_smb_seq smbseq;
+ int ret;
+
+ /* Initialize SMBus sequence fields */
+ smbseq.ccode = pdev->iniccode | CCODE_CSR;
+ smbseq.data = (u8 *)&csrseq;
+
+ /* Lock IDT SMBus device */
+ mutex_lock(&pdev->smb_mtx);
+
+ /* Send CSR register address before reading it */
+ smbseq.bytecnt = CSR_WRRD_CNT;
+ csrseq.cmd = pdev->inicsrcmd | CSR_OP_READ;
+ csrseq.csraddr = cpu_to_le16(csraddr);
+ ret = pdev->smb_write(pdev, &smbseq);
+ if (ret != 0) {
+ dev_err(dev, "Failed to init csr address 0x%04x",
+ CSR_REAL_ADDR(csraddr));
+ goto err_mutex_unlock;
+ }
+
+ /* Perform read operation */
+ smbseq.bytecnt = CSR_RD_CNT;
+ ret = pdev->smb_read(pdev, &smbseq);
+ if (ret != 0) {
+ dev_err(dev, "Failed to read csr 0x%04hx",
+ CSR_REAL_ADDR(csraddr));
+ goto err_mutex_unlock;
+ }
+
+ /* Check whether IDT successfully retrieved CSR data */
+ if (csrseq.cmd & (CSR_RERR | CSR_WERR)) {
+ dev_err(dev, "IDT failed to perform CSR r/w");
+ ret = -EREMOTEIO;
+ goto err_mutex_unlock;
+ }
+
+ /* Save data retrieved from IDT */
+ *data = le32_to_cpu(csrseq.data);
+
+ /* Unlock IDT SMBus device */
+err_mutex_unlock:
+ mutex_unlock(&pdev->smb_mtx);
+
+ return ret;
+}
+
+/*===========================================================================
+ * Sysfs/debugfs-nodes IO-operations
+ *===========================================================================
+ */
+
+/*
+ * eeprom_write() - EEPROM sysfs-node write callback
+ * @filp: Pointer to the file structure
+ * @kobj: Pointer to the kernel object related to the sysfs-node
+ * @attr: Attributes of the file
+ * @buf: Buffer with data to be written to EEPROM
+ * @off: Offset within EEPROM at which data should be written
+ * @count: Number of bytes to write
+ */
+static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct idt_89hpesx_dev *pdev;
+ int ret;
+
+ /* Retrieve driver data */
+ pdev = dev_get_drvdata(kobj_to_dev(kobj));
+
+ /* Perform EEPROM write operation */
+ ret = idt_eeprom_write(pdev, (u16)off, (u16)count, (u8 *)buf);
+ return (ret != 0 ? ret : count);
+}
+
+/*
+ * eeprom_read() - EEPROM sysfs-node read callback
+ * @filp: Pointer to the file structure
+ * @kobj: Pointer to the kernel object related to the sysfs-node
+ * @attr: Attributes of the file
+ * @buf: Buffer to read data into
+ * @off: Offset within EEPROM to read data from
+ * @count: Number of bytes to read
+ */
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct idt_89hpesx_dev *pdev;
+ int ret;
+
+ /* Retrieve driver data */
+ pdev = dev_get_drvdata(kobj_to_dev(kobj));
+
+ /* Perform EEPROM read operation */
+ ret = idt_eeprom_read(pdev, (u16)off, (u16)count, (u8 *)buf);
+ return (ret != 0 ? ret : count);
+}
+
+/*
+ * idt_dbgfs_csr_write() - CSR debugfs-node write callback
+ * @filep: Pointer to the file system file descriptor
+ * @ubuf: User-space buffer to read data from
+ * @count: Size of the buffer
+ * @offp: Offset within the file
+ *
+ * It accepts either "0x<reg addr>:0x<value>" for saving the register address
+ * and writing the value to the specified DWORD register, or "0x<reg addr>" for
+ * just saving the register address for a subsequent read operation. See the
+ * usage example after this function.
+ *
+ * WARNING No spaces are allowed. The incoming string must be strictly
+ * formatted as "<reg addr>:<value>". The register address must be aligned
+ * to 4 bytes (one DWORD).
+ */
+static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct idt_89hpesx_dev *pdev = filep->private_data;
+ char *colon_ch, *csraddr_str, *csrval_str;
+ int ret, csraddr_len, csrval_len;
+ u32 csraddr, csrval;
+ char *buf;
+
+ /* Copy data from User-space */
+ buf = kmalloc(count + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = simple_write_to_buffer(buf, count, offp, ubuf, count);
+ if (ret < 0)
+ goto free_buf;
+ buf[count] = 0;
+
+ /* Find position of colon in the buffer */
+ colon_ch = strnchr(buf, count, ':');
+
+ /*
+ * If a colon is present then the new CSR value must be parsed as
+ * well, so allocate a buffer for the CSR address substring.
+ * If no colon is found, the string must contain just one number with
+ * no new CSR value
+ */
+ if (colon_ch != NULL) {
+ csraddr_len = colon_ch - buf;
+ csraddr_str =
+ kmalloc(sizeof(char)*(csraddr_len + 1), GFP_KERNEL);
+ if (csraddr_str == NULL) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+ /* Copy the register address to the substring buffer */
+ strncpy(csraddr_str, buf, csraddr_len);
+ csraddr_str[csraddr_len] = '\0';
+ /* Register value must follow the colon */
+ csrval_str = colon_ch + 1;
+ csrval_len = strnlen(csrval_str, count - csraddr_len);
+ } else /* if (str_colon == NULL) */ {
+ csraddr_str = (char *)buf; /* Just to shut warning up */
+ csraddr_len = strnlen(csraddr_str, count);
+ csrval_str = NULL;
+ csrval_len = 0;
+ }
+
+ /* Convert CSR address to u32 value */
+ ret = kstrtou32(csraddr_str, 0, &csraddr);
+ if (ret != 0)
+ goto free_csraddr_str;
+
+ /* Check whether passed register address is valid */
+ if (csraddr > CSR_MAX || !IS_ALIGNED(csraddr, SZ_4)) {
+ ret = -EINVAL;
+ goto free_csraddr_str;
+ }
+
+ /* Shift register address to the right so as to have a u16 address */
+ pdev->csr = (csraddr >> 2);
+
+ /* Parse new CSR value and send it to IDT, if colon has been found */
+ if (colon_ch != NULL) {
+ ret = kstrtou32(csrval_str, 0, &csrval);
+ if (ret != 0)
+ goto free_csraddr_str;
+
+ ret = idt_csr_write(pdev, pdev->csr, csrval);
+ if (ret != 0)
+ goto free_csraddr_str;
+ }
+
+ /* Free memory only if colon has been found */
+free_csraddr_str:
+ if (colon_ch != NULL)
+ kfree(csraddr_str);
+
+ /* Free buffer allocated for data retrieved from User-space */
+free_buf:
+ kfree(buf);
+
+ return (ret != 0 ? ret : count);
+}
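+
+/*
+ * Illustrative usage of the node (assuming debugfs is mounted at
+ * /sys/kernel/debug; the bus number, device address and node name below are
+ * examples only):
+ *
+ *	echo "0x00004:0x1" > /sys/kernel/debug/idt_csr/0-0074/89hpes32nt8ag2
+ *	echo "0x00004" > /sys/kernel/debug/idt_csr/0-0074/89hpes32nt8ag2
+ *	cat /sys/kernel/debug/idt_csr/0-0074/89hpes32nt8ag2
+ *
+ * The first command writes 0x1 to the DWORD register at 0x00004, the next two
+ * select that register and read it back.
+ */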
+
+/*
+ * idt_dbgfs_csr_read() - CSR debugfs-node read callback
+ * @filep: Pointer to the file system file descriptor
+ * @ubuf: User-space buffer to write data to
+ * @count: Size of the buffer
+ * @offp: Offset within the file
+ *
+ * It just prints the pair "0x<reg addr>:0x<value>" to passed buffer.
+ */
+#define CSRBUF_SIZE ((size_t)32)
+static ssize_t idt_dbgfs_csr_read(struct file *filep, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct idt_89hpesx_dev *pdev = filep->private_data;
+ u32 csraddr, csrval;
+ char buf[CSRBUF_SIZE];
+ int ret, size;
+
+ /* Perform CSR read operation */
+ ret = idt_csr_read(pdev, pdev->csr, &csrval);
+ if (ret != 0)
+ return ret;
+
+ /* Shift register address to the left so as to have the real address */
+ csraddr = ((u32)pdev->csr << 2);
+
+ /* Print the "0x<reg addr>:0x<value>" to buffer */
+ size = snprintf(buf, CSRBUF_SIZE, "0x%05x:0x%08x\n",
+ (unsigned int)csraddr, (unsigned int)csrval);
+
+ /* Copy data to User-space */
+ return simple_read_from_buffer(ubuf, count, offp, buf, size);
+}
+
+/*
+ * eeprom_attribute - EEPROM sysfs-node attributes
+ *
+ * NOTE The size will be changed in accordance with the OF node. The EEPROM
+ * attribute will also be read-only if the corresponding flag is specified in
+ * the OF node.
+ */
+static BIN_ATTR_RW(eeprom, EEPROM_DEF_SIZE);
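+
+/*
+ * Once registered, the EEPROM is exposed as a binary sysfs file of the i2c
+ * device, e.g. (bus number and device address are illustrative):
+ *
+ *	hexdump -C /sys/bus/i2c/devices/0-0074/eeprom
+ */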
+
+/*
+ * csr_dbgfs_ops - CSR debugfs-node read/write operations
+ */
+static const struct file_operations csr_dbgfs_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = idt_dbgfs_csr_write,
+ .read = idt_dbgfs_csr_read
+};
+
+/*===========================================================================
+ * Driver init/deinit methods
+ *===========================================================================
+ */
+
+/*
+ * idt_set_defval() - disable EEPROM access by default
+ * @pdev: Pointer to the driver data
+ */
+static void idt_set_defval(struct idt_89hpesx_dev *pdev)
+{
+ /* If OF info is missing then use the following values */
+ pdev->eesize = 0;
+ pdev->eero = true;
+ pdev->inieecmd = 0;
+ pdev->eeaddr = 0;
+}
+
+#ifdef CONFIG_OF
+static const struct i2c_device_id ee_ids[];
+/*
+ * idt_ee_match_id() - check whether the node belongs to compatible EEPROMs
+ */
+static const struct i2c_device_id *idt_ee_match_id(struct device_node *node)
+{
+ const struct i2c_device_id *id = ee_ids;
+ char devname[I2C_NAME_SIZE];
+
+ /* Retrieve the device name without manufacturer name */
+ if (of_modalias_node(node, devname, sizeof(devname)))
+ return NULL;
+
+ /* Search for the device name among the supported ids */
+ while (id->name[0]) {
+ if (strcmp(devname, id->name) == 0)
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+/*
+ * idt_get_ofdata() - get IDT i2c-device parameters from device tree
+ * @pdev: Pointer to the driver data
+ */
+static void idt_get_ofdata(struct idt_89hpesx_dev *pdev)
+{
+ const struct device_node *node = pdev->client->dev.of_node;
+ struct device *dev = &pdev->client->dev;
+
+ /* Read dts node parameters */
+ if (node) {
+ const struct i2c_device_id *ee_id = NULL;
+ struct device_node *child;
+ const __be32 *addr_be;
+ int len;
+
+ /* Walk through all child nodes looking for compatible one */
+ for_each_available_child_of_node(node, child) {
+ ee_id = idt_ee_match_id(child);
+ if (IS_ERR_OR_NULL(ee_id)) {
+ dev_warn(dev, "Skip unsupported child node %s",
+ child->full_name);
+ continue;
+ } else
+ break;
+ }
+
+ /* If there is no child EEPROM device, then set zero size */
+ if (!ee_id) {
+ idt_set_defval(pdev);
+ return;
+ }
+
+ /* Retrieve EEPROM size */
+ pdev->eesize = (u32)ee_id->driver_data;
+
+ /* Get custom EEPROM address from 'reg' attribute */
+ addr_be = of_get_property(child, "reg", &len);
+ if (!addr_be || (len < sizeof(*addr_be))) {
+ dev_warn(dev, "No reg on %s, use default address %d",
+ child->full_name, EEPROM_DEF_ADDR);
+ pdev->inieecmd = 0;
+ pdev->eeaddr = EEPROM_DEF_ADDR << 1;
+ } else {
+ pdev->inieecmd = EEPROM_USA;
+ pdev->eeaddr = be32_to_cpup(addr_be) << 1;
+ }
+
+ /* Check EEPROM 'read-only' flag */
+ if (of_get_property(child, "read-only", NULL))
+ pdev->eero = true;
+ else /* if (!of_get_property(node, "read-only", NULL)) */
+ pdev->eero = false;
+
+ dev_dbg(dev, "EEPROM of %u bytes found by %hhu",
+ pdev->eesize, pdev->eeaddr);
+ } else {
+ dev_warn(dev, "No dts node, EEPROM access disabled");
+ idt_set_defval(pdev);
+ }
+}
+#else
+static void idt_get_ofdata(struct idt_89hpesx_dev *pdev)
+{
+ struct device *dev = &pdev->client->dev;
+
+ dev_warn(dev, "OF table is unsupported, EEPROM access disabled");
+
+ /* Nothing we can do, just set the default values */
+ idt_set_defval(pdev);
+}
+#endif /* CONFIG_OF */
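+
+/*
+ * Illustrative device-tree fragment this parser expects (compatible strings,
+ * addresses and the read-only flag below are examples only):
+ *
+ *	idt@74 {
+ *		compatible = "idt,89hpes32nt8ag2";
+ *		reg = <0x74>;
+ *
+ *		eeprom@50 {
+ *			compatible = "atmel,24c64";
+ *			reg = <0x50>;
+ *			read-only;
+ *		};
+ *	};
+ */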
+
+/*
+ * idt_create_pdev() - create and init data structure of the driver
+ * @client: i2c client of IDT PCIe-switch device
+ */
+static struct idt_89hpesx_dev *idt_create_pdev(struct i2c_client *client)
+{
+ struct idt_89hpesx_dev *pdev;
+
+ /* Allocate memory for driver data */
+ pdev = devm_kmalloc(&client->dev, sizeof(struct idt_89hpesx_dev),
+ GFP_KERNEL);
+ if (pdev == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize basic fields of the data */
+ pdev->client = client;
+ i2c_set_clientdata(client, pdev);
+
+ /* Read OF nodes information */
+ idt_get_ofdata(pdev);
+
+ /* Initialize basic CSR CMD field - use full DWORD-sized r/w ops */
+ pdev->inicsrcmd = CSR_DWE;
+ pdev->csr = CSR_DEF;
+
+ /* Enable Packet Error Checking if it's supported by adapter */
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC)) {
+ pdev->iniccode = CCODE_PEC;
+ client->flags |= I2C_CLIENT_PEC;
+ } else /* PEC is unsupported */ {
+ pdev->iniccode = 0;
+ }
+
+ return pdev;
+}
+
+/*
+ * idt_free_pdev() - free data structure of the driver
+ * @pdev: Pointer to the driver data
+ */
+static void idt_free_pdev(struct idt_89hpesx_dev *pdev)
+{
+ /* Clear driver data from device private field */
+ i2c_set_clientdata(pdev->client, NULL);
+}
+
+/*
+ * idt_set_smbus_ops() - set supported SMBus operations
+ * @pdev: Pointer to the driver data
+ * Return status of smbus check operations
+ */
+static int idt_set_smbus_ops(struct idt_89hpesx_dev *pdev)
+{
+ struct i2c_adapter *adapter = pdev->client->adapter;
+ struct device *dev = &pdev->client->dev;
+
+ /* Check i2c adapter read functionality */
+ if (i2c_check_functionality(adapter,
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA)) {
+ pdev->smb_read = idt_smb_read_block;
+ dev_dbg(dev, "SMBus block-read op chosen");
+ } else if (i2c_check_functionality(adapter,
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ pdev->smb_read = idt_smb_read_i2c_block;
+ dev_dbg(dev, "SMBus i2c-block-read op chosen");
+ } else if (i2c_check_functionality(adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA) &&
+ i2c_check_functionality(adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
+ pdev->smb_read = idt_smb_read_word;
+ dev_warn(dev, "Use slow word/byte SMBus read ops");
+ } else if (i2c_check_functionality(adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
+ pdev->smb_read = idt_smb_read_byte;
+ dev_warn(dev, "Use slow byte SMBus read op");
+ } else /* no supported smbus read operations */ {
+ dev_err(dev, "No supported SMBus read op");
+ return -EPFNOSUPPORT;
+ }
+
+ /* Check i2c adapter write functionality */
+ if (i2c_check_functionality(adapter,
+ I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)) {
+ pdev->smb_write = idt_smb_write_block;
+ dev_dbg(dev, "SMBus block-write op chosen");
+ } else if (i2c_check_functionality(adapter,
+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
+ pdev->smb_write = idt_smb_write_i2c_block;
+ dev_dbg(dev, "SMBus i2c-block-write op chosen");
+ } else if (i2c_check_functionality(adapter,
+ I2C_FUNC_SMBUS_WRITE_WORD_DATA) &&
+ i2c_check_functionality(adapter,
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) {
+ pdev->smb_write = idt_smb_write_word;
+ dev_warn(dev, "Use slow word/byte SMBus write op");
+ } else if (i2c_check_functionality(adapter,
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) {
+ pdev->smb_write = idt_smb_write_byte;
+ dev_warn(dev, "Use slow byte SMBus write op");
+ } else /* no supported smbus write operations */ {
+ dev_err(dev, "No supported SMBus write op");
+ return -EPFNOSUPPORT;
+ }
+
+ /* Initialize IDT SMBus slave interface mutex */
+ mutex_init(&pdev->smb_mtx);
+
+ return 0;
+}
+
+/*
+ * idt_check_dev() - check whether it's really IDT 89HPESx device
+ * @pdev: Pointer to the driver data
+ * Return status of i2c adapter check operation
+ */
+static int idt_check_dev(struct idt_89hpesx_dev *pdev)
+{
+ struct device *dev = &pdev->client->dev;
+ u32 viddid;
+ int ret;
+
+ /* Read VID and DID directly from IDT memory space */
+ ret = idt_csr_read(pdev, IDT_VIDDID_CSR, &viddid);
+ if (ret != 0) {
+ dev_err(dev, "Failed to read VID/DID");
+ return ret;
+ }
+
+ /* Check whether it's IDT device */
+ if ((viddid & IDT_VID_MASK) != PCI_VENDOR_ID_IDT) {
+ dev_err(dev, "Got unsupported VID/DID: 0x%08x", viddid);
+ return -ENODEV;
+ }
+
+ dev_info(dev, "Found IDT 89HPES device VID:0x%04x, DID:0x%04x",
+ (viddid & IDT_VID_MASK), (viddid >> 16));
+
+ return 0;
+}
+
+/*
+ * idt_create_sysfs_files() - create sysfs attribute files
+ * @pdev: Pointer to the driver data
+ * Return status of operation
+ */
+static int idt_create_sysfs_files(struct idt_89hpesx_dev *pdev)
+{
+ struct device *dev = &pdev->client->dev;
+ int ret;
+
+ /* Don't do anything if EEPROM isn't accessible */
+ if (pdev->eesize == 0) {
+ dev_dbg(dev, "Skip creating sysfs-files");
+ return 0;
+ }
+
+ /* Allocate memory for attribute file */
+ pdev->ee_file = devm_kmalloc(dev, sizeof(*pdev->ee_file), GFP_KERNEL);
+ if (!pdev->ee_file)
+ return -ENOMEM;
+
+ /* Copy the declared EEPROM attr structure to change some of its fields */
+ memcpy(pdev->ee_file, &bin_attr_eeprom, sizeof(*pdev->ee_file));
+
+ /* In case of read-only EEPROM get rid of write ability */
+ if (pdev->eero) {
+ pdev->ee_file->attr.mode &= ~0200;
+ pdev->ee_file->write = NULL;
+ }
+ /* Create EEPROM sysfs file */
+ pdev->ee_file->size = pdev->eesize;
+ ret = sysfs_create_bin_file(&dev->kobj, pdev->ee_file);
+ if (ret != 0) {
+ dev_err(dev, "Failed to create EEPROM sysfs-node");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * idt_remove_sysfs_files() - remove sysfs attribute files
+ * @pdev: Pointer to the driver data
+ */
+static void idt_remove_sysfs_files(struct idt_89hpesx_dev *pdev)
+{
+ struct device *dev = &pdev->client->dev;
+
+ /* Don't do anything if EEPROM wasn't accessible */
+ if (pdev->eesize == 0)
+ return;
+
+ /* Remove EEPROM sysfs file */
+ sysfs_remove_bin_file(&dev->kobj, pdev->ee_file);
+}
+
+/*
+ * idt_create_dbgfs_files() - create debugfs files
+ * @pdev: Pointer to the driver data
+ */
+#define CSRNAME_LEN ((size_t)32)
+static void idt_create_dbgfs_files(struct idt_89hpesx_dev *pdev)
+{
+ struct i2c_client *cli = pdev->client;
+ char fname[CSRNAME_LEN];
+
+ /* Create Debugfs directory for CSR file */
+ snprintf(fname, CSRNAME_LEN, "%d-%04hx", cli->adapter->nr, cli->addr);
+ pdev->csr_dir = debugfs_create_dir(fname, csr_dbgdir);
+
+ /* Create Debugfs file for CSR read/write operations */
+ pdev->csr_file = debugfs_create_file(cli->name, 0600,
+ pdev->csr_dir, pdev, &csr_dbgfs_ops);
+}
+
+/*
+ * idt_remove_dbgfs_files() - remove debugfs files
+ * @pdev: Pointer to the driver data
+ */
+static void idt_remove_dbgfs_files(struct idt_89hpesx_dev *pdev)
+{
+ /* Remove the CSR directory and its debugfs node */
+ debugfs_remove_recursive(pdev->csr_dir);
+}
+
+/*
+ * idt_probe() - IDT 89HPESx driver probe() callback method
+ */
+static int idt_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct idt_89hpesx_dev *pdev;
+ int ret;
+
+ /* Create driver data */
+ pdev = idt_create_pdev(client);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ /* Set SMBus operations */
+ ret = idt_set_smbus_ops(pdev);
+ if (ret != 0)
+ goto err_free_pdev;
+
+ /* Check whether it is truly IDT 89HPESx device */
+ ret = idt_check_dev(pdev);
+ if (ret != 0)
+ goto err_free_pdev;
+
+ /* Create sysfs files */
+ ret = idt_create_sysfs_files(pdev);
+ if (ret != 0)
+ goto err_free_pdev;
+
+ /* Create debugfs files */
+ idt_create_dbgfs_files(pdev);
+
+ return 0;
+
+err_free_pdev:
+ idt_free_pdev(pdev);
+
+ return ret;
+}
+
+/*
+ * idt_remove() - IDT 89HPESx driver remove() callback method
+ */
+static int idt_remove(struct i2c_client *client)
+{
+ struct idt_89hpesx_dev *pdev = i2c_get_clientdata(client);
+
+ /* Remove debugfs files first */
+ idt_remove_dbgfs_files(pdev);
+
+ /* Remove sysfs files */
+ idt_remove_sysfs_files(pdev);
+
+ /* Discard driver data structure */
+ idt_free_pdev(pdev);
+
+ return 0;
+}
+
+/*
+ * ee_ids - array of supported EEPROMs
+ */
+static const struct i2c_device_id ee_ids[] = {
+ { "24c32", 4096},
+ { "24c64", 8192},
+ { "24c128", 16384},
+ { "24c256", 32768},
+ { "24c512", 65536},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ee_ids);
+
+/*
+ * idt_ids - supported IDT 89HPESx devices
+ */
+static const struct i2c_device_id idt_ids[] = {
+ { "89hpes8nt2", 0 },
+ { "89hpes12nt3", 0 },
+
+ { "89hpes24nt6ag2", 0 },
+ { "89hpes32nt8ag2", 0 },
+ { "89hpes32nt8bg2", 0 },
+ { "89hpes12nt12g2", 0 },
+ { "89hpes16nt16g2", 0 },
+ { "89hpes24nt24g2", 0 },
+ { "89hpes32nt24ag2", 0 },
+ { "89hpes32nt24bg2", 0 },
+
+ { "89hpes12n3", 0 },
+ { "89hpes12n3a", 0 },
+ { "89hpes24n3", 0 },
+ { "89hpes24n3a", 0 },
+
+ { "89hpes32h8", 0 },
+ { "89hpes32h8g2", 0 },
+ { "89hpes48h12", 0 },
+ { "89hpes48h12g2", 0 },
+ { "89hpes48h12ag2", 0 },
+ { "89hpes16h16", 0 },
+ { "89hpes22h16", 0 },
+ { "89hpes22h16g2", 0 },
+ { "89hpes34h16", 0 },
+ { "89hpes34h16g2", 0 },
+ { "89hpes64h16", 0 },
+ { "89hpes64h16g2", 0 },
+ { "89hpes64h16ag2", 0 },
+
+ /* { "89hpes3t3", 0 }, // No SMBus-slave iface */
+ { "89hpes12t3g2", 0 },
+ { "89hpes24t3g2", 0 },
+ /* { "89hpes4t4", 0 }, // No SMBus-slave iface */
+ { "89hpes16t4", 0 },
+ { "89hpes4t4g2", 0 },
+ { "89hpes10t4g2", 0 },
+ { "89hpes16t4g2", 0 },
+ { "89hpes16t4ag2", 0 },
+ { "89hpes5t5", 0 },
+ { "89hpes6t5", 0 },
+ { "89hpes8t5", 0 },
+ { "89hpes8t5a", 0 },
+ { "89hpes24t6", 0 },
+ { "89hpes6t6g2", 0 },
+ { "89hpes24t6g2", 0 },
+ { "89hpes16t7", 0 },
+ { "89hpes32t8", 0 },
+ { "89hpes32t8g2", 0 },
+ { "89hpes48t12", 0 },
+ { "89hpes48t12g2", 0 },
+ { /* END OF LIST */ }
+};
+MODULE_DEVICE_TABLE(i2c, idt_ids);
+
+/*
+ * idt_driver - IDT 89HPESx driver structure
+ */
+static struct i2c_driver idt_driver = {
+ .driver = {
+ .name = IDT_NAME,
+ },
+ .probe = idt_probe,
+ .remove = idt_remove,
+ .id_table = idt_ids,
+};
+
+/*
+ * idt_init() - IDT 89HPESx driver init() callback method
+ */
+static int __init idt_init(void)
+{
+ /* Create Debugfs directory first */
+ if (debugfs_initialized())
+ csr_dbgdir = debugfs_create_dir("idt_csr", NULL);
+
+ /* Add new i2c-device driver */
+ return i2c_add_driver(&idt_driver);
+}
+module_init(idt_init);
+
+/*
+ * idt_exit() - IDT 89HPESx driver exit() callback method
+ */
+static void __exit idt_exit(void)
+{
+ /* Discard debugfs directory and all files if any */
+ debugfs_remove_recursive(csr_dbgdir);
+
+ /* Unregister i2c-device driver */
+ i2c_del_driver(&idt_driver);
+}
+module_exit(idt_exit);
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 6c1f49a85023..4fd21e86ad56 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -1336,7 +1336,6 @@ static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
static struct pci_error_handlers genwqe_err_handler = {
.error_detected = genwqe_err_error_detected,
.mmio_enabled = genwqe_err_result_none,
- .link_reset = genwqe_err_result_none,
.slot_reset = genwqe_err_slot_reset,
.resume = genwqe_err_resume,
};
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
index cba0837aee2e..e3f4cd8876b5 100644
--- a/drivers/misc/lkdtm_bugs.c
+++ b/drivers/misc/lkdtm_bugs.c
@@ -81,12 +81,17 @@ void lkdtm_OVERFLOW(void)
(void) recursive_loop(recur_count);
}
+static noinline void __lkdtm_CORRUPT_STACK(void *stack)
+{
+ memset(stack, 'a', 64);
+}
+
noinline void lkdtm_CORRUPT_STACK(void)
{
/* Use default char array length that triggers stack protection. */
char data[8];
+ __lkdtm_CORRUPT_STACK(&data);
- memset((void *)data, 'a', 64);
pr_info("Corrupted stack with '%16s'...\n", data);
}
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index 16e4cf110930..b9a4cd4a9b68 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -539,7 +539,9 @@ static void __exit lkdtm_module_exit(void)
/* Handle test-specific clean-up. */
lkdtm_usercopy_exit();
- unregister_jprobe(lkdtm_jprobe);
+ if (lkdtm_jprobe != NULL)
+ unregister_jprobe(lkdtm_jprobe);
+
pr_info("Crash point unregistered\n");
}
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 466afb2611c6..0e7406ccb6dd 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -132,8 +132,7 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
dev_dbg(dev->dev, "complete amthif cmd_list cb.\n");
- cb = list_first_entry_or_null(&dev->amthif_cmd_list.list,
- typeof(*cb), list);
+ cb = list_first_entry_or_null(&dev->amthif_cmd_list, typeof(*cb), list);
if (!cb) {
dev->iamthif_state = MEI_IAMTHIF_IDLE;
cl->fp = NULL;
@@ -167,7 +166,7 @@ int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
struct mei_device *dev = cl->dev;
- list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
+ list_add_tail(&cb->list, &dev->amthif_cmd_list);
/*
* The previous request is still in processing, queue this one.
@@ -211,7 +210,7 @@ unsigned int mei_amthif_poll(struct file *file, poll_table *wait)
* Return: 0, OK; otherwise, error.
*/
int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list)
+ struct list_head *cmpl_list)
{
int ret;
@@ -237,7 +236,7 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
*/
int mei_amthif_irq_read_msg(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr,
- struct mei_cl_cb *cmpl_list)
+ struct list_head *cmpl_list)
{
struct mei_device *dev;
int ret;
@@ -312,50 +311,30 @@ void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
}
/**
- * mei_clear_list - removes all callbacks associated with file
- * from mei_cb_list
- *
- * @file: file structure
- * @mei_cb_list: callbacks list
- *
- * mei_clear_list is called to clear resources associated with file
- * when application calls close function or Ctrl-C was pressed
- */
-static void mei_clear_list(const struct file *file,
- struct list_head *mei_cb_list)
-{
- struct mei_cl_cb *cb, *next;
-
- list_for_each_entry_safe(cb, next, mei_cb_list, list)
- if (file == cb->fp)
- mei_io_cb_free(cb);
-}
-
-/**
* mei_amthif_release - the release function
*
* @dev: device structure
-* @file: pointer to file structure
+* @fp: pointer to file structure
*
* Return: 0 on success, <0 on error
*/
-int mei_amthif_release(struct mei_device *dev, struct file *file)
+int mei_amthif_release(struct mei_device *dev, struct file *fp)
{
- struct mei_cl *cl = file->private_data;
+ struct mei_cl *cl = fp->private_data;
if (dev->iamthif_open_count > 0)
dev->iamthif_open_count--;
- if (cl->fp == file && dev->iamthif_state != MEI_IAMTHIF_IDLE) {
+ if (cl->fp == fp && dev->iamthif_state != MEI_IAMTHIF_IDLE) {
dev_dbg(dev->dev, "amthif canceled iamthif state %d\n",
- dev->iamthif_state);
+ dev->iamthif_state);
dev->iamthif_canceled = true;
}
- mei_clear_list(file, &dev->amthif_cmd_list.list);
- mei_clear_list(file, &cl->rd_completed);
- mei_clear_list(file, &dev->ctrl_rd_list.list);
+ /* Don't clean ctrl_rd_list here, the reads have to be completed */
+ mei_io_list_free_fp(&dev->amthif_cmd_list, fp);
+ mei_io_list_free_fp(&cl->rd_completed, fp);
return 0;
}
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 2d9c5dd06e42..cb3e9e0ca049 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -499,6 +499,25 @@ out:
EXPORT_SYMBOL_GPL(mei_cldev_enable);
/**
+ * mei_cldev_unregister_callbacks - internal wrapper for unregistering
+ * callbacks.
+ *
+ * @cldev: client device
+ */
+static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
+{
+ if (cldev->rx_cb) {
+ cancel_work_sync(&cldev->rx_work);
+ cldev->rx_cb = NULL;
+ }
+
+ if (cldev->notif_cb) {
+ cancel_work_sync(&cldev->notif_work);
+ cldev->notif_cb = NULL;
+ }
+}
+
+/**
* mei_cldev_disable - disable me client device
* disconnect form the me client
*
@@ -519,6 +538,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
bus = cldev->bus;
+ mei_cldev_unregister_callbacks(cldev);
+
mutex_lock(&bus->device_lock);
if (!mei_cl_is_connected(cl)) {
@@ -542,6 +563,37 @@ out:
EXPORT_SYMBOL_GPL(mei_cldev_disable);
/**
+ * mei_cl_bus_module_get - acquire module of the underlying
+ * hw driver.
+ *
+ * @cl: host client
+ *
+ * Return: true on success; false if the module was removed.
+ */
+bool mei_cl_bus_module_get(struct mei_cl *cl)
+{
+ struct mei_cl_device *cldev = cl->cldev;
+
+ if (!cldev)
+ return true;
+
+ return try_module_get(cldev->bus->dev->driver->owner);
+}
+
+/**
+ * mei_cl_bus_module_put - release the underlying hw module.
+ *
+ * @cl: host client
+ */
+void mei_cl_bus_module_put(struct mei_cl *cl)
+{
+ struct mei_cl_device *cldev = cl->cldev;
+
+ if (cldev)
+ module_put(cldev->bus->dev->driver->owner);
+}
+
+/**
* mei_cl_device_find - find matching entry in the driver id table
*
* @cldev: me client device
@@ -665,19 +717,12 @@ static int mei_cl_device_remove(struct device *dev)
if (!cldev || !dev->driver)
return 0;
- if (cldev->rx_cb) {
- cancel_work_sync(&cldev->rx_work);
- cldev->rx_cb = NULL;
- }
- if (cldev->notif_cb) {
- cancel_work_sync(&cldev->notif_work);
- cldev->notif_cb = NULL;
- }
-
cldrv = to_mei_cl_driver(dev->driver);
if (cldrv->remove)
ret = cldrv->remove(cldev);
+ mei_cldev_unregister_callbacks(cldev);
+
module_put(THIS_MODULE);
dev->driver = NULL;
return ret;
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index b0395601c6ae..68fe37b5bc52 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -377,19 +377,19 @@ static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
}
/**
- * __mei_io_list_flush - removes and frees cbs belonging to cl.
+ * __mei_io_list_flush_cl - removes and frees cbs belonging to cl.
*
- * @list: an instance of our list structure
+ * @head: an instance of our list structure
* @cl: host client, can be NULL for flushing the whole list
* @free: whether to free the cbs
*/
-static void __mei_io_list_flush(struct mei_cl_cb *list,
- struct mei_cl *cl, bool free)
+static void __mei_io_list_flush_cl(struct list_head *head,
+ const struct mei_cl *cl, bool free)
{
struct mei_cl_cb *cb, *next;
/* enable removing everything if no cl is specified */
- list_for_each_entry_safe(cb, next, &list->list, list) {
+ list_for_each_entry_safe(cb, next, head, list) {
if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
list_del_init(&cb->list);
if (free)
@@ -399,25 +399,42 @@ static void __mei_io_list_flush(struct mei_cl_cb *list,
}
/**
- * mei_io_list_flush - removes list entry belonging to cl.
+ * mei_io_list_flush_cl - removes list entry belonging to cl.
*
- * @list: An instance of our list structure
+ * @head: An instance of our list structure
* @cl: host client
*/
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+static inline void mei_io_list_flush_cl(struct list_head *head,
+ const struct mei_cl *cl)
{
- __mei_io_list_flush(list, cl, false);
+ __mei_io_list_flush_cl(head, cl, false);
}
/**
- * mei_io_list_free - removes cb belonging to cl and free them
+ * mei_io_list_free_cl - removes cb belonging to cl and free them
*
- * @list: An instance of our list structure
+ * @head: An instance of our list structure
* @cl: host client
*/
-static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
+static inline void mei_io_list_free_cl(struct list_head *head,
+ const struct mei_cl *cl)
{
- __mei_io_list_flush(list, cl, true);
+ __mei_io_list_flush_cl(head, cl, true);
+}
+
+/**
+ * mei_io_list_free_fp - free cb from a list that matches file pointer
+ *
+ * @head: io list
+ * @fp: file pointer (matching cb file object), may be NULL
+ */
+void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
+{
+ struct mei_cl_cb *cb, *next;
+
+ list_for_each_entry_safe(cb, next, head, list)
+ if (!fp || fp == cb->fp)
+ mei_io_cb_free(cb);
}
/**
@@ -479,7 +496,7 @@ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
if (!cb)
return NULL;
- list_add_tail(&cb->list, &cl->dev->ctrl_wr_list.list);
+ list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
return cb;
}
@@ -504,27 +521,6 @@ struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
}
/**
- * mei_cl_read_cb_flush - free client's read pending and completed cbs
- * for a specific file
- *
- * @cl: host client
- * @fp: file pointer (matching cb file object), may be NULL
- */
-void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
-{
- struct mei_cl_cb *cb, *next;
-
- list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
- if (!fp || fp == cb->fp)
- mei_io_cb_free(cb);
-
-
- list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
- if (!fp || fp == cb->fp)
- mei_io_cb_free(cb);
-}
-
-/**
* mei_cl_flush_queues - flushes queue lists belonging to cl.
*
* @cl: host client
@@ -542,18 +538,16 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
dev = cl->dev;
cl_dbg(dev, cl, "remove list entry belonging to cl\n");
- mei_io_list_free(&cl->dev->write_list, cl);
- mei_io_list_free(&cl->dev->write_waiting_list, cl);
- mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
- mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
- mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
-
- mei_cl_read_cb_flush(cl, fp);
+ mei_io_list_free_cl(&cl->dev->write_list, cl);
+ mei_io_list_free_cl(&cl->dev->write_waiting_list, cl);
+ mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
+ mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
+ mei_io_list_free_fp(&cl->rd_pending, fp);
+ mei_io_list_free_fp(&cl->rd_completed, fp);
return 0;
}
-
/**
* mei_cl_init - initializes cl.
*
@@ -756,7 +750,7 @@ static void mei_cl_wake_all(struct mei_cl *cl)
*
* @cl: host client
*/
-void mei_cl_set_disconnected(struct mei_cl *cl)
+static void mei_cl_set_disconnected(struct mei_cl *cl)
{
struct mei_device *dev = cl->dev;
@@ -765,15 +759,18 @@ void mei_cl_set_disconnected(struct mei_cl *cl)
return;
cl->state = MEI_FILE_DISCONNECTED;
- mei_io_list_free(&dev->write_list, cl);
- mei_io_list_free(&dev->write_waiting_list, cl);
- mei_io_list_flush(&dev->ctrl_rd_list, cl);
- mei_io_list_flush(&dev->ctrl_wr_list, cl);
+ mei_io_list_free_cl(&dev->write_list, cl);
+ mei_io_list_free_cl(&dev->write_waiting_list, cl);
+ mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
+ mei_io_list_free_cl(&dev->amthif_cmd_list, cl);
mei_cl_wake_all(cl);
cl->rx_flow_ctrl_creds = 0;
cl->tx_flow_ctrl_creds = 0;
cl->timer_count = 0;
+ mei_cl_bus_module_put(cl);
+
if (!cl->me_cl)
return;
@@ -829,7 +826,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
return ret;
}
- list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+ list_move_tail(&cb->list, &dev->ctrl_rd_list);
cl->timer_count = MEI_CONNECT_TIMEOUT;
mei_schedule_stall_timer(dev);
@@ -847,7 +844,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
* Return: 0, OK; otherwise, error.
*/
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list)
+ struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
@@ -862,7 +859,7 @@ int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
ret = mei_cl_send_disconnect(cl, cb);
if (ret)
- list_move_tail(&cb->list, &cmpl_list->list);
+ list_move_tail(&cb->list, cmpl_list);
return ret;
}
@@ -984,7 +981,7 @@ static bool mei_cl_is_other_connecting(struct mei_cl *cl)
dev = cl->dev;
- list_for_each_entry(cb, &dev->ctrl_rd_list.list, list) {
+ list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
if (cb->fop_type == MEI_FOP_CONNECT &&
mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
return true;
@@ -1015,7 +1012,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
return ret;
}
- list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+ list_move_tail(&cb->list, &dev->ctrl_rd_list);
cl->timer_count = MEI_CONNECT_TIMEOUT;
mei_schedule_stall_timer(dev);
return 0;
@@ -1031,7 +1028,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
* Return: 0, OK; otherwise, error.
*/
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list)
+ struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
@@ -1049,7 +1046,7 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
rets = mei_cl_send_connect(cl, cb);
if (rets)
- list_move_tail(&cb->list, &cmpl_list->list);
+ list_move_tail(&cb->list, cmpl_list);
return rets;
}
@@ -1077,13 +1074,17 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
dev = cl->dev;
+ if (!mei_cl_bus_module_get(cl))
+ return -ENODEV;
+
rets = mei_cl_set_connecting(cl, me_cl);
if (rets)
- return rets;
+ goto nortpm;
if (mei_cl_is_fixed_address(cl)) {
cl->state = MEI_FILE_CONNECTED;
- return 0;
+ rets = 0;
+ goto nortpm;
}
rets = pm_runtime_get(dev->dev);
@@ -1117,8 +1118,8 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
if (!mei_cl_is_connected(cl)) {
if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
- mei_io_list_flush(&dev->ctrl_rd_list, cl);
- mei_io_list_flush(&dev->ctrl_wr_list, cl);
+ mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
/* ignore disconnect return valuue;
* in case of failure reset will be invoked
*/
@@ -1270,7 +1271,7 @@ enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
* Return: 0 on such and error otherwise.
*/
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list)
+ struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
@@ -1288,11 +1289,11 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
ret = mei_hbm_cl_notify_req(dev, cl, request);
if (ret) {
cl->status = ret;
- list_move_tail(&cb->list, &cmpl_list->list);
+ list_move_tail(&cb->list, cmpl_list);
return ret;
}
- list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+ list_move_tail(&cb->list, &dev->ctrl_rd_list);
return 0;
}
@@ -1325,6 +1326,9 @@ int mei_cl_notify_request(struct mei_cl *cl,
return -EOPNOTSUPP;
}
+ if (!mei_cl_is_connected(cl))
+ return -ENODEV;
+
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
@@ -1344,7 +1348,7 @@ int mei_cl_notify_request(struct mei_cl *cl,
rets = -ENODEV;
goto out;
}
- list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+ list_move_tail(&cb->list, &dev->ctrl_rd_list);
}
mutex_unlock(&dev->device_lock);
@@ -1419,6 +1423,11 @@ int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
dev = cl->dev;
+ if (!dev->hbm_f_ev_supported) {
+ cl_dbg(dev, cl, "notifications not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (!mei_cl_is_connected(cl))
return -ENODEV;
@@ -1519,7 +1528,7 @@ nortpm:
* Return: 0, OK; otherwise error.
*/
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list)
+ struct list_head *cmpl_list)
{
struct mei_device *dev;
struct mei_msg_data *buf;
@@ -1591,13 +1600,13 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
}
if (mei_hdr.msg_complete)
- list_move_tail(&cb->list, &dev->write_waiting_list.list);
+ list_move_tail(&cb->list, &dev->write_waiting_list);
return 0;
err:
cl->status = rets;
- list_move_tail(&cb->list, &cmpl_list->list);
+ list_move_tail(&cb->list, cmpl_list);
return rets;
}
@@ -1687,9 +1696,9 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
out:
if (mei_hdr.msg_complete)
- list_add_tail(&cb->list, &dev->write_waiting_list.list);
+ list_add_tail(&cb->list, &dev->write_waiting_list);
else
- list_add_tail(&cb->list, &dev->write_list.list);
+ list_add_tail(&cb->list, &dev->write_list);
cb = NULL;
if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index f2545af9be7b..545ae319ba90 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -83,17 +83,7 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
* MEI IO Functions
*/
void mei_io_cb_free(struct mei_cl_cb *priv_cb);
-
-/**
- * mei_io_list_init - Sets up a queue list.
- *
- * @list: An instance cl callback structure
- */
-static inline void mei_io_list_init(struct mei_cl_cb *list)
-{
- INIT_LIST_HEAD(&list->list);
-}
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
+void mei_io_list_free_fp(struct list_head *head, const struct file *fp);
/*
* MEI Host Client Functions
@@ -110,7 +100,6 @@ struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev);
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
const struct file *fp);
-void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
enum mei_cb_file_ops type,
const struct file *fp);
@@ -209,19 +198,18 @@ static inline u8 mei_cl_host_addr(const struct mei_cl *cl)
}
int mei_cl_disconnect(struct mei_cl *cl);
-void mei_cl_set_disconnected(struct mei_cl *cl);
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list);
+ struct list_head *cmpl_list);
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
const struct file *file);
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list);
+ struct list_head *cmpl_list);
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
- struct mei_cl_cb *cmpl_list);
+ struct list_head *cmpl_list);
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list);
+ struct list_head *cmpl_list);
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
@@ -232,7 +220,7 @@ enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request);
int mei_cl_notify_request(struct mei_cl *cl,
const struct file *file, u8 request);
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list);
+ struct list_head *cmpl_list);
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
void mei_cl_notify(struct mei_cl *cl);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 25b4a1ba522d..ba3a774c8d71 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -815,7 +815,7 @@ static void mei_hbm_cl_res(struct mei_device *dev,
struct mei_cl_cb *cb, *next;
cl = NULL;
- list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) {
+ list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {
cl = cb->cl;
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index a05375a3338a..71216affcab1 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -140,6 +140,19 @@ static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
}
/**
+ * mei_hcsr_set_hig - set host interrupt (set H_IG)
+ *
+ * @dev: the device structure
+ */
+static inline void mei_hcsr_set_hig(struct mei_device *dev)
+{
+ u32 hcsr;
+
+ hcsr = mei_hcsr_read(dev) | H_IG;
+ mei_hcsr_set(dev, hcsr);
+}
+
+/**
* mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
*
* @dev: the device structure
@@ -381,6 +394,19 @@ static bool mei_me_hw_is_ready(struct mei_device *dev)
}
/**
+ * mei_me_hw_is_resetting - check whether the me(hw) is in reset
+ *
+ * @dev: mei device
+ * Return: bool
+ */
+static bool mei_me_hw_is_resetting(struct mei_device *dev)
+{
+ u32 mecsr = mei_me_mecsr_read(dev);
+
+ return (mecsr & ME_RST_HRA) == ME_RST_HRA;
+}
+
+/**
* mei_me_hw_ready_wait - wait until the me(hw) has turned ready
* or timeout is reached
*
@@ -505,7 +531,6 @@ static int mei_me_hbuf_write(struct mei_device *dev,
unsigned long rem;
unsigned long length = header->length;
u32 *reg_buf = (u32 *)buf;
- u32 hcsr;
u32 dw_cnt;
int i;
int empty_slots;
@@ -532,8 +557,7 @@ static int mei_me_hbuf_write(struct mei_device *dev,
mei_me_hcbww_write(dev, reg);
}
- hcsr = mei_hcsr_read(dev) | H_IG;
- mei_hcsr_set(dev, hcsr);
+ mei_hcsr_set_hig(dev);
if (!mei_me_hw_is_ready(dev))
return -EIO;
@@ -580,7 +604,6 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
unsigned long buffer_length)
{
u32 *reg_buf = (u32 *)buffer;
- u32 hcsr;
for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
*reg_buf++ = mei_me_mecbrw_read(dev);
@@ -591,8 +614,7 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
memcpy(reg_buf, &reg, buffer_length);
}
- hcsr = mei_hcsr_read(dev) | H_IG;
- mei_hcsr_set(dev, hcsr);
+ mei_hcsr_set_hig(dev);
return 0;
}
@@ -1189,7 +1211,7 @@ irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
- struct mei_cl_cb complete_list;
+ struct list_head cmpl_list;
s32 slots;
u32 hcsr;
int rets = 0;
@@ -1201,7 +1223,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
hcsr = mei_hcsr_read(dev);
me_intr_clear(dev, hcsr);
- mei_io_list_init(&complete_list);
+ INIT_LIST_HEAD(&cmpl_list);
/* check if ME wants a reset */
if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
@@ -1210,6 +1232,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
goto end;
}
+ if (mei_me_hw_is_resetting(dev))
+ mei_hcsr_set_hig(dev);
+
mei_me_pg_intr(dev, me_intr_src(hcsr));
/* check if we need to start the dev */
@@ -1227,7 +1252,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
slots = mei_count_full_read_slots(dev);
while (slots > 0) {
dev_dbg(dev->dev, "slots to read = %08x\n", slots);
- rets = mei_irq_read_handler(dev, &complete_list, &slots);
+ rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
/* There is a race between ME write and interrupt delivery:
* Not all data is always available immediately after the
* interrupt, so try to read again on the next interrupt.
@@ -1252,11 +1277,11 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
*/
if (dev->pg_event != MEI_PG_EVENT_WAIT &&
dev->pg_event != MEI_PG_EVENT_RECEIVED) {
- rets = mei_irq_write_handler(dev, &complete_list);
+ rets = mei_irq_write_handler(dev, &cmpl_list);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
}
- mei_irq_compl_handler(dev, &complete_list);
+ mei_irq_compl_handler(dev, &cmpl_list);
end:
dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
@@ -1389,7 +1414,7 @@ const struct mei_cfg mei_me_pch8_sps_cfg = {
* @pdev: The pci device structure
* @cfg: per device generation config
*
- * Return: The mei_device_device pointer on success, NULL on failure.
+ * Return: The mei_device pointer on success, NULL on failure.
*/
struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
const struct mei_cfg *cfg)
@@ -1397,8 +1422,8 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
struct mei_device *dev;
struct mei_me_hw *hw;
- dev = kzalloc(sizeof(struct mei_device) +
- sizeof(struct mei_me_hw), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
+ sizeof(struct mei_me_hw), GFP_KERNEL);
if (!dev)
return NULL;
hw = to_me_hw(dev);
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index e9f8c0aeec13..24e4a4c96606 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1057,7 +1057,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
struct mei_txe_hw *hw = to_txe_hw(dev);
- struct mei_cl_cb complete_list;
+ struct list_head cmpl_list;
s32 slots;
int rets = 0;
@@ -1069,7 +1069,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
/* initialize our complete list */
mutex_lock(&dev->device_lock);
- mei_io_list_init(&complete_list);
+ INIT_LIST_HEAD(&cmpl_list);
if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
mei_txe_check_and_ack_intrs(dev, true);
@@ -1126,7 +1126,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
slots = mei_count_full_read_slots(dev);
if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
/* Read from TXE */
- rets = mei_irq_read_handler(dev, &complete_list, &slots);
+ rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
if (rets && dev->dev_state != MEI_DEV_RESETTING) {
dev_err(dev->dev,
"mei_irq_read_handler ret = %d.\n", rets);
@@ -1144,14 +1144,14 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
if (hw->aliveness && dev->hbuf_is_ready) {
/* get the real register value */
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
- rets = mei_irq_write_handler(dev, &complete_list);
+ rets = mei_irq_write_handler(dev, &cmpl_list);
if (rets && rets != -EMSGSIZE)
dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n",
rets);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
}
- mei_irq_compl_handler(dev, &complete_list);
+ mei_irq_compl_handler(dev, &cmpl_list);
end:
dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
@@ -1207,8 +1207,8 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
struct mei_device *dev;
struct mei_txe_hw *hw;
- dev = kzalloc(sizeof(struct mei_device) +
- sizeof(struct mei_txe_hw), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
+ sizeof(struct mei_txe_hw), GFP_KERNEL);
if (!dev)
return NULL;
diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h
index ce3ed0b88b0c..e1e8b66d7648 100644
--- a/drivers/misc/mei/hw-txe.h
+++ b/drivers/misc/mei/hw-txe.h
@@ -45,7 +45,7 @@
* @intr_cause: translated interrupt cause
*/
struct mei_txe_hw {
- void __iomem *mem_addr[NUM_OF_MEM_BARS];
+ void __iomem * const *mem_addr;
u32 aliveness;
u32 readiness;
u32 slots;
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 41e5760a6886..cfb1cdf176fa 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -349,16 +349,16 @@ EXPORT_SYMBOL_GPL(mei_stop);
bool mei_write_is_idle(struct mei_device *dev)
{
bool idle = (dev->dev_state == MEI_DEV_ENABLED &&
- list_empty(&dev->ctrl_wr_list.list) &&
- list_empty(&dev->write_list.list) &&
- list_empty(&dev->write_waiting_list.list));
+ list_empty(&dev->ctrl_wr_list) &&
+ list_empty(&dev->write_list) &&
+ list_empty(&dev->write_waiting_list));
dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n",
idle,
mei_dev_state_str(dev->dev_state),
- list_empty(&dev->ctrl_wr_list.list),
- list_empty(&dev->write_list.list),
- list_empty(&dev->write_waiting_list.list));
+ list_empty(&dev->ctrl_wr_list),
+ list_empty(&dev->write_list),
+ list_empty(&dev->write_waiting_list));
return idle;
}
@@ -388,17 +388,17 @@ void mei_device_init(struct mei_device *dev,
dev->dev_state = MEI_DEV_INITIALIZING;
dev->reset_count = 0;
- mei_io_list_init(&dev->write_list);
- mei_io_list_init(&dev->write_waiting_list);
- mei_io_list_init(&dev->ctrl_wr_list);
- mei_io_list_init(&dev->ctrl_rd_list);
+ INIT_LIST_HEAD(&dev->write_list);
+ INIT_LIST_HEAD(&dev->write_waiting_list);
+ INIT_LIST_HEAD(&dev->ctrl_wr_list);
+ INIT_LIST_HEAD(&dev->ctrl_rd_list);
INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
INIT_WORK(&dev->reset_work, mei_reset_work);
INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work);
INIT_LIST_HEAD(&dev->iamthif_cl.link);
- mei_io_list_init(&dev->amthif_cmd_list);
+ INIT_LIST_HEAD(&dev->amthif_cmd_list);
bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
dev->open_handle_count = 0;
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index b584749bcc4a..406e9e2b2fff 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -35,14 +35,14 @@
* for the completed callbacks
*
* @dev: mei device
- * @compl_list: list of completed cbs
+ * @cmpl_list: list of completed cbs
*/
-void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
+void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list)
{
struct mei_cl_cb *cb, *next;
struct mei_cl *cl;
- list_for_each_entry_safe(cb, next, &compl_list->list, list) {
+ list_for_each_entry_safe(cb, next, cmpl_list, list) {
cl = cb->cl;
list_del_init(&cb->list);
@@ -92,13 +92,13 @@ void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
*
* @cl: reading client
* @mei_hdr: header of mei client message
- * @complete_list: completion list
+ * @cmpl_list: completion list
*
* Return: always 0
*/
int mei_cl_irq_read_msg(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr,
- struct mei_cl_cb *complete_list)
+ struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
@@ -144,7 +144,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
if (mei_hdr->msg_complete) {
cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
- list_move_tail(&cb->list, &complete_list->list);
+ list_move_tail(&cb->list, cmpl_list);
} else {
pm_runtime_mark_last_busy(dev->dev);
pm_request_autosuspend(dev->dev);
@@ -154,7 +154,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
discard:
if (cb)
- list_move_tail(&cb->list, &complete_list->list);
+ list_move_tail(&cb->list, cmpl_list);
mei_irq_discard_msg(dev, mei_hdr);
return 0;
}
@@ -169,7 +169,7 @@ discard:
* Return: 0, OK; otherwise, error.
*/
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list)
+ struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
@@ -183,7 +183,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
return -EMSGSIZE;
ret = mei_hbm_cl_disconnect_rsp(dev, cl);
- list_move_tail(&cb->list, &cmpl_list->list);
+ list_move_tail(&cb->list, cmpl_list);
return ret;
}
@@ -199,7 +199,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
* Return: 0, OK; otherwise, error.
*/
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list)
+ struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
@@ -219,7 +219,7 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
if (ret) {
cl->status = ret;
cb->buf_idx = 0;
- list_move_tail(&cb->list, &cmpl_list->list);
+ list_move_tail(&cb->list, cmpl_list);
return ret;
}
@@ -249,7 +249,7 @@ static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
* Return: 0 on success, <0 on failure.
*/
int mei_irq_read_handler(struct mei_device *dev,
- struct mei_cl_cb *cmpl_list, s32 *slots)
+ struct list_head *cmpl_list, s32 *slots)
{
struct mei_msg_hdr *mei_hdr;
struct mei_cl *cl;
@@ -347,12 +347,11 @@ EXPORT_SYMBOL_GPL(mei_irq_read_handler);
*
* Return: 0 on success, <0 on failure.
*/
-int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
+int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
{
struct mei_cl *cl;
struct mei_cl_cb *cb, *next;
- struct mei_cl_cb *list;
s32 slots;
int ret;
@@ -367,19 +366,18 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
/* complete all waiting for write CB */
dev_dbg(dev->dev, "complete all waiting for write cb.\n");
- list = &dev->write_waiting_list;
- list_for_each_entry_safe(cb, next, &list->list, list) {
+ list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
cl = cb->cl;
cl->status = 0;
cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
cl->writing_state = MEI_WRITE_COMPLETE;
- list_move_tail(&cb->list, &cmpl_list->list);
+ list_move_tail(&cb->list, cmpl_list);
}
/* complete control write list CB */
dev_dbg(dev->dev, "complete control write list cb.\n");
- list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
+ list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
cl = cb->cl;
switch (cb->fop_type) {
case MEI_FOP_DISCONNECT:
@@ -423,7 +421,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
}
/* complete write list CB */
dev_dbg(dev->dev, "complete write list cb.\n");
- list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
+ list_for_each_entry_safe(cb, next, &dev->write_list, list) {
cl = cb->cl;
if (cl == &dev->iamthif_cl)
ret = mei_amthif_irq_write(cl, cb, cmpl_list);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index e1bf54481fd6..9d0b7050c79a 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -182,32 +182,36 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto out;
}
- if (rets == -EBUSY &&
- !mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, file)) {
- rets = -ENOMEM;
- goto out;
- }
- do {
- mutex_unlock(&dev->device_lock);
-
- if (wait_event_interruptible(cl->rx_wait,
- (!list_empty(&cl->rd_completed)) ||
- (!mei_cl_is_connected(cl)))) {
+again:
+ mutex_unlock(&dev->device_lock);
+ if (wait_event_interruptible(cl->rx_wait,
+ !list_empty(&cl->rd_completed) ||
+ !mei_cl_is_connected(cl))) {
+ if (signal_pending(current))
+ return -EINTR;
+ return -ERESTARTSYS;
+ }
+ mutex_lock(&dev->device_lock);
- if (signal_pending(current))
- return -EINTR;
- return -ERESTARTSYS;
- }
+ if (!mei_cl_is_connected(cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
- mutex_lock(&dev->device_lock);
- if (!mei_cl_is_connected(cl)) {
- rets = -ENODEV;
- goto out;
- }
+ cb = mei_cl_read_cb(cl, file);
+ if (!cb) {
+ /*
+ * For amthif all the waiters are woken up,
+ * but only fp with matching cb->fp get the cb,
+ * the others have to return to wait on read.
+ */
+ if (cl == &dev->iamthif_cl)
+ goto again;
- cb = mei_cl_read_cb(cl, file);
- } while (!cb);
+ rets = 0;
+ goto out;
+ }
copy_buffer:
/* now copy the data to user space */
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 8dadb98662a9..d41aac53a2ac 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -328,6 +328,8 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
bool mei_cl_bus_rx_event(struct mei_cl *cl);
bool mei_cl_bus_notify_event(struct mei_cl *cl);
void mei_cl_bus_remove_devices(struct mei_device *bus);
+bool mei_cl_bus_module_get(struct mei_cl *cl);
+void mei_cl_bus_module_put(struct mei_cl *cl);
int mei_cl_bus_init(void);
void mei_cl_bus_exit(void);
@@ -439,10 +441,10 @@ struct mei_device {
struct cdev cdev;
int minor;
- struct mei_cl_cb write_list;
- struct mei_cl_cb write_waiting_list;
- struct mei_cl_cb ctrl_wr_list;
- struct mei_cl_cb ctrl_rd_list;
+ struct list_head write_list;
+ struct list_head write_waiting_list;
+ struct list_head ctrl_wr_list;
+ struct list_head ctrl_rd_list;
struct list_head file_list;
long open_handle_count;
@@ -499,7 +501,7 @@ struct mei_device {
bool override_fixed_address;
/* amthif list for cmd waiting */
- struct mei_cl_cb amthif_cmd_list;
+ struct list_head amthif_cmd_list;
struct mei_cl iamthif_cl;
long iamthif_open_count;
u32 iamthif_stall_timer;
@@ -571,10 +573,10 @@ void mei_cancel_work(struct mei_device *dev);
void mei_timer(struct work_struct *work);
void mei_schedule_stall_timer(struct mei_device *dev);
int mei_irq_read_handler(struct mei_device *dev,
- struct mei_cl_cb *cmpl_list, s32 *slots);
+ struct list_head *cmpl_list, s32 *slots);
-int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
-void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
+int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list);
+void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list);
/*
* AMTHIF - AMT Host Interface Functions
@@ -590,12 +592,12 @@ int mei_amthif_release(struct mei_device *dev, struct file *file);
int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_amthif_run_next_cmd(struct mei_device *dev);
int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct mei_cl_cb *cmpl_list);
+ struct list_head *cmpl_list);
void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_amthif_irq_read_msg(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr,
- struct mei_cl_cb *complete_list);
+ struct list_head *cmpl_list);
int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
/*
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index f9c6ec4b98ab..0a668fdfbbe9 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -149,18 +149,18 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
/* enable pci dev */
- err = pci_enable_device(pdev);
+ err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable pci device.\n");
goto end;
}
/* set PCI host mastering */
pci_set_master(pdev);
- /* pci request regions for mei driver */
- err = pci_request_regions(pdev, KBUILD_MODNAME);
+ /* pci request regions and mapping IO device memory for mei driver */
+ err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
if (err) {
dev_err(&pdev->dev, "failed to get pci regions.\n");
- goto disable_device;
+ goto end;
}
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
@@ -173,24 +173,18 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
- goto release_regions;
+ goto end;
}
-
/* allocates and initializes the mei dev structure */
dev = mei_me_dev_init(pdev, cfg);
if (!dev) {
err = -ENOMEM;
- goto release_regions;
+ goto end;
}
hw = to_me_hw(dev);
- /* mapping IO device memory */
- hw->mem_addr = pci_iomap(pdev, 0, 0);
- if (!hw->mem_addr) {
- dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
- err = -ENOMEM;
- goto free_device;
- }
+ hw->mem_addr = pcim_iomap_table(pdev)[0];
+
pci_enable_msi(pdev);
/* request and enable interrupt */
@@ -203,7 +197,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
pdev->irq);
- goto disable_msi;
+ goto end;
}
if (mei_start(dev)) {
@@ -242,15 +236,6 @@ release_irq:
mei_cancel_work(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
-disable_msi:
- pci_disable_msi(pdev);
- pci_iounmap(pdev, hw->mem_addr);
-free_device:
- kfree(dev);
-release_regions:
- pci_release_regions(pdev);
-disable_device:
- pci_disable_device(pdev);
end:
dev_err(&pdev->dev, "initialization failed.\n");
return err;
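(Editor's aside, not part of the patch.) The collapse of the old disable_msi/free_device/release_regions/disable_device labels into a single "end" label follows from switching to managed (devres) PCI helpers: resources taken with pcim_enable_device(), pcim_iomap_regions() and devm_kzalloc() are released automatically when probe fails or the device is removed. A minimal sketch of that pattern, with invented names rather than the MEI driver's own, might look like:

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical probe showing the pcim_*/devm_* pattern; not MEI code. */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int err;

	err = pcim_enable_device(pdev);		/* undone automatically on failure */
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
	if (err)
		return err;

	regs = pcim_iomap_table(pdev)[0];	/* BAR 0 mapping from devres */
	dev_dbg(&pdev->dev, "BAR0 mapped at %p\n", regs);

	/* driver state also tied to the device's lifetime */
	if (!devm_kzalloc(&pdev->dev, 64, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}

With everything owned by devres, every error path can simply return, which is exactly why the probe functions in hw-me.c, hw-txe.c, pci-me.c and pci-txe.c lose their unwind labels in this series.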
@@ -267,7 +252,6 @@ end:
static void mei_me_remove(struct pci_dev *pdev)
{
struct mei_device *dev;
- struct mei_me_hw *hw;
dev = pci_get_drvdata(pdev);
if (!dev)
@@ -276,33 +260,19 @@ static void mei_me_remove(struct pci_dev *pdev)
if (mei_pg_is_enabled(dev))
pm_runtime_get_noresume(&pdev->dev);
- hw = to_me_hw(dev);
-
-
dev_dbg(&pdev->dev, "stop\n");
mei_stop(dev);
if (!pci_dev_run_wake(pdev))
mei_me_unset_pm_domain(dev);
- /* disable interrupts */
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
-
- if (hw->mem_addr)
- pci_iounmap(pdev, hw->mem_addr);
mei_deregister(dev);
-
- kfree(dev);
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
-
}
+
#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 58ffd30dcc91..fe088b40daf9 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -52,17 +52,6 @@ static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */
-static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
-{
- int i;
-
- for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
- if (hw->mem_addr[i]) {
- pci_iounmap(pdev, hw->mem_addr[i]);
- hw->mem_addr[i] = NULL;
- }
- }
-}
/**
* mei_txe_probe - Device Initialization Routine
*
@@ -75,22 +64,22 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct mei_device *dev;
struct mei_txe_hw *hw;
+ const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
int err;
- int i;
/* enable pci dev */
- err = pci_enable_device(pdev);
+ err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable pci device.\n");
goto end;
}
/* set PCI host mastering */
pci_set_master(pdev);
- /* pci request regions for mei driver */
- err = pci_request_regions(pdev, KBUILD_MODNAME);
+ /* pci request regions and mapping IO device memory for mei driver */
+ err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
if (err) {
dev_err(&pdev->dev, "failed to get pci regions.\n");
- goto disable_device;
+ goto end;
}
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
@@ -98,7 +87,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No suitable DMA available.\n");
- goto release_regions;
+ goto end;
}
}
@@ -106,20 +95,10 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev = mei_txe_dev_init(pdev);
if (!dev) {
err = -ENOMEM;
- goto release_regions;
+ goto end;
}
hw = to_txe_hw(dev);
-
- /* mapping IO device memory */
- for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
- hw->mem_addr[i] = pci_iomap(pdev, i, 0);
- if (!hw->mem_addr[i]) {
- dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
- err = -ENOMEM;
- goto free_device;
- }
- }
-
+ hw->mem_addr = pcim_iomap_table(pdev);
pci_enable_msi(pdev);
@@ -140,7 +119,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
pdev->irq);
- goto free_device;
+ goto end;
}
if (mei_start(dev)) {
@@ -173,23 +152,9 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
stop:
mei_stop(dev);
release_irq:
-
mei_cancel_work(dev);
-
- /* disable interrupts */
mei_disable_interrupts(dev);
-
free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
-
-free_device:
- mei_txe_pci_iounmap(pdev, hw);
-
- kfree(dev);
-release_regions:
- pci_release_regions(pdev);
-disable_device:
- pci_disable_device(pdev);
end:
dev_err(&pdev->dev, "initialization failed.\n");
return err;
@@ -206,38 +171,24 @@ end:
static void mei_txe_remove(struct pci_dev *pdev)
{
struct mei_device *dev;
- struct mei_txe_hw *hw;
dev = pci_get_drvdata(pdev);
if (!dev) {
- dev_err(&pdev->dev, "mei: dev =NULL\n");
+ dev_err(&pdev->dev, "mei: dev == NULL\n");
return;
}
pm_runtime_get_noresume(&pdev->dev);
- hw = to_txe_hw(dev);
-
mei_stop(dev);
if (!pci_dev_run_wake(pdev))
mei_txe_unset_pm_domain(dev);
- /* disable interrupts */
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
-
- pci_set_drvdata(pdev, NULL);
-
- mei_txe_pci_iounmap(pdev, hw);
mei_deregister(dev);
-
- kfree(dev);
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
}
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index 88e45234d527..fed992e2c258 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -292,7 +292,6 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
if (ret) {
dev_err(vop_dev(vdev), "%s %d err %d\n",
__func__, __LINE__, ret);
- kfree(vdev);
return ret;
}
diff --git a/drivers/misc/panel.c b/drivers/misc/panel.c
index 6030ac5b8c63..ef2ece0f26af 100644
--- a/drivers/misc/panel.c
+++ b/drivers/misc/panel.c
@@ -56,6 +56,7 @@
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
+#include <linux/workqueue.h>
#include <generated/utsrelease.h>
#include <linux/io.h>
@@ -64,8 +65,6 @@
#define LCD_MINOR 156
#define KEYPAD_MINOR 185
-#define PANEL_VERSION "0.9.5"
-
#define LCD_MAXBYTES 256 /* max burst write */
#define KEYPAD_BUFFER 64
@@ -77,8 +76,8 @@
/* a key repeats this times INPUT_POLL_TIME */
#define KEYPAD_REP_DELAY (2)
-/* keep the light on this times INPUT_POLL_TIME for each flash */
-#define FLASH_LIGHT_TEMPO (200)
+/* keep the light on this many seconds for each flash */
+#define FLASH_LIGHT_TEMPO (4)
/* converts an r_str() input to an active high, bits string : 000BAOSE */
#define PNL_PINPUT(a) ((((unsigned char)(a)) ^ 0x7F) >> 3)
@@ -121,8 +120,6 @@
#define PIN_SELECP 17
#define PIN_NOT_SET 127
-#define LCD_FLAG_S 0x0001
-#define LCD_FLAG_ID 0x0002
#define LCD_FLAG_B 0x0004 /* blink on */
#define LCD_FLAG_C 0x0008 /* cursor on */
#define LCD_FLAG_D 0x0010 /* display on */
@@ -256,7 +253,10 @@ static struct {
int hwidth;
int charset;
int proto;
- int light_tempo;
+
+ struct delayed_work bl_work;
+ struct mutex bl_tempo_lock; /* Protects access to bl_tempo */
+ bool bl_tempo;
/* TODO: use union here? */
struct {
@@ -661,8 +661,6 @@ static void lcd_get_bits(unsigned int port, int *val)
}
}
-static void init_scan_timer(void);
-
/* sets data port bits according to current signals values */
static int set_data_bits(void)
{
@@ -794,11 +792,8 @@ static void lcd_send_serial(int byte)
}
/* turn the backlight on or off */
-static void lcd_backlight(int on)
+static void __lcd_backlight(int on)
{
- if (lcd.pins.bl == PIN_NONE)
- return;
-
/* The backlight is activated by setting the AUTOFEED line to +5V */
spin_lock_irq(&pprt_lock);
if (on)
@@ -809,6 +804,44 @@ static void lcd_backlight(int on)
spin_unlock_irq(&pprt_lock);
}
+static void lcd_backlight(int on)
+{
+ if (lcd.pins.bl == PIN_NONE)
+ return;
+
+ mutex_lock(&lcd.bl_tempo_lock);
+ if (!lcd.bl_tempo)
+ __lcd_backlight(on);
+ mutex_unlock(&lcd.bl_tempo_lock);
+}
+
+static void lcd_bl_off(struct work_struct *work)
+{
+ mutex_lock(&lcd.bl_tempo_lock);
+ if (lcd.bl_tempo) {
+ lcd.bl_tempo = false;
+ if (!(lcd.flags & LCD_FLAG_L))
+ __lcd_backlight(0);
+ }
+ mutex_unlock(&lcd.bl_tempo_lock);
+}
+
+/* turn the backlight on for a little while */
+static void lcd_poke(void)
+{
+ if (lcd.pins.bl == PIN_NONE)
+ return;
+
+ cancel_delayed_work_sync(&lcd.bl_work);
+
+ mutex_lock(&lcd.bl_tempo_lock);
+ if (!lcd.bl_tempo && !(lcd.flags & LCD_FLAG_L))
+ __lcd_backlight(1);
+ lcd.bl_tempo = true;
+ schedule_delayed_work(&lcd.bl_work, FLASH_LIGHT_TEMPO * HZ);
+ mutex_unlock(&lcd.bl_tempo_lock);
+}
+
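(Editor's aside, not part of the patch.) The lcd_poke()/lcd_bl_off() pair above replaces the old scan-timer countdown with a delayed work item: each poke cancels any pending switch-off, turns the light on if it is not already lit, and re-arms the timeout. A stripped-down sketch of the same flash-with-timeout idiom, using made-up names outside the panel driver, would be:

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(flash_lock);
static bool flash_active;

static void flash_off(struct work_struct *work)
{
	mutex_lock(&flash_lock);
	flash_active = false;
	/* ...turn the light off here... */
	mutex_unlock(&flash_lock);
}

static DECLARE_DELAYED_WORK(flash_work, flash_off);

static void flash_poke(void)
{
	/* restart the timeout window on every poke */
	cancel_delayed_work_sync(&flash_work);

	mutex_lock(&flash_lock);
	if (!flash_active) {
		flash_active = true;
		/* ...turn the light on here... */
	}
	schedule_delayed_work(&flash_work, 4 * HZ);
	mutex_unlock(&flash_lock);
}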
/* send a command to the LCD panel in serial mode */
static void lcd_write_cmd_s(int cmd)
{
@@ -907,6 +940,13 @@ static void lcd_gotoxy(void)
(lcd.hwidth - 1) : lcd.bwidth - 1));
}
+static void lcd_home(void)
+{
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
+ lcd_gotoxy();
+}
+
static void lcd_print(char c)
{
if (lcd.addr.x < lcd.bwidth) {
@@ -925,9 +965,7 @@ static void lcd_clear_fast_s(void)
{
int pos;
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
+ lcd_home();
spin_lock_irq(&pprt_lock);
for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
@@ -939,9 +977,7 @@ static void lcd_clear_fast_s(void)
}
spin_unlock_irq(&pprt_lock);
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
+ lcd_home();
}
/* fills the display with spaces and resets X/Y */
@@ -949,9 +985,7 @@ static void lcd_clear_fast_p8(void)
{
int pos;
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
+ lcd_home();
spin_lock_irq(&pprt_lock);
for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
@@ -977,9 +1011,7 @@ static void lcd_clear_fast_p8(void)
}
spin_unlock_irq(&pprt_lock);
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
+ lcd_home();
}
/* fills the display with spaces and resets X/Y */
@@ -987,9 +1019,7 @@ static void lcd_clear_fast_tilcd(void)
{
int pos;
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
+ lcd_home();
spin_lock_irq(&pprt_lock);
for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
@@ -1000,9 +1030,7 @@ static void lcd_clear_fast_tilcd(void)
spin_unlock_irq(&pprt_lock);
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
+ lcd_home();
}
/* clears the display and resets X/Y */
@@ -1108,13 +1136,8 @@ static inline int handle_lcd_special_code(void)
processed = 1;
break;
case '*':
- /* flash back light using the keypad timer */
- if (scan_timer.function) {
- if (lcd.light_tempo == 0 &&
- ((lcd.flags & LCD_FLAG_L) == 0))
- lcd_backlight(1);
- lcd.light_tempo = FLASH_LIGHT_TEMPO;
- }
+ /* flash back light */
+ lcd_poke();
processed = 1;
break;
case 'f': /* Small Font */
@@ -1278,21 +1301,14 @@ static inline int handle_lcd_special_code(void)
lcd_write_cmd(LCD_CMD_FUNCTION_SET
| LCD_CMD_DATA_LEN_8BITS
| ((lcd.flags & LCD_FLAG_F)
- ? LCD_CMD_TWO_LINES : 0)
- | ((lcd.flags & LCD_FLAG_N)
? LCD_CMD_FONT_5X10_DOTS
+ : 0)
+ | ((lcd.flags & LCD_FLAG_N)
+ ? LCD_CMD_TWO_LINES
: 0));
/* check whether L flag was changed */
- else if ((oldflags ^ lcd.flags) & (LCD_FLAG_L)) {
- if (lcd.flags & (LCD_FLAG_L))
- lcd_backlight(1);
- else if (lcd.light_tempo == 0)
- /*
- * switch off the light only when the tempo
- * lighting is gone
- */
- lcd_backlight(0);
- }
+ else if ((oldflags ^ lcd.flags) & (LCD_FLAG_L))
+ lcd_backlight(!!(lcd.flags & LCD_FLAG_L));
}
return processed;
@@ -1376,9 +1392,7 @@ static void lcd_write_char(char c)
processed = 1;
} else if (!strcmp(lcd.esc_seq.buf, "[H")) {
/* cursor to home */
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
+ lcd_home();
processed = 1;
}
/* codes starting with ^[[L */
@@ -1625,8 +1639,10 @@ static void lcd_init(void)
else
lcd_char_conv = NULL;
- if (lcd.pins.bl != PIN_NONE)
- init_scan_timer();
+ if (lcd.pins.bl != PIN_NONE) {
+ mutex_init(&lcd.bl_tempo_lock);
+ INIT_DELAYED_WORK(&lcd.bl_work, lcd_bl_off);
+ }
pin_to_bits(lcd.pins.e, lcd_bits[LCD_PORT_D][LCD_BIT_E],
lcd_bits[LCD_PORT_C][LCD_BIT_E]);
@@ -1655,14 +1671,11 @@ static void lcd_init(void)
panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
#endif
#else
- panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\nPanel-"
- PANEL_VERSION);
+ panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE);
#endif
- lcd.addr.x = 0;
- lcd.addr.y = 0;
/* clear the display on the next device opening */
lcd.must_clear = true;
- lcd_gotoxy();
+ lcd_home();
}
/*
@@ -1997,19 +2010,8 @@ static void panel_scan_timer(void)
panel_process_inputs();
}
- if (lcd.enabled && lcd.initialized) {
- if (keypressed) {
- if (lcd.light_tempo == 0 &&
- ((lcd.flags & LCD_FLAG_L) == 0))
- lcd_backlight(1);
- lcd.light_tempo = FLASH_LIGHT_TEMPO;
- } else if (lcd.light_tempo > 0) {
- lcd.light_tempo--;
- if (lcd.light_tempo == 0 &&
- ((lcd.flags & LCD_FLAG_L) == 0))
- lcd_backlight(0);
- }
- }
+ if (keypressed && lcd.enabled && lcd.initialized)
+ lcd_poke();
mod_timer(&scan_timer, jiffies + INPUT_POLL_TIME);
}
@@ -2270,25 +2272,26 @@ static void panel_detach(struct parport *port)
if (scan_timer.function)
del_timer_sync(&scan_timer);
- if (pprt) {
- if (keypad.enabled) {
- misc_deregister(&keypad_dev);
- keypad_initialized = 0;
- }
+ if (keypad.enabled) {
+ misc_deregister(&keypad_dev);
+ keypad_initialized = 0;
+ }
- if (lcd.enabled) {
- panel_lcd_print("\x0cLCD driver " PANEL_VERSION
- "\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-");
- misc_deregister(&lcd_dev);
- lcd.initialized = false;
+ if (lcd.enabled) {
+ panel_lcd_print("\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-");
+ misc_deregister(&lcd_dev);
+ if (lcd.pins.bl != PIN_NONE) {
+ cancel_delayed_work_sync(&lcd.bl_work);
+ __lcd_backlight(0);
}
-
- /* TODO: free all input signals */
- parport_release(pprt);
- parport_unregister_device(pprt);
- pprt = NULL;
- unregister_reboot_notifier(&panel_notifier);
+ lcd.initialized = false;
}
+
+ /* TODO: free all input signals */
+ parport_release(pprt);
+ parport_unregister_device(pprt);
+ pprt = NULL;
+ unregister_reboot_notifier(&panel_notifier);
}
static struct parport_driver panel_driver = {
@@ -2400,7 +2403,7 @@ static int __init panel_init_module(void)
if (!lcd.enabled && !keypad.enabled) {
/* no device enabled, let's exit */
- pr_err("driver version " PANEL_VERSION " disabled.\n");
+ pr_err("panel driver disabled.\n");
return -ENODEV;
}
@@ -2411,12 +2414,10 @@ static int __init panel_init_module(void)
}
if (pprt)
- pr_info("driver version " PANEL_VERSION
- " registered on parport%d (io=0x%lx).\n", parport,
- pprt->port->base);
+ pr_info("panel driver registered on parport%d (io=0x%lx).\n",
+ parport, pprt->port->base);
else
- pr_info("driver version " PANEL_VERSION
- " not yet registered\n");
+ pr_info("panel driver not yet registered\n");
return 0;
}
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
new file mode 100644
index 000000000000..ac522417c462
--- /dev/null
+++ b/drivers/misc/sram-exec.c
@@ -0,0 +1,105 @@
+/*
+ * SRAM protect-exec region helper functions
+ *
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Dave Gerlach
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/genalloc.h>
+#include <linux/sram.h>
+
+#include <asm/cacheflush.h>
+
+#include "sram.h"
+
+static DEFINE_MUTEX(exec_pool_list_mutex);
+static LIST_HEAD(exec_pool_list);
+
+int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block,
+ struct sram_partition *part)
+{
+ unsigned long base = (unsigned long)part->base;
+ unsigned long end = base + block->size;
+
+ if (!PAGE_ALIGNED(base) || !PAGE_ALIGNED(end)) {
+ dev_err(sram->dev,
+ "SRAM pool marked with 'protect-exec' is not page aligned and will not be created.\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int sram_add_protect_exec(struct sram_partition *part)
+{
+ mutex_lock(&exec_pool_list_mutex);
+ list_add_tail(&part->list, &exec_pool_list);
+ mutex_unlock(&exec_pool_list_mutex);
+
+ return 0;
+}
+
+/**
+ * sram_exec_copy - copy data to a protected executable region of sram
+ *
+ * @pool: struct gen_pool retrieved that is part of this sram
+ * @dst: Destination address for the copy, that must be inside pool
+ * @src: Source address for the data to copy
+ * @size: Size of copy to perform, which starting from dst, must reside in pool
+ *
+ * This helper function allows sram driver to act as central control location
+ * of 'protect-exec' pools which are normal sram pools but are always set
+ * read-only and executable except when copying data to them, at which point
+ * they are set to read-write non-executable, to make sure no memory is
+ * writeable and executable at the same time. This region must be page-aligned
+ * and is checked during probe, otherwise page attribute manipulation would
+ * not be possible.
+ */
+int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
+ size_t size)
+{
+ struct sram_partition *part = NULL, *p;
+ unsigned long base;
+ int pages;
+
+ mutex_lock(&exec_pool_list_mutex);
+ list_for_each_entry(p, &exec_pool_list, list) {
+ if (p->pool == pool)
+ part = p;
+ }
+ mutex_unlock(&exec_pool_list_mutex);
+
+ if (!part)
+ return -EINVAL;
+
+ if (!addr_in_gen_pool(pool, (unsigned long)dst, size))
+ return -EINVAL;
+
+ base = (unsigned long)part->base;
+ pages = PAGE_ALIGN(size) / PAGE_SIZE;
+
+ mutex_lock(&part->lock);
+
+ set_memory_nx((unsigned long)base, pages);
+ set_memory_rw((unsigned long)base, pages);
+
+ memcpy(dst, src, size);
+
+ set_memory_ro((unsigned long)base, pages);
+ set_memory_x((unsigned long)base, pages);
+
+ mutex_unlock(&part->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sram_exec_copy);
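(Editor's aside, not part of the patch.) As a rough idea of how a consumer might use the new helper, a driver that already holds a gen_pool backed by a "protect-exec" partition could allocate space and copy a code blob into it as below; the pool handle and blob symbols are hypothetical, only sram_exec_copy() and gen_pool_alloc() come from real APIs.

#include <linux/genalloc.h>
#include <linux/sram.h>

extern u8 my_blob_start[], my_blob_end[];	/* hypothetical code blob */

/* Copy a blob into an executable SRAM pool and return its new address. */
static void *demo_load_blob(struct gen_pool *exec_pool)
{
	size_t size = my_blob_end - my_blob_start;
	unsigned long dst;

	dst = gen_pool_alloc(exec_pool, size);
	if (!dst)
		return NULL;

	/* pages flip to RW+NX for the copy, then back to RO+X */
	if (sram_exec_copy(exec_pool, (void *)dst, my_blob_start, size))
		return NULL;

	return (void *)dst;
}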
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index b33ab8ce47ab..d1185b78cf9a 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -31,35 +31,9 @@
#include <linux/mfd/syscon.h>
#include <soc/at91/atmel-secumod.h>
-#define SRAM_GRANULARITY 32
-
-struct sram_partition {
- void __iomem *base;
-
- struct gen_pool *pool;
- struct bin_attribute battr;
- struct mutex lock;
-};
-
-struct sram_dev {
- struct device *dev;
- void __iomem *virt_base;
-
- struct gen_pool *pool;
- struct clk *clk;
+#include "sram.h"
- struct sram_partition *partition;
- u32 partitions;
-};
-
-struct sram_reserve {
- struct list_head list;
- u32 start;
- u32 size;
- bool export;
- bool pool;
- const char *label;
-};
+#define SRAM_GRANULARITY 32
static ssize_t sram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
@@ -148,6 +122,18 @@ static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
if (ret)
return ret;
}
+ if (block->protect_exec) {
+ ret = sram_check_protect_exec(sram, block, part);
+ if (ret)
+ return ret;
+
+ ret = sram_add_pool(sram, block, start, part);
+ if (ret)
+ return ret;
+
+ sram_add_protect_exec(part);
+ }
+
sram->partitions++;
return 0;
@@ -233,7 +219,11 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
if (of_find_property(child, "pool", NULL))
block->pool = true;
- if ((block->export || block->pool) && block->size) {
+ if (of_find_property(child, "protect-exec", NULL))
+ block->protect_exec = true;
+
+ if ((block->export || block->pool || block->protect_exec) &&
+ block->size) {
exports++;
label = NULL;
@@ -249,8 +239,10 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
block->label = devm_kstrdup(sram->dev,
label, GFP_KERNEL);
- if (!block->label)
+ if (!block->label) {
+ ret = -ENOMEM;
goto err_chunks;
+ }
dev_dbg(sram->dev, "found %sblock '%s' 0x%x-0x%x\n",
block->export ? "exported " : "", block->label,
@@ -293,7 +285,8 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
goto err_chunks;
}
- if ((block->export || block->pool) && block->size) {
+ if ((block->export || block->pool || block->protect_exec) &&
+ block->size) {
ret = sram_add_partition(sram, block,
res->start + block->start);
if (ret) {
diff --git a/drivers/misc/sram.h b/drivers/misc/sram.h
new file mode 100644
index 000000000000..c181ce4c8fca
--- /dev/null
+++ b/drivers/misc/sram.h
@@ -0,0 +1,58 @@
+/*
+ * Defines for the SRAM driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SRAM_H
+#define __SRAM_H
+
+struct sram_partition {
+ void __iomem *base;
+
+ struct gen_pool *pool;
+ struct bin_attribute battr;
+ struct mutex lock;
+ struct list_head list;
+};
+
+struct sram_dev {
+ struct device *dev;
+ void __iomem *virt_base;
+
+ struct gen_pool *pool;
+ struct clk *clk;
+
+ struct sram_partition *partition;
+ u32 partitions;
+};
+
+struct sram_reserve {
+ struct list_head list;
+ u32 start;
+ u32 size;
+ bool export;
+ bool pool;
+ bool protect_exec;
+ const char *label;
+};
+
+#ifdef CONFIG_SRAM_EXEC
+int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block,
+ struct sram_partition *part);
+int sram_add_protect_exec(struct sram_partition *part);
+#else
+static inline int sram_check_protect_exec(struct sram_dev *sram,
+ struct sram_reserve *block,
+ struct sram_partition *part)
+{
+ return -ENODEV;
+}
+
+static inline int sram_add_protect_exec(struct sram_partition *part)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_SRAM_EXEC */
+#endif /* __SRAM_H */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 189b32519748..9d659542a335 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -54,10 +54,7 @@ struct vmci_guest_device {
struct device *dev; /* PCI device we are attached to */
void __iomem *iobase;
- unsigned int irq;
- unsigned int intr_type;
bool exclusive_vectors;
- struct msix_entry msix_entries[VMCI_MAX_INTRS];
struct tasklet_struct datagram_tasklet;
struct tasklet_struct bm_tasklet;
@@ -369,30 +366,6 @@ static void vmci_process_bitmap(unsigned long data)
}
/*
- * Enable MSI-X. Try exclusive vectors first, then shared vectors.
- */
-static int vmci_enable_msix(struct pci_dev *pdev,
- struct vmci_guest_device *vmci_dev)
-{
- int i;
- int result;
-
- for (i = 0; i < VMCI_MAX_INTRS; ++i) {
- vmci_dev->msix_entries[i].entry = i;
- vmci_dev->msix_entries[i].vector = i;
- }
-
- result = pci_enable_msix_exact(pdev,
- vmci_dev->msix_entries, VMCI_MAX_INTRS);
- if (result == 0)
- vmci_dev->exclusive_vectors = true;
- else if (result == -ENOSPC)
- result = pci_enable_msix_exact(pdev, vmci_dev->msix_entries, 1);
-
- return result;
-}
-
-/*
* Interrupt handler for legacy or MSI interrupt, or for first MSI-X
* interrupt (vector VMCI_INTR_DATAGRAM).
*/
@@ -406,7 +379,7 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
* Otherwise we must read the ICR to determine what to do.
*/
- if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
+ if (dev->exclusive_vectors) {
tasklet_schedule(&dev->datagram_tasklet);
} else {
unsigned int icr;
@@ -491,7 +464,6 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
}
vmci_dev->dev = &pdev->dev;
- vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
vmci_dev->exclusive_vectors = false;
vmci_dev->iobase = iobase;
@@ -592,26 +564,26 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
* Enable interrupts. Try MSI-X first, then MSI, and then fallback on
* legacy interrupts.
*/
- if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
- vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
- vmci_dev->irq = vmci_dev->msix_entries[0].vector;
- } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
- vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
- vmci_dev->irq = pdev->irq;
+ error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
+ PCI_IRQ_MSIX);
+	if (error < 0) {
+		error = pci_alloc_irq_vectors(pdev, 1, 1,
+				PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+		if (error < 0)
+ goto err_remove_bitmap;
} else {
- vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
- vmci_dev->irq = pdev->irq;
+ vmci_dev->exclusive_vectors = true;
}
/*
* Request IRQ for legacy or MSI interrupts, or for first
* MSI-X vector.
*/
- error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
- KBUILD_MODNAME, vmci_dev);
+ error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
if (error) {
dev_err(&pdev->dev, "Irq %u in use: %d\n",
- vmci_dev->irq, error);
+ pci_irq_vector(pdev, 0), error);
goto err_disable_msi;
}
@@ -622,13 +594,13 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
* between the vectors.
*/
if (vmci_dev->exclusive_vectors) {
- error = request_irq(vmci_dev->msix_entries[1].vector,
+ error = request_irq(pci_irq_vector(pdev, 1),
vmci_interrupt_bm, 0, KBUILD_MODNAME,
vmci_dev);
if (error) {
dev_err(&pdev->dev,
"Failed to allocate irq %u: %d\n",
- vmci_dev->msix_entries[1].vector, error);
+ pci_irq_vector(pdev, 1), error);
goto err_free_irq;
}
}
@@ -651,15 +623,12 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
return 0;
err_free_irq:
- free_irq(vmci_dev->irq, vmci_dev);
+ free_irq(pci_irq_vector(pdev, 0), vmci_dev);
tasklet_kill(&vmci_dev->datagram_tasklet);
tasklet_kill(&vmci_dev->bm_tasklet);
err_disable_msi:
- if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
- pci_disable_msix(pdev);
- else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
- pci_disable_msi(pdev);
+ pci_free_irq_vectors(pdev);
vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
if (vmci_err < VMCI_SUCCESS)
@@ -719,14 +688,10 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
* MSI-X, we might have multiple vectors, each with their own
* IRQ, which we must free too.
*/
- free_irq(vmci_dev->irq, vmci_dev);
- if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
- if (vmci_dev->exclusive_vectors)
- free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
- pci_disable_msix(pdev);
- } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
- pci_disable_msi(pdev);
- }
+ if (vmci_dev->exclusive_vectors)
+ free_irq(pci_irq_vector(pdev, 1), vmci_dev);
+ free_irq(pci_irq_vector(pdev, 0), vmci_dev);
+ pci_free_irq_vectors(pdev);
tasklet_kill(&vmci_dev->datagram_tasklet);
tasklet_kill(&vmci_dev->bm_tasklet);
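(Editor's aside, not part of the patch.) The VMCI conversion above is a straightforward instance of the pci_alloc_irq_vectors() API: the driver asks for the interrupt types it can handle, pci_alloc_irq_vectors() returns the number of vectors granted (or a negative errno), pci_irq_vector() translates a vector index into a Linux IRQ number, and pci_free_irq_vectors() undoes the allocation. A generic sketch of that sequence, with device and handler names invented for illustration:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t demo_irq(int irq, void *data)
{
	/* acknowledge the device here */
	return IRQ_HANDLED;
}

static int demo_setup_irq(struct pci_dev *pdev, void *drvdata)
{
	int nvec, err;

	/* prefer MSI-X, fall back to MSI, then legacy INTx */
	nvec = pci_alloc_irq_vectors(pdev, 1, 1,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	err = request_irq(pci_irq_vector(pdev, 0), demo_irq, IRQF_SHARED,
			  "demo", drvdata);
	if (err)
		pci_free_irq_vectors(pdev);

	return err;
}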
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index fd6ebbefd919..d35ebd993b38 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -703,8 +703,6 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
char *dest = start + (section_index * net_device->send_section_size)
+ pend_size;
int i;
- bool is_data_pkt = (skb != NULL) ? true : false;
- bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
u32 msg_size = 0;
u32 padding = 0;
u32 remain = packet->total_data_buflen % net_device->pkt_align;
@@ -712,7 +710,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
packet->page_buf_cnt;
/* Add padding */
- if (is_data_pkt && xmit_more && remain &&
+ if (skb && skb->xmit_more && remain &&
!packet->cp_partial) {
padding = net_device->pkt_align - remain;
rndis_msg->msg_len += padding;
@@ -754,7 +752,6 @@ static inline int netvsc_send_pkt(
int ret;
struct hv_page_buffer *pgbuf;
u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
- bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (skb != NULL) {
@@ -778,16 +775,6 @@ static inline int netvsc_send_pkt(
if (out_channel->rescind)
return -ENODEV;
- /*
- * It is possible that once we successfully place this packet
- * on the ringbuffer, we may stop the queue. In that case, we want
- * to notify the host independent of the xmit_more flag. We don't
- * need to be precise here; in the worst case we may signal the host
- * unnecessarily.
- */
- if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
- xmit_more = false;
-
if (packet->page_buf_cnt) {
pgbuf = packet->cp_partial ? (*pb) +
packet->rmsg_pgcnt : (*pb);
@@ -797,15 +784,13 @@ static inline int netvsc_send_pkt(
&nvmsg,
sizeof(struct nvsp_message),
req_id,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
- !xmit_more);
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
} else {
ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
sizeof(struct nvsp_message),
req_id,
VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
- !xmit_more);
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
if (ret == 0) {
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 398ea7f54826..408b521ee520 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -608,7 +608,7 @@ static struct nvmem_device *nvmem_find(const char *name)
/**
* of_nvmem_device_get() - Get nvmem device from a given id
*
- * @dev node: Device tree node that uses the nvmem device
+ * @np: Device tree node that uses the nvmem device.
* @id: nvmem name from nvmem-names property.
*
* Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
@@ -634,8 +634,8 @@ EXPORT_SYMBOL_GPL(of_nvmem_device_get);
/**
* nvmem_device_get() - Get nvmem device from a given id
*
- * @dev : Device that uses the nvmem device
- * @id: nvmem name from nvmem-names property.
+ * @dev: Device that uses the nvmem device.
+ * @dev_name: name of the requested nvmem device.
*
* Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
* on success.
@@ -674,6 +674,7 @@ static void devm_nvmem_device_release(struct device *dev, void *res)
/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
*
+ * @dev: Device that uses the nvmem device.
* @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(),
* that needs to be released.
*/
@@ -702,8 +703,8 @@ EXPORT_SYMBOL_GPL(nvmem_device_put);
/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
*
- * @dev node: Device tree node that uses the nvmem cell
- * @id: nvmem name in nvmems property.
+ * @dev: Device that requests the nvmem device.
+ * @id: name id for the requested nvmem device.
*
* Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
* on success. The nvmem_cell will be freed by the automatically once the
@@ -745,8 +746,10 @@ static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
/**
* of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
*
- * @dev node: Device tree node that uses the nvmem cell
- * @id: nvmem cell name from nvmem-cell-names property.
+ * @np: Device tree node that uses the nvmem cell.
+ * @name: nvmem cell name from nvmem-cell-names property, or NULL
+ * for the cell at index 0 (the lone cell with no accompanying
+ * nvmem-cell-names property).
*
* Return: Will be an ERR_PTR() on error or a valid pointer
* to a struct nvmem_cell. The nvmem_cell will be freed by the
@@ -759,9 +762,12 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
struct nvmem_cell *cell;
struct nvmem_device *nvmem;
const __be32 *addr;
- int rval, len, index;
+ int rval, len;
+ int index = 0;
- index = of_property_match_string(np, "nvmem-cell-names", name);
+ /* if cell name exists, find index to the name */
+ if (name)
+ index = of_property_match_string(np, "nvmem-cell-names", name);
cell_np = of_parse_phandle(np, "nvmem-cells", index);
if (!cell_np)
@@ -830,8 +836,8 @@ EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
*
- * @dev node: Device tree node that uses the nvmem cell
- * @id: nvmem cell name to get.
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: nvmem cell name to get.
*
* Return: Will be an ERR_PTR() on error or a valid pointer
* to a struct nvmem_cell. The nvmem_cell will be freed by the
@@ -859,8 +865,8 @@ static void devm_nvmem_cell_release(struct device *dev, void *res)
/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
*
- * @dev node: Device tree node that uses the nvmem cell
- * @id: nvmem id in nvmem-names property.
+ * @dev: Device that requests the nvmem cell.
+ * @id: nvmem cell name id to get.
*
* Return: Will be an ERR_PTR() on error or a valid pointer
* to a struct nvmem_cell. The nvmem_cell will be freed by the
@@ -900,7 +906,8 @@ static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
* devm_nvmem_cell_put() - Release previously allocated nvmem cell
* from devm_nvmem_cell_get.
*
- * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get()
+ * @dev: Device that requests the nvmem cell.
+ * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
*/
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
@@ -916,7 +923,7 @@ EXPORT_SYMBOL(devm_nvmem_cell_put);
/**
* nvmem_cell_put() - Release previously allocated nvmem cell.
*
- * @cell: Previously allocated nvmem cell by nvmem_cell_get()
+ * @cell: Previously allocated nvmem cell by nvmem_cell_get().
*/
void nvmem_cell_put(struct nvmem_cell *cell)
{
@@ -970,7 +977,8 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
if (cell->bit_offset || cell->nbits)
nvmem_shift_read_buffer_in_place(cell, buf);
- *len = cell->bytes;
+ if (len)
+ *len = cell->bytes;
return 0;
}
@@ -979,7 +987,8 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
* nvmem_cell_read() - Read a given nvmem cell
*
* @cell: nvmem cell to be read.
- * @len: pointer to length of cell which will be populated on successful read.
+ * @len: pointer to length of cell which will be populated on successful read;
+ * can be NULL.
*
* Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
* buffer should be freed by the consumer with a kfree().
@@ -1126,7 +1135,7 @@ EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
* nvmem_device_cell_write() - Write cell to a given nvmem device
*
* @nvmem: nvmem device to be written to.
- * @info: nvmem cell info to be written
+ * @info: nvmem cell info to be written.
* @buf: buffer to be written to cell.
*
* Return: length of bytes written or negative error code on failure.
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 8e7b120696fa..b8ca1e677b01 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -73,6 +73,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", (void *)128 },
{ .compatible = "fsl,imx6sl-ocotp", (void *)64 },
{ .compatible = "fsl,imx6sx-ocotp", (void *)128 },
+ { .compatible = "fsl,imx6ul-ocotp", (void *)128 },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c
index 1f52462f4cdd..dd9ea463c2a4 100644
--- a/drivers/platform/goldfish/pdev_bus.c
+++ b/drivers/platform/goldfish/pdev_bus.c
@@ -157,23 +157,26 @@ static int goldfish_new_pdev(void)
static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id)
{
irqreturn_t ret = IRQ_NONE;
+
while (1) {
u32 op = readl(pdev_bus_base + PDEV_BUS_OP);
- switch (op) {
- case PDEV_BUS_OP_DONE:
- return IRQ_NONE;
+ switch (op) {
case PDEV_BUS_OP_REMOVE_DEV:
goldfish_pdev_remove();
+ ret = IRQ_HANDLED;
break;
case PDEV_BUS_OP_ADD_DEV:
goldfish_new_pdev();
+ ret = IRQ_HANDLED;
break;
+
+ case PDEV_BUS_OP_DONE:
+ default:
+ return ret;
}
- ret = IRQ_HANDLED;
}
- return ret;
}
static int goldfish_pdev_bus_probe(struct platform_device *pdev)
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 50958f167305..48d5327d38d4 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -125,7 +125,7 @@ hv_uio_probe(struct hv_device *dev,
goto fail;
dev->channel->inbound.ring_buffer->interrupt_mask = 1;
- dev->channel->batched_reading = false;
+ set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
/* Fill general uio info */
pdata->info.name = "uio_hv_generic";
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index bdbadaa47ef3..0035cf79760a 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1625,10 +1625,25 @@ static int vme_bus_probe(struct device *dev)
return retval;
}
+static int vme_bus_remove(struct device *dev)
+{
+ int retval = -ENODEV;
+ struct vme_driver *driver;
+ struct vme_dev *vdev = dev_to_vme_dev(dev);
+
+ driver = dev->platform_data;
+
+ if (driver->remove != NULL)
+ retval = driver->remove(vdev);
+
+ return retval;
+}
+
struct bus_type vme_bus_type = {
.name = "vme",
.match = vme_bus_match,
.probe = vme_bus_probe,
+ .remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 049a884a756f..be77b7914fad 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -153,6 +153,9 @@ struct ds_device
*/
u16 spu_bit;
+ u8 st_buf[ST_SIZE];
+ u8 byte_buf;
+
struct w1_bus_master master;
};
@@ -174,7 +177,6 @@ struct ds_status
u8 data_in_buffer_status;
u8 reserved1;
u8 reserved2;
-
};
static struct usb_device_id ds_id_table [] = {
@@ -244,28 +246,6 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
return err;
}
-static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
- unsigned char *buf, int size)
-{
- int count, err;
-
- memset(st, 0, sizeof(*st));
-
- count = 0;
- err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev,
- dev->ep[EP_STATUS]), buf, size, &count, 1000);
- if (err < 0) {
- pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
- dev->ep[EP_STATUS], err);
- return err;
- }
-
- if (count >= sizeof(*st))
- memcpy(st, buf, sizeof(*st));
-
- return count;
-}
-
static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off)
{
pr_info("%45s: %8x\n", str, buf[off]);
@@ -324,6 +304,35 @@ static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
}
}
+static int ds_recv_status(struct ds_device *dev, struct ds_status *st,
+ bool dump)
+{
+ int count, err;
+
+ if (st)
+ memset(st, 0, sizeof(*st));
+
+ count = 0;
+ err = usb_interrupt_msg(dev->udev,
+ usb_rcvintpipe(dev->udev,
+ dev->ep[EP_STATUS]),
+ dev->st_buf, sizeof(dev->st_buf),
+ &count, 1000);
+ if (err < 0) {
+ pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
+ dev->ep[EP_STATUS], err);
+ return err;
+ }
+
+ if (dump)
+ ds_dump_status(dev, dev->st_buf, count);
+
+ if (st && count >= sizeof(*st))
+ memcpy(st, dev->st_buf, sizeof(*st));
+
+ return count;
+}
+
static void ds_reset_device(struct ds_device *dev)
{
ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0);
@@ -344,7 +353,6 @@ static void ds_reset_device(struct ds_device *dev)
static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
{
int count, err;
- struct ds_status st;
/* Careful on size. If size is less than what is available in
* the input buffer, the device fails the bulk transfer and
@@ -359,14 +367,9 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
buf, size, &count, 1000);
if (err < 0) {
- u8 buf[ST_SIZE];
- int count;
-
pr_info("Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]));
-
- count = ds_recv_status_nodump(dev, &st, buf, sizeof(buf));
- ds_dump_status(dev, buf, count);
+ ds_recv_status(dev, NULL, true);
return err;
}
@@ -404,7 +407,6 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
{
struct ds_status st;
int count = 0, err = 0;
- u8 buf[ST_SIZE];
do {
err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0);
@@ -413,7 +415,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
err = ds_send_control(dev, CTL_RESUME_EXE, 0);
if (err)
break;
- err = ds_recv_status_nodump(dev, &st, buf, sizeof(buf));
+ err = ds_recv_status(dev, &st, false);
if (err)
break;
@@ -456,18 +458,17 @@ int ds_detect(struct ds_device *dev, struct ds_status *st)
static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
{
- u8 buf[ST_SIZE];
int err, count = 0;
do {
st->status = 0;
- err = ds_recv_status_nodump(dev, st, buf, sizeof(buf));
+ err = ds_recv_status(dev, st, false);
#if 0
if (err >= 0) {
int i;
printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err);
for (i=0; i<err; ++i)
- printk("%02x ", buf[i]);
+ printk("%02x ", dev->st_buf[i]);
printk("\n");
}
#endif
@@ -485,7 +486,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
* can do something with it).
*/
if (err > 16 || count >= 100 || err < 0)
- ds_dump_status(dev, buf, err);
+ ds_dump_status(dev, dev->st_buf, err);
/* Extended data isn't an error. Well, a short is, but the dump
* would have already told the user that and we can't do anything
@@ -608,7 +609,6 @@ static int ds_write_byte(struct ds_device *dev, u8 byte)
{
int err;
struct ds_status st;
- u8 rbyte;
err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM | dev->spu_bit, byte);
if (err)
@@ -621,11 +621,11 @@ static int ds_write_byte(struct ds_device *dev, u8 byte)
if (err)
return err;
- err = ds_recv_data(dev, &rbyte, sizeof(rbyte));
+ err = ds_recv_data(dev, &dev->byte_buf, 1);
if (err < 0)
return err;
- return !(byte == rbyte);
+ return !(byte == dev->byte_buf);
}
static int ds_read_byte(struct ds_device *dev, u8 *byte)
@@ -712,7 +712,6 @@ static void ds9490r_search(void *data, struct w1_master *master,
int err;
u16 value, index;
struct ds_status st;
- u8 st_buf[ST_SIZE];
int search_limit;
int found = 0;
int i;
@@ -724,7 +723,12 @@ static void ds9490r_search(void *data, struct w1_master *master,
/* FIFO 128 bytes, bulk packet size 64, read a multiple of the
* packet size.
*/
- u64 buf[2*64/8];
+ const size_t bufsize = 2 * 64;
+ u64 *buf;
+
+ buf = kmalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return;
mutex_lock(&master->bus_mutex);
@@ -745,10 +749,9 @@ static void ds9490r_search(void *data, struct w1_master *master,
do {
schedule_timeout(jtime);
- if (ds_recv_status_nodump(dev, &st, st_buf, sizeof(st_buf)) <
- sizeof(st)) {
+ err = ds_recv_status(dev, &st, false);
+ if (err < 0 || err < sizeof(st))
break;
- }
if (st.data_in_buffer_status) {
/* Bulk in can receive partial ids, but when it does
@@ -758,7 +761,7 @@ static void ds9490r_search(void *data, struct w1_master *master,
* bulk without first checking if status says there
* is data to read.
*/
- err = ds_recv_data(dev, (u8 *)buf, sizeof(buf));
+ err = ds_recv_data(dev, (u8 *)buf, bufsize);
if (err < 0)
break;
for (i = 0; i < err/8; ++i) {
@@ -794,9 +797,14 @@ static void ds9490r_search(void *data, struct w1_master *master,
}
search_out:
mutex_unlock(&master->bus_mutex);
+ kfree(buf);
}
#if 0
+/*
+ * FIXME: if this disabled code is ever used in the future all ds_send_data()
+ * calls must be changed to use a DMAable buffer.
+ */
static int ds_match_access(struct ds_device *dev, u64 init)
{
int err;
@@ -845,13 +853,12 @@ static int ds_set_path(struct ds_device *dev, u64 init)
static u8 ds9490r_touch_bit(void *data, u8 bit)
{
- u8 ret;
struct ds_device *dev = data;
- if (ds_touch_bit(dev, bit, &ret))
+ if (ds_touch_bit(dev, bit, &dev->byte_buf))
return 0;
- return ret;
+ return dev->byte_buf;
}
#if 0
@@ -866,13 +873,12 @@ static u8 ds9490r_read_bit(void *data)
{
struct ds_device *dev = data;
int err;
- u8 bit = 0;
- err = ds_touch_bit(dev, 1, &bit);
+ err = ds_touch_bit(dev, 1, &dev->byte_buf);
if (err)
return 0;
- return bit & 1;
+ return dev->byte_buf & 1;
}
#endif
@@ -887,32 +893,51 @@ static u8 ds9490r_read_byte(void *data)
{
struct ds_device *dev = data;
int err;
- u8 byte = 0;
- err = ds_read_byte(dev, &byte);
+ err = ds_read_byte(dev, &dev->byte_buf);
if (err)
return 0;
- return byte;
+ return dev->byte_buf;
}
static void ds9490r_write_block(void *data, const u8 *buf, int len)
{
struct ds_device *dev = data;
+ u8 *tbuf;
+
+ if (len <= 0)
+ return;
+
+ tbuf = kmemdup(buf, len, GFP_KERNEL);
+ if (!tbuf)
+ return;
- ds_write_block(dev, (u8 *)buf, len);
+ ds_write_block(dev, tbuf, len);
+
+ kfree(tbuf);
}
static u8 ds9490r_read_block(void *data, u8 *buf, int len)
{
struct ds_device *dev = data;
int err;
+ u8 *tbuf;
- err = ds_read_block(dev, buf, len);
- if (err < 0)
+ if (len <= 0)
+ return 0;
+
+ tbuf = kmalloc(len, GFP_KERNEL);
+ if (!tbuf)
return 0;
- return len;
+ err = ds_read_block(dev, tbuf, len);
+ if (err >= 0)
+ memcpy(buf, tbuf, len);
+
+ kfree(tbuf);
+
+ return err >= 0 ? len : 0;
}
static u8 ds9490r_reset(void *data)
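
Most of the ds2490 churn above replaces on-stack buffers with heap-allocated ones (dev->st_buf, dev->byte_buf, kmemdup()/kmalloc() in the block helpers) because buffers handed to the USB core must be DMA-able, which stack memory is not guaranteed to be. A generic sketch of that bounce-buffer idiom, assuming a hypothetical bulk endpoint rather than ds2490's actual helpers:

#include <linux/slab.h>
#include <linux/usb.h>

/* Send a caller-supplied (possibly on-stack) buffer over a bulk endpoint. */
static int foo_send_block(struct usb_device *udev, unsigned int ep,
			  const u8 *buf, int len)
{
	int actual = 0, err;
	u8 *dma_buf;

	dma_buf = kmemdup(buf, len, GFP_KERNEL);	/* heap copy is DMA-able */
	if (!dma_buf)
		return -ENOMEM;

	err = usb_bulk_msg(udev, usb_sndbulkpipe(udev, ep),
			   dma_buf, len, &actual, 1000);

	kfree(dma_buf);
	return err ? err : actual;
}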
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index bb09de633939..fb190c259607 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -715,7 +715,7 @@ static int omap_hdq_probe(struct platform_device *pdev)
ret = _omap_hdq_reset(hdq_data);
if (ret) {
dev_dbg(&pdev->dev, "reset failed\n");
- return -EINVAL;
+ goto err_irq;
}
rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index cfe74d09932e..0ef9f2663dbd 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -16,6 +16,14 @@ config W1_SLAVE_SMEM
Say Y here if you want to connect 1-wire
simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
+config W1_SLAVE_DS2405
+ tristate "DS2405 Addressable Switch"
+ help
+ Say Y or M here if you want to use a DS2405 1-wire
+ single-channel addressable switch.
+ This device can also work as a single-channel
+ binary remote sensor.
+
config W1_SLAVE_DS2408
tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)"
help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 1e9989afe7bf..b4a358955ef9 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o
obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o
+obj-$(CONFIG_W1_SLAVE_DS2405) += w1_ds2405.o
obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
obj-$(CONFIG_W1_SLAVE_DS2413) += w1_ds2413.o
obj-$(CONFIG_W1_SLAVE_DS2406) += w1_ds2406.o
diff --git a/drivers/w1/slaves/w1_ds2405.c b/drivers/w1/slaves/w1_ds2405.c
new file mode 100644
index 000000000000..d5d54876cb64
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2405.c
@@ -0,0 +1,227 @@
+/*
+ * w1_ds2405.c
+ *
+ * Copyright (c) 2017 Maciej S. Szmigiero <mail@maciej.szmigiero.name>
+ * Based on w1_therm.c copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "../w1.h"
+#include "../w1_family.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maciej S. Szmigiero <mail@maciej.szmigiero.name>");
+MODULE_DESCRIPTION("Driver for 1-wire Dallas DS2405 PIO.");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2405));
+
+static int w1_ds2405_select(struct w1_slave *sl, bool only_active)
+{
+ struct w1_master *dev = sl->master;
+
+ u64 dev_addr = le64_to_cpu(*(u64 *)&sl->reg_num);
+ unsigned int bit_ctr;
+
+ if (w1_reset_bus(dev) != 0)
+ return 0;
+
+ /*
+ * We cannot use a normal Match ROM command
+ * since doing so would toggle PIO state
+ */
+ w1_write_8(dev, only_active ? W1_ALARM_SEARCH : W1_SEARCH);
+
+ for (bit_ctr = 0; bit_ctr < 64; bit_ctr++) {
+ int bit2send = !!(dev_addr & BIT(bit_ctr));
+ u8 ret;
+
+ ret = w1_triplet(dev, bit2send);
+
+ if ((ret & (BIT(0) | BIT(1))) ==
+ (BIT(0) | BIT(1))) /* no devices found */
+ return 0;
+
+ if (!!(ret & BIT(2)) != bit2send)
+ /* wrong direction taken - no such device */
+ return 0;
+ }
+
+ return 1;
+}
+
+static int w1_ds2405_read_pio(struct w1_slave *sl)
+{
+ if (w1_ds2405_select(sl, true))
+ return 0; /* "active" means PIO is low */
+
+ if (w1_ds2405_select(sl, false))
+ return 1;
+
+ return -ENODEV;
+}
+
+static ssize_t state_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ struct w1_master *dev = sl->master;
+
+ int ret;
+ ssize_t f_retval;
+ u8 state;
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret)
+ return ret;
+
+ if (!w1_ds2405_select(sl, false)) {
+ f_retval = -ENODEV;
+ goto out_unlock;
+ }
+
+ state = w1_read_8(dev);
+ if (state != 0 &&
+ state != 0xff) {
+ dev_err(device, "non-consistent state %x\n", state);
+ f_retval = -EIO;
+ goto out_unlock;
+ }
+
+ *buf = state ? '1' : '0';
+ f_retval = 1;
+
+out_unlock:
+ w1_reset_bus(dev);
+ mutex_unlock(&dev->bus_mutex);
+
+ return f_retval;
+}
+
+static ssize_t output_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ struct w1_master *dev = sl->master;
+
+ int ret;
+ ssize_t f_retval;
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret)
+ return ret;
+
+ ret = w1_ds2405_read_pio(sl);
+ if (ret < 0) {
+ f_retval = ret;
+ goto out_unlock;
+ }
+
+ *buf = ret ? '1' : '0';
+ f_retval = 1;
+
+out_unlock:
+ w1_reset_bus(dev);
+ mutex_unlock(&dev->bus_mutex);
+
+ return f_retval;
+}
+
+static ssize_t output_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ struct w1_master *dev = sl->master;
+
+ int ret, current_pio;
+ unsigned int val;
+ ssize_t f_retval;
+
+ if (count < 1)
+ return -EINVAL;
+
+ if (sscanf(buf, " %u%n", &val, &ret) < 1)
+ return -EINVAL;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ f_retval = ret;
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret)
+ return ret;
+
+ current_pio = w1_ds2405_read_pio(sl);
+ if (current_pio < 0) {
+ f_retval = current_pio;
+ goto out_unlock;
+ }
+
+ if (current_pio == val)
+ goto out_unlock;
+
+ if (w1_reset_bus(dev) != 0) {
+ f_retval = -ENODEV;
+ goto out_unlock;
+ }
+
+ /*
+ * can't use w1_reset_select_slave() here since it uses Skip ROM if
+ * there is only one device on bus
+ */
+ do {
+ u64 dev_addr = le64_to_cpu(*(u64 *)&sl->reg_num);
+ u8 cmd[9];
+
+ cmd[0] = W1_MATCH_ROM;
+ memcpy(&cmd[1], &dev_addr, sizeof(dev_addr));
+
+ w1_write_block(dev, cmd, sizeof(cmd));
+ } while (0);
+
+out_unlock:
+ w1_reset_bus(dev);
+ mutex_unlock(&dev->bus_mutex);
+
+ return f_retval;
+}
+
+static DEVICE_ATTR_RO(state);
+static DEVICE_ATTR_RW(output);
+
+static struct attribute *w1_ds2405_attrs[] = {
+ &dev_attr_state.attr,
+ &dev_attr_output.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(w1_ds2405);
+
+static struct w1_family_ops w1_ds2405_fops = {
+ .groups = w1_ds2405_groups
+};
+
+static struct w1_family w1_family_ds2405 = {
+ .fid = W1_FAMILY_DS2405,
+ .fops = &w1_ds2405_fops
+};
+
+module_w1_family(w1_family_ds2405);
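
Once the module binds, the DS2405 appears as a 1-wire slave exposing the two attributes declared above: "state" (read-only sensed level) and "output" (read-write PIO). A small userspace sketch of driving them is below; the slave id 05-0000001234ab is a made-up example, and the actual node name under /sys/bus/w1/devices/ depends on the attached part.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative paths; substitute the real slave id. */
	const char *output = "/sys/bus/w1/devices/05-0000001234ab/output";
	const char *state  = "/sys/bus/w1/devices/05-0000001234ab/state";
	char val;
	int fd;

	fd = open(output, O_WRONLY);		/* toggle the PIO */
	if (fd >= 0) {
		if (write(fd, "1", 1) != 1)
			perror("write output");
		close(fd);
	}

	fd = open(state, O_RDONLY);		/* read back the sensed level */
	if (fd >= 0) {
		if (read(fd, &val, 1) == 1)
			printf("PIO state: %c\n", val);
		close(fd);
	}
	return 0;
}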
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index e213c678bbfe..90a3d9338fd2 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -1,9 +1,6 @@
/*
- * w1.c
- *
* Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/delay.h>
@@ -763,6 +756,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__,
sl->name);
w1_family_put(sl->family);
+ atomic_dec(&sl->master->refcnt);
kfree(sl);
return err;
}
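
The one-line w1.c change above balances the master refcount that the slave took earlier in the attach path, so a failed attach no longer pins the master forever. A hypothetical sketch of that "undo the reference on failure" idiom, with foo_* placeholders rather than the real w1 structures:

#include <linux/atomic.h>
#include <linux/slab.h>

struct foo_master {
	atomic_t refcnt;
};

struct foo_slave {
	struct foo_master *master;
};

static int foo_attach_slave(struct foo_master *md)
{
	struct foo_slave *sl;

	atomic_inc(&md->refcnt);		/* the slave pins its master */

	sl = kzalloc(sizeof(*sl), GFP_KERNEL);
	if (!sl) {
		atomic_dec(&md->refcnt);	/* balance the pin on failure */
		return -ENOMEM;
	}

	sl->master = md;
	return 0;
}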
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index 129895f562b0..758a7a6322e9 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -1,9 +1,6 @@
/*
- * w1.h
- *
* Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __W1_H
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index 1dc3051f7d76..df1c9bb90eb5 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -1,9 +1,6 @@
/*
- * w1_family.c
- *
* Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/spinlock.h>
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 10a7a0767187..c4a6b257a367 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -1,9 +1,6 @@
/*
- * w1_family.h
- *
* Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __W1_FAMILY_H
@@ -30,6 +23,7 @@
#define W1_FAMILY_BQ27000 0x01
#define W1_FAMILY_SMEM_01 0x01
#define W1_FAMILY_SMEM_81 0x81
+#define W1_FAMILY_DS2405 0x05
#define W1_THERM_DS18S20 0x10
#define W1_FAMILY_DS28E04 0x1C
#define W1_COUNTER_DS2423 0x1D
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 20f766afa4c7..4ce1b66d5092 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -1,9 +1,6 @@
/*
- * w1_int.c
- *
* Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/w1/w1_int.h b/drivers/w1/w1_int.h
index 2ad7d4414bed..371989159216 100644
--- a/drivers/w1/w1_int.h
+++ b/drivers/w1/w1_int.h
@@ -1,9 +1,6 @@
/*
- * w1_int.h
- *
* Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __W1_INT_H
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index f4bc8c100a01..de8bebc27896 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -1,9 +1,6 @@
/*
- * w1_io.c
- *
* Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/io.h>
@@ -233,6 +226,7 @@ u8 w1_triplet(struct w1_master *dev, int bdir)
return retval;
}
}
+EXPORT_SYMBOL_GPL(w1_triplet);
/**
* w1_read_8() - Reads 8 bits.
diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h
index f9eecff23b8d..dd1422b6afbb 100644
--- a/drivers/w1/w1_log.h
+++ b/drivers/w1/w1_log.h
@@ -1,9 +1,6 @@
/*
- * w1_log.h
- *
* Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __W1_LOG_H
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 881597a191b8..49e520ca79c5 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -1,9 +1,6 @@
/*
- * w1_netlink.c
- *
* Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index c99a9ce05e62..b389e5ff5fa5 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -1,9 +1,6 @@
/*
- * w1_netlink.h
- *
* Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __W1_NETLINK_H