author     Linus Torvalds <torvalds@linux-foundation.org>   2015-04-22 20:55:06 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-04-22 20:55:06 +0300
commit     b9bb6fb73b3e112d241a5edd146740be9a0c3cc0 (patch)
tree       d65072d0371468d685b7464dc6b38f920c0c9666 /drivers
parent     15ce2658ddbd3db20dfba3622f3d224f01837fdc (diff)
parent     9abbfb486f5c254805bb6a3f263bc14d989eb90b (diff)
Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull virtio updates from Rusty Russell:
 "Some virtio internal cleanups, a new virtio device "virtio input", and
  a change to allow the legacy virtio balloon.

  Most excitingly, some lguest work!  No seriously, I got some cleanup
  patches"

* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux:
  virtio: drop virtio_device_is_legacy_only
  virtio_pci: support non-legacy balloon devices
  virtio_mmio: support non-legacy balloon devices
  virtio_ccw: support non-legacy balloon devices
  virtio: balloon might not be a legacy device
  virtio_balloon: transitional interface
  virtio_ring: Update weak barriers to use dma_wmb/rmb
  virtio_pci_modern: switch to type-safe io accessors
  virtio_pci_modern: type-safe io accessors
  lguest: handle traps on the "interrupt suppressed" iret instruction.
  virtio: drop a useless config read
  virtio_config: reorder functions
  Add virtio-input driver.
  lguest: suppress interrupts for single insn, not range.
  lguest: simplify lguest_iret
  lguest: rename i386_head.S in the comments
  lguest: explicitly set miscdevice's private_data NULL
  lguest: fix pending interrupt test.
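Two of the merged commits ("virtio: balloon might not be a legacy device" and "virtio_balloon: transitional interface") hinge on one endianness quirk: the legacy balloon's config space is little-endian, unlike every other legacy device, while the virtio_cread()/virtio_cwrite() helpers only byte-swap for devices that negotiated VIRTIO_F_VERSION_1. A minimal sketch of the resulting rule, using hypothetical helper names (the actual patch open-codes the same checks in towards_target() and update_balloon_size(), visible in the diff below):

#include <linux/types.h>
#include <linux/virtio_config.h>

/*
 * Hypothetical helpers illustrating the transitional-balloon rule.
 * Modern (VIRTIO_F_VERSION_1) config space is little-endian and the
 * virtio_cread()/virtio_cwrite() accessors already convert it, so only
 * the legacy balloon needs an explicit swap: its fields are LE on the
 * wire even though the legacy accessors assume native endianness.
 */
static inline u32 balloon_le32_to_cpu(struct virtio_device *vdev, u32 raw)
{
	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		raw = le32_to_cpu((__force __le32)raw);
	return raw;
}

static inline u32 balloon_cpu_to_le32(struct virtio_device *vdev, u32 val)
{
	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		val = (__force u32)cpu_to_le32(val);
	return val;
}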
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/lguest/hypercalls.c               5
-rw-r--r--  drivers/lguest/interrupts_and_traps.c   105
-rw-r--r--  drivers/lguest/lg.h                       2
-rw-r--r--  drivers/lguest/lguest_user.c              8
-rw-r--r--  drivers/s390/kvm/virtio_ccw.c            10
-rw-r--r--  drivers/virtio/Kconfig                   10
-rw-r--r--  drivers/virtio/Makefile                   1
-rw-r--r--  drivers/virtio/virtio.c                   6
-rw-r--r--  drivers/virtio/virtio_balloon.c          21
-rw-r--r--  drivers/virtio/virtio_input.c           384
-rw-r--r--  drivers/virtio/virtio_mmio.c              8
-rw-r--r--  drivers/virtio/virtio_pci_modern.c      123
12 files changed, 578 insertions, 105 deletions
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 1219af493c0f..19a32280731d 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -211,10 +211,9 @@ static void initialize(struct lg_cpu *cpu)
/*
* The Guest tells us where we're not to deliver interrupts by putting
- * the range of addresses into "struct lguest_data".
+ * the instruction address into "struct lguest_data".
*/
- if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start)
- || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end))
+ if (get_user(cpu->lg->noirq_iret, &cpu->lg->lguest_data->noirq_iret))
kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
/*
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 70dfcdc29f1f..5e7559be222a 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -56,21 +56,16 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
}
/*H:210
- * The set_guest_interrupt() routine actually delivers the interrupt or
- * trap. The mechanics of delivering traps and interrupts to the Guest are the
- * same, except some traps have an "error code" which gets pushed onto the
- * stack as well: the caller tells us if this is one.
- *
- * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
- * interrupt or trap. It's split into two parts for traditional reasons: gcc
- * on i386 used to be frightened by 64 bit numbers.
+ * The push_guest_interrupt_stack() routine saves Guest state on the stack for
+ * an interrupt or trap. The mechanics of delivering traps and interrupts to
+ * the Guest are the same, except some traps have an "error code" which gets
+ * pushed onto the stack as well: the caller tells us if this is one.
*
* We set up the stack just like the CPU does for a real interrupt, so it's
* identical for the Guest (and the standard "iret" instruction will undo
* it).
*/
-static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
- bool has_err)
+static void push_guest_interrupt_stack(struct lg_cpu *cpu, bool has_err)
{
unsigned long gstack, origstack;
u32 eflags, ss, irq_enable;
@@ -130,12 +125,28 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
if (has_err)
push_guest_stack(cpu, &gstack, cpu->regs->errcode);
- /*
- * Now we've pushed all the old state, we change the stack, the code
- * segment and the address to execute.
- */
+ /* Adjust the stack pointer and stack segment. */
cpu->regs->ss = ss;
cpu->regs->esp = virtstack + (gstack - origstack);
+}
+
+/*
+ * This actually makes the Guest start executing the given interrupt/trap
+ * handler.
+ *
+ * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
+ * interrupt or trap. It's split into two parts for traditional reasons: gcc
+ * on i386 used to be frightened by 64 bit numbers.
+ */
+static void guest_run_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi)
+{
+ /* If we're already in the kernel, we don't change stacks. */
+ if ((cpu->regs->ss&0x3) != GUEST_PL)
+ cpu->regs->ss = cpu->esp1;
+
+ /*
+ * Set the code segment and the address to execute.
+ */
cpu->regs->cs = (__KERNEL_CS|GUEST_PL);
cpu->regs->eip = idt_address(lo, hi);
@@ -158,6 +169,24 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
kill_guest(cpu, "Disabling interrupts");
}
+/* This restores the eflags word which was pushed on the stack by a trap */
+static void restore_eflags(struct lg_cpu *cpu)
+{
+ /* This is the physical address of the stack. */
+ unsigned long stack_pa = guest_pa(cpu, cpu->regs->esp);
+
+ /*
+ * Stack looks like this:
+ * Address Contents
+ * esp EIP
+ * esp + 4 CS
+ * esp + 8 EFLAGS
+ */
+ cpu->regs->eflags = lgread(cpu, stack_pa + 8, u32);
+ cpu->regs->eflags &=
+ ~(X86_EFLAGS_TF|X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT);
+}
+
/*H:205
* Virtual Interrupts.
*
@@ -200,14 +229,6 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
BUG_ON(irq >= LGUEST_IRQS);
- /*
- * They may be in the middle of an iret, where they asked us never to
- * deliver interrupts.
- */
- if (cpu->regs->eip >= cpu->lg->noirq_start &&
- (cpu->regs->eip < cpu->lg->noirq_end))
- return;
-
/* If they're halted, interrupts restart them. */
if (cpu->halted) {
/* Re-enable interrupts. */
@@ -237,12 +258,34 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
if (idt_present(idt->a, idt->b)) {
/* OK, mark it no longer pending and deliver it. */
clear_bit(irq, cpu->irqs_pending);
+
/*
- * set_guest_interrupt() takes the interrupt descriptor and a
- * flag to say whether this interrupt pushes an error code onto
- * the stack as well: virtual interrupts never do.
+ * They may be about to iret, where they asked us never to
+ * deliver interrupts. In this case, we can emulate that iret
+ * then immediately deliver the interrupt. This is basically
+ * a noop: the iret would pop the interrupt frame and restore
+ * eflags, and then we'd set it up again. So just restore the
+ * eflags word and jump straight to the handler in this case.
+ *
+ * Denys Vlasenko points out that this isn't quite right: if
+ * the iret was returning to userspace, then that interrupt
+ * would reset the stack pointer (which the Guest told us
+ * about via LHCALL_SET_STACK). But unless the Guest is being
+ * *really* weird, that will be the same as the current stack
+ * anyway.
*/
- set_guest_interrupt(cpu, idt->a, idt->b, false);
+ if (cpu->regs->eip == cpu->lg->noirq_iret) {
+ restore_eflags(cpu);
+ } else {
+ /*
+ * set_guest_interrupt() takes a flag to say whether
+ * this interrupt pushes an error code onto the stack
+ * as well: virtual interrupts never do.
+ */
+ push_guest_interrupt_stack(cpu, false);
+ }
+ /* Actually make Guest cpu jump to handler. */
+ guest_run_interrupt(cpu, idt->a, idt->b);
}
/*
@@ -353,8 +396,9 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
*/
if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
return false;
- set_guest_interrupt(cpu, cpu->arch.idt[num].a,
- cpu->arch.idt[num].b, has_err(num));
+ push_guest_interrupt_stack(cpu, has_err(num));
+ guest_run_interrupt(cpu, cpu->arch.idt[num].a,
+ cpu->arch.idt[num].b);
return true;
}
@@ -395,8 +439,9 @@ static bool direct_trap(unsigned int num)
* The Guest has the ability to turn its interrupt gates into trap gates,
* if it is careful. The Host will let trap gates can go directly to the
* Guest, but the Guest needs the interrupts atomically disabled for an
- * interrupt gate. It can do this by pointing the trap gate at instructions
- * within noirq_start and noirq_end, where it can safely disable interrupts.
+ * interrupt gate. The Host could provide a mechanism to register more
+ * "no-interrupt" regions, and the Guest could point the trap gate at
+ * instructions within that region, where it can safely disable interrupts.
*/
/*M:006
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 307e8b39e7d1..ac8ad0461e80 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -102,7 +102,7 @@ struct lguest {
struct pgdir pgdirs[4];
- unsigned long noirq_start, noirq_end;
+ unsigned long noirq_iret;
unsigned int stack_pages;
u32 tsc_khz;
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index c4c6113eb9a6..30c60687d277 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -339,6 +339,13 @@ static ssize_t write(struct file *file, const char __user *in,
}
}
+static int open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+
+ return 0;
+}
+
/*L:060
* The final piece of interface code is the close() routine. It reverses
* everything done in initialize(). This is usually called because the
@@ -409,6 +416,7 @@ static int close(struct inode *inode, struct file *file)
*/
static const struct file_operations lguest_fops = {
.owner = THIS_MODULE,
+ .open = open,
.release = close,
.write = write,
.read = read,
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 71d7802aa8b4..6f1fa1773e76 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -1201,13 +1201,9 @@ static int virtio_ccw_online(struct ccw_device *cdev)
vcdev->vdev.id.vendor = cdev->id.cu_type;
vcdev->vdev.id.device = cdev->id.cu_model;
- if (virtio_device_is_legacy_only(vcdev->vdev.id)) {
- vcdev->revision = 0;
- } else {
- ret = virtio_ccw_set_transport_rev(vcdev);
- if (ret)
- goto out_free;
- }
+ ret = virtio_ccw_set_transport_rev(vcdev);
+ if (ret)
+ goto out_free;
ret = register_virtio_device(&vcdev->vdev);
if (ret) {
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index b546da5d8ea3..cab9f3f63a38 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -48,6 +48,16 @@ config VIRTIO_BALLOON
If unsure, say M.
+config VIRTIO_INPUT
+ tristate "Virtio input driver"
+ depends on VIRTIO
+ depends on INPUT
+ ---help---
+ This driver supports virtio input devices such as
+ keyboards, mice and tablets.
+
+ If unsure, say M.
+
config VIRTIO_MMIO
tristate "Platform bus driver for memory mapped virtio devices"
depends on HAS_IOMEM
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index d85565b8ea46..41e30e3dc842 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
+obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 5ce2aa48fc6e..b1877d73fa56 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -278,12 +278,6 @@ static struct bus_type virtio_bus = {
.remove = virtio_dev_remove,
};
-bool virtio_device_is_legacy_only(struct virtio_device_id id)
-{
- return id.device == VIRTIO_ID_BALLOON;
-}
-EXPORT_SYMBOL_GPL(virtio_device_is_legacy_only);
-
int register_virtio_driver(struct virtio_driver *driver)
{
/* Catch this early. */
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 6a356e344f82..82e80e034f25 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -214,8 +214,8 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
u16 tag, u64 val)
{
BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
- vb->stats[idx].tag = tag;
- vb->stats[idx].val = val;
+ vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
+ vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
}
#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
@@ -283,18 +283,27 @@ static void virtballoon_changed(struct virtio_device *vdev)
static inline s64 towards_target(struct virtio_balloon *vb)
{
- __le32 v;
s64 target;
+ u32 num_pages;
- virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &v);
+ virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
+ &num_pages);
- target = le32_to_cpu(v);
+ /* Legacy balloon config space is LE, unlike all other devices. */
+ if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
+ num_pages = le32_to_cpu((__force __le32)num_pages);
+
+ target = num_pages;
return target - vb->num_pages;
}
static void update_balloon_size(struct virtio_balloon *vb)
{
- __le32 actual = cpu_to_le32(vb->num_pages);
+ u32 actual = vb->num_pages;
+
+ /* Legacy balloon config space is LE, unlike all other devices. */
+ if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
+ actual = (__force u32)cpu_to_le32(actual);
virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
&actual);
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
new file mode 100644
index 000000000000..60e2a1677563
--- /dev/null
+++ b/drivers/virtio/virtio_input.c
@@ -0,0 +1,384 @@
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/input.h>
+
+#include <uapi/linux/virtio_ids.h>
+#include <uapi/linux/virtio_input.h>
+
+struct virtio_input {
+ struct virtio_device *vdev;
+ struct input_dev *idev;
+ char name[64];
+ char serial[64];
+ char phys[64];
+ struct virtqueue *evt, *sts;
+ struct virtio_input_event evts[64];
+ spinlock_t lock;
+ bool ready;
+};
+
+static void virtinput_queue_evtbuf(struct virtio_input *vi,
+ struct virtio_input_event *evtbuf)
+{
+ struct scatterlist sg[1];
+
+ sg_init_one(sg, evtbuf, sizeof(*evtbuf));
+ virtqueue_add_inbuf(vi->evt, sg, 1, evtbuf, GFP_ATOMIC);
+}
+
+static void virtinput_recv_events(struct virtqueue *vq)
+{
+ struct virtio_input *vi = vq->vdev->priv;
+ struct virtio_input_event *event;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ if (vi->ready) {
+ while ((event = virtqueue_get_buf(vi->evt, &len)) != NULL) {
+ spin_unlock_irqrestore(&vi->lock, flags);
+ input_event(vi->idev,
+ le16_to_cpu(event->type),
+ le16_to_cpu(event->code),
+ le32_to_cpu(event->value));
+ spin_lock_irqsave(&vi->lock, flags);
+ virtinput_queue_evtbuf(vi, event);
+ }
+ virtqueue_kick(vq);
+ }
+ spin_unlock_irqrestore(&vi->lock, flags);
+}
+
+/*
+ * On error we are losing the status update, which isn't critical as
+ * this is typically used for stuff like keyboard leds.
+ */
+static int virtinput_send_status(struct virtio_input *vi,
+ u16 type, u16 code, s32 value)
+{
+ struct virtio_input_event *stsbuf;
+ struct scatterlist sg[1];
+ unsigned long flags;
+ int rc;
+
+ stsbuf = kzalloc(sizeof(*stsbuf), GFP_ATOMIC);
+ if (!stsbuf)
+ return -ENOMEM;
+
+ stsbuf->type = cpu_to_le16(type);
+ stsbuf->code = cpu_to_le16(code);
+ stsbuf->value = cpu_to_le32(value);
+ sg_init_one(sg, stsbuf, sizeof(*stsbuf));
+
+ spin_lock_irqsave(&vi->lock, flags);
+ if (vi->ready) {
+ rc = virtqueue_add_outbuf(vi->sts, sg, 1, stsbuf, GFP_ATOMIC);
+ virtqueue_kick(vi->sts);
+ } else {
+ rc = -ENODEV;
+ }
+ spin_unlock_irqrestore(&vi->lock, flags);
+
+ if (rc != 0)
+ kfree(stsbuf);
+ return rc;
+}
+
+static void virtinput_recv_status(struct virtqueue *vq)
+{
+ struct virtio_input *vi = vq->vdev->priv;
+ struct virtio_input_event *stsbuf;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ while ((stsbuf = virtqueue_get_buf(vi->sts, &len)) != NULL)
+ kfree(stsbuf);
+ spin_unlock_irqrestore(&vi->lock, flags);
+}
+
+static int virtinput_status(struct input_dev *idev, unsigned int type,
+ unsigned int code, int value)
+{
+ struct virtio_input *vi = input_get_drvdata(idev);
+
+ return virtinput_send_status(vi, type, code, value);
+}
+
+static u8 virtinput_cfg_select(struct virtio_input *vi,
+ u8 select, u8 subsel)
+{
+ u8 size;
+
+ virtio_cwrite(vi->vdev, struct virtio_input_config, select, &select);
+ virtio_cwrite(vi->vdev, struct virtio_input_config, subsel, &subsel);
+ virtio_cread(vi->vdev, struct virtio_input_config, size, &size);
+ return size;
+}
+
+static void virtinput_cfg_bits(struct virtio_input *vi, int select, int subsel,
+ unsigned long *bits, unsigned int bitcount)
+{
+ unsigned int bit;
+ u8 *virtio_bits;
+ u8 bytes;
+
+ bytes = virtinput_cfg_select(vi, select, subsel);
+ if (!bytes)
+ return;
+ if (bitcount > bytes * 8)
+ bitcount = bytes * 8;
+
+ /*
+ * Bitmap in virtio config space is a simple stream of bytes,
+ * with the first byte carrying bits 0-7, second bits 8-15 and
+ * so on.
+ */
+ virtio_bits = kzalloc(bytes, GFP_KERNEL);
+ if (!virtio_bits)
+ return;
+ virtio_cread_bytes(vi->vdev, offsetof(struct virtio_input_config,
+ u.bitmap),
+ virtio_bits, bytes);
+ for (bit = 0; bit < bitcount; bit++) {
+ if (virtio_bits[bit / 8] & (1 << (bit % 8)))
+ __set_bit(bit, bits);
+ }
+ kfree(virtio_bits);
+
+ if (select == VIRTIO_INPUT_CFG_EV_BITS)
+ __set_bit(subsel, vi->idev->evbit);
+}
+
+static void virtinput_cfg_abs(struct virtio_input *vi, int abs)
+{
+ u32 mi, ma, re, fu, fl;
+
+ virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ABS_INFO, abs);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.min, &mi);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.max, &ma);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.res, &re);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
+ input_set_abs_params(vi->idev, abs, mi, ma, fu, fl);
+ input_abs_set_res(vi->idev, abs, re);
+}
+
+static int virtinput_init_vqs(struct virtio_input *vi)
+{
+ struct virtqueue *vqs[2];
+ vq_callback_t *cbs[] = { virtinput_recv_events,
+ virtinput_recv_status };
+ static const char *names[] = { "events", "status" };
+ int err;
+
+ err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names);
+ if (err)
+ return err;
+ vi->evt = vqs[0];
+ vi->sts = vqs[1];
+
+ return 0;
+}
+
+static void virtinput_fill_evt(struct virtio_input *vi)
+{
+ unsigned long flags;
+ int i, size;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ size = virtqueue_get_vring_size(vi->evt);
+ if (size > ARRAY_SIZE(vi->evts))
+ size = ARRAY_SIZE(vi->evts);
+ for (i = 0; i < size; i++)
+ virtinput_queue_evtbuf(vi, &vi->evts[i]);
+ virtqueue_kick(vi->evt);
+ spin_unlock_irqrestore(&vi->lock, flags);
+}
+
+static int virtinput_probe(struct virtio_device *vdev)
+{
+ struct virtio_input *vi;
+ unsigned long flags;
+ size_t size;
+ int abs, err;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+ return -ENODEV;
+
+ vi = kzalloc(sizeof(*vi), GFP_KERNEL);
+ if (!vi)
+ return -ENOMEM;
+
+ vdev->priv = vi;
+ vi->vdev = vdev;
+ spin_lock_init(&vi->lock);
+
+ err = virtinput_init_vqs(vi);
+ if (err)
+ goto err_init_vq;
+
+ vi->idev = input_allocate_device();
+ if (!vi->idev) {
+ err = -ENOMEM;
+ goto err_input_alloc;
+ }
+ input_set_drvdata(vi->idev, vi);
+
+ size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_NAME, 0);
+ virtio_cread_bytes(vi->vdev, offsetof(struct virtio_input_config,
+ u.string),
+ vi->name, min(size, sizeof(vi->name)));
+ size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_SERIAL, 0);
+ virtio_cread_bytes(vi->vdev, offsetof(struct virtio_input_config,
+ u.string),
+ vi->serial, min(size, sizeof(vi->serial)));
+ snprintf(vi->phys, sizeof(vi->phys),
+ "virtio%d/input0", vdev->index);
+ vi->idev->name = vi->name;
+ vi->idev->phys = vi->phys;
+ vi->idev->uniq = vi->serial;
+
+ size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_DEVIDS, 0);
+ if (size >= sizeof(struct virtio_input_devids)) {
+ virtio_cread(vi->vdev, struct virtio_input_config,
+ u.ids.bustype, &vi->idev->id.bustype);
+ virtio_cread(vi->vdev, struct virtio_input_config,
+ u.ids.vendor, &vi->idev->id.vendor);
+ virtio_cread(vi->vdev, struct virtio_input_config,
+ u.ids.product, &vi->idev->id.product);
+ virtio_cread(vi->vdev, struct virtio_input_config,
+ u.ids.version, &vi->idev->id.version);
+ } else {
+ vi->idev->id.bustype = BUS_VIRTUAL;
+ }
+
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_PROP_BITS, 0,
+ vi->idev->propbit, INPUT_PROP_CNT);
+ size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_REP);
+ if (size)
+ __set_bit(EV_REP, vi->idev->evbit);
+
+ vi->idev->dev.parent = &vdev->dev;
+ vi->idev->event = virtinput_status;
+
+ /* device -> kernel */
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_KEY,
+ vi->idev->keybit, KEY_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_REL,
+ vi->idev->relbit, REL_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_ABS,
+ vi->idev->absbit, ABS_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_MSC,
+ vi->idev->mscbit, MSC_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_SW,
+ vi->idev->swbit, SW_CNT);
+
+ /* kernel -> device */
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_LED,
+ vi->idev->ledbit, LED_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_SND,
+ vi->idev->sndbit, SND_CNT);
+
+ if (test_bit(EV_ABS, vi->idev->evbit)) {
+ for (abs = 0; abs < ABS_CNT; abs++) {
+ if (!test_bit(abs, vi->idev->absbit))
+ continue;
+ virtinput_cfg_abs(vi, abs);
+ }
+ }
+
+ virtio_device_ready(vdev);
+ vi->ready = true;
+ err = input_register_device(vi->idev);
+ if (err)
+ goto err_input_register;
+
+ virtinput_fill_evt(vi);
+ return 0;
+
+err_input_register:
+ spin_lock_irqsave(&vi->lock, flags);
+ vi->ready = false;
+ spin_unlock_irqrestore(&vi->lock, flags);
+ input_free_device(vi->idev);
+err_input_alloc:
+ vdev->config->del_vqs(vdev);
+err_init_vq:
+ kfree(vi);
+ return err;
+}
+
+static void virtinput_remove(struct virtio_device *vdev)
+{
+ struct virtio_input *vi = vdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ vi->ready = false;
+ spin_unlock_irqrestore(&vi->lock, flags);
+
+ input_unregister_device(vi->idev);
+ vdev->config->del_vqs(vdev);
+ kfree(vi);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtinput_freeze(struct virtio_device *vdev)
+{
+ struct virtio_input *vi = vdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ vi->ready = false;
+ spin_unlock_irqrestore(&vi->lock, flags);
+
+ vdev->config->del_vqs(vdev);
+ return 0;
+}
+
+static int virtinput_restore(struct virtio_device *vdev)
+{
+ struct virtio_input *vi = vdev->priv;
+ int err;
+
+ err = virtinput_init_vqs(vi);
+ if (err)
+ return err;
+
+ virtio_device_ready(vdev);
+ vi->ready = true;
+ virtinput_fill_evt(vi);
+ return 0;
+}
+#endif
+
+static unsigned int features[] = {
+ /* none */
+};
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_INPUT, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct virtio_driver virtio_input_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = virtinput_probe,
+ .remove = virtinput_remove,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtinput_freeze,
+ .restore = virtinput_restore,
+#endif
+};
+
+module_virtio_driver(virtio_input_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Virtio input device driver");
+MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 6010d7ec0a0f..7a5e60dea6c5 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -581,14 +581,6 @@ static int virtio_mmio_probe(struct platform_device *pdev)
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
- /* Reject legacy-only IDs for version 2 devices */
- if (vm_dev->version == 2 &&
- virtio_device_is_legacy_only(vm_dev->vdev.id)) {
- dev_err(&pdev->dev, "Version 2 not supported for devices %u!\n",
- vm_dev->vdev.id.device);
- return -ENODEV;
- }
-
if (vm_dev->version == 1)
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 2aa38e59db2e..e88e0997a889 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -20,6 +20,50 @@
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci_common.h"
+/*
+ * Type-safe wrappers for io accesses.
+ * Use these to enforce at compile time the following spec requirement:
+ *
+ * The driver MUST access each field using the “natural” access
+ * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
+ * for 16-bit fields and 8-bit accesses for 8-bit fields.
+ */
+static inline u8 vp_ioread8(u8 __iomem *addr)
+{
+ return ioread8(addr);
+}
+static inline u16 vp_ioread16 (u16 __iomem *addr)
+{
+ return ioread16(addr);
+}
+
+static inline u32 vp_ioread32(u32 __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
+{
+ iowrite8(value, addr);
+}
+
+static inline void vp_iowrite16(u16 value, u16 __iomem *addr)
+{
+ iowrite16(value, addr);
+}
+
+static inline void vp_iowrite32(u32 value, u32 __iomem *addr)
+{
+ iowrite32(value, addr);
+}
+
+static void vp_iowrite64_twopart(u64 val,
+ __le32 __iomem *lo, __le32 __iomem *hi)
+{
+ vp_iowrite32((u32)val, lo);
+ vp_iowrite32(val >> 32, hi);
+}
+
static void __iomem *map_capability(struct pci_dev *dev, int off,
size_t minlen,
u32 align,
@@ -94,22 +138,16 @@ static void __iomem *map_capability(struct pci_dev *dev, int off,
return p;
}
-static void iowrite64_twopart(u64 val, __le32 __iomem *lo, __le32 __iomem *hi)
-{
- iowrite32((u32)val, lo);
- iowrite32(val >> 32, hi);
-}
-
/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
u64 features;
- iowrite32(0, &vp_dev->common->device_feature_select);
- features = ioread32(&vp_dev->common->device_feature);
- iowrite32(1, &vp_dev->common->device_feature_select);
- features |= ((u64)ioread32(&vp_dev->common->device_feature) << 32);
+ vp_iowrite32(0, &vp_dev->common->device_feature_select);
+ features = vp_ioread32(&vp_dev->common->device_feature);
+ vp_iowrite32(1, &vp_dev->common->device_feature_select);
+ features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32);
return features;
}
@@ -128,10 +166,10 @@ static int vp_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- iowrite32(0, &vp_dev->common->guest_feature_select);
- iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
- iowrite32(1, &vp_dev->common->guest_feature_select);
- iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
+ vp_iowrite32(0, &vp_dev->common->guest_feature_select);
+ vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
+ vp_iowrite32(1, &vp_dev->common->guest_feature_select);
+ vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
return 0;
}
@@ -210,14 +248,14 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
static u32 vp_generation(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- return ioread8(&vp_dev->common->config_generation);
+ return vp_ioread8(&vp_dev->common->config_generation);
}
/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- return ioread8(&vp_dev->common->device_status);
+ return vp_ioread8(&vp_dev->common->device_status);
}
static void vp_set_status(struct virtio_device *vdev, u8 status)
@@ -225,17 +263,17 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* We should never be setting status to 0. */
BUG_ON(status == 0);
- iowrite8(status, &vp_dev->common->device_status);
+ vp_iowrite8(status, &vp_dev->common->device_status);
}
static void vp_reset(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
- iowrite8(0, &vp_dev->common->device_status);
+ vp_iowrite8(0, &vp_dev->common->device_status);
/* Flush out the status write, and flush in device writes,
* including MSI-X interrupts, if any. */
- ioread8(&vp_dev->common->device_status);
+ vp_ioread8(&vp_dev->common->device_status);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
@@ -243,10 +281,10 @@ static void vp_reset(struct virtio_device *vdev)
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
/* Setup the vector used for configuration events */
- iowrite16(vector, &vp_dev->common->msix_config);
+ vp_iowrite16(vector, &vp_dev->common->msix_config);
/* Verify we had enough resources to assign the vector */
/* Will also flush the write out to device */
- return ioread16(&vp_dev->common->msix_config);
+ return vp_ioread16(&vp_dev->common->msix_config);
}
static size_t vring_pci_size(u16 num)
@@ -286,15 +324,15 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
u16 num, off;
int err;
- if (index >= ioread16(&cfg->num_queues))
+ if (index >= vp_ioread16(&cfg->num_queues))
return ERR_PTR(-ENOENT);
/* Select the queue we're interested in */
- iowrite16(index, &cfg->queue_select);
+ vp_iowrite16(index, &cfg->queue_select);
/* Check if queue is either not available or already active. */
- num = ioread16(&cfg->queue_size);
- if (!num || ioread16(&cfg->queue_enable))
+ num = vp_ioread16(&cfg->queue_size);
+ if (!num || vp_ioread16(&cfg->queue_enable))
return ERR_PTR(-ENOENT);
if (num & (num - 1)) {
@@ -303,7 +341,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
/* get offset of notification word for this vq */
- off = ioread16(&cfg->queue_notify_off);
+ off = vp_ioread16(&cfg->queue_notify_off);
info->num = num;
info->msix_vector = msix_vec;
@@ -322,13 +360,13 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
/* activate the queue */
- iowrite16(num, &cfg->queue_size);
- iowrite64_twopart(virt_to_phys(info->queue),
- &cfg->queue_desc_lo, &cfg->queue_desc_hi);
- iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
- &cfg->queue_avail_lo, &cfg->queue_avail_hi);
- iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
- &cfg->queue_used_lo, &cfg->queue_used_hi);
+ vp_iowrite16(num, &cfg->queue_size);
+ vp_iowrite64_twopart(virt_to_phys(info->queue),
+ &cfg->queue_desc_lo, &cfg->queue_desc_hi);
+ vp_iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
+ &cfg->queue_avail_lo, &cfg->queue_avail_hi);
+ vp_iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
+ &cfg->queue_used_lo, &cfg->queue_used_hi);
if (vp_dev->notify_base) {
/* offset should not wrap */
@@ -357,8 +395,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
- iowrite16(msix_vec, &cfg->queue_msix_vector);
- msix_vec = ioread16(&cfg->queue_msix_vector);
+ vp_iowrite16(msix_vec, &cfg->queue_msix_vector);
+ msix_vec = vp_ioread16(&cfg->queue_msix_vector);
if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto err_assign_vector;
@@ -393,8 +431,8 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
* this, there's no way to go back except reset.
*/
list_for_each_entry(vq, &vdev->vqs, list) {
- iowrite16(vq->index, &vp_dev->common->queue_select);
- iowrite16(1, &vp_dev->common->queue_enable);
+ vp_iowrite16(vq->index, &vp_dev->common->queue_select);
+ vp_iowrite16(1, &vp_dev->common->queue_enable);
}
return 0;
@@ -405,13 +443,13 @@ static void del_vq(struct virtio_pci_vq_info *info)
struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
- iowrite16(vq->index, &vp_dev->common->queue_select);
+ vp_iowrite16(vq->index, &vp_dev->common->queue_select);
if (vp_dev->msix_enabled) {
- iowrite16(VIRTIO_MSI_NO_VECTOR,
- &vp_dev->common->queue_msix_vector);
+ vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
+ &vp_dev->common->queue_msix_vector);
/* Flush the write out to device */
- ioread16(&vp_dev->common->queue_msix_vector);
+ vp_ioread16(&vp_dev->common->queue_msix_vector);
}
if (!vp_dev->notify_base)
@@ -577,9 +615,6 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
}
vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
- if (virtio_device_is_legacy_only(vp_dev->vdev.id))
- return -ENODEV;
-
/* check for a common config: if not, use legacy mode (bar 0). */
common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
IORESOURCE_IO | IORESOURCE_MEM);
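The vp_ioread/vp_iowrite wrappers added to virtio_pci_modern.c above are purely about getting width checking from the C type system: each wrapper accepts only a pointer of the matching width, so a wrong-sized access to a config field fails at build time instead of reaching the device. A standalone sketch of the same idea, with hypothetical structure and function names (not the real virtio_pci_common_cfg layout):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register block; each field's C type encodes its width. */
struct demo_cfg {
	uint32_t device_feature;	/* 32-bit field: 32-bit accesses only */
	uint16_t queue_select;		/* 16-bit field: 16-bit accesses only */
	uint8_t  device_status;		/*  8-bit field:  8-bit accesses only */
};

/* Width-specific accessors: the parameter type is the compile-time check. */
static inline uint16_t demo_read16(const volatile uint16_t *addr)
{
	return *addr;
}

static inline uint32_t demo_read32(const volatile uint32_t *addr)
{
	return *addr;
}

int main(void)
{
	struct demo_cfg cfg = { .queue_select = 3 };

	/* Natural-width access: compiles cleanly. */
	printf("queue_select = %u\n", (unsigned)demo_read16(&cfg.queue_select));

	/*
	 * Wrong-width access: demo_read32(&cfg.queue_select) would pass a
	 * uint16_t pointer where a uint32_t pointer is expected, so the
	 * compiler rejects it (or warns loudly). That is the property the
	 * vp_ioread and vp_iowrite wrappers give the modern virtio PCI code.
	 */
	return 0;
}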