/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */
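
/*
 * Coalesced MMIO batches guest writes to registered MMIO zones into a
 * one-page ring buffer shared with userspace instead of forcing a VM
 * exit for every write. The VMM drains the ring the next time it gains
 * control; see the illustrative consumer sketch after
 * coalesced_mmio_write() below.
 */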

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* Is (addr, len) in a batchable area, i.e. fully included in
	 * (zone->addr, zone->size)? Reject negative lengths and
	 * accesses whose end wraps around the address space.
	 */
	if (len < 0)
		return 0;
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}
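
/*
 * Ring occupancy, illustrated: userspace consumes entries at ring->first,
 * the kernel produces at ring->last, and one slot is always sacrificed so
 * that first == last means "empty" while first == (last + 1) modulo
 * KVM_COALESCED_MMIO_MAX means "full". For example, with first == 5 and
 * last == 4, (first - last - 1) % KVM_COALESCED_MMIO_MAX below yields 0
 * free slots, and the write falls back to a regular exit to userspace.
 */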

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Are we able to batch it? */

	/* last is the first free entry;
	 * check that we don't meet the first used entry.
	 * There is always one unused entry in the buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	smp_wmb();	/* publish the entry before advancing ring->last */
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}
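
/*
 * Illustrative only: a minimal sketch of how a VMM might drain the ring
 * from userspace after an exit. handle_mmio_write() is a hypothetical
 * helper, and the ring pointer is assumed to come from mmap()ing the
 * vcpu fd at KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE.
 *
 *	void drain_coalesced_ring(struct kvm_coalesced_mmio_ring *ring)
 *	{
 *		while (ring->first != ring->last) {
 *			struct kvm_coalesced_mmio *m =
 *				&ring->coalesced_mmio[ring->first];
 *
 *			handle_mmio_write(m->phys_addr, m->data, m->len);
 *			ring->first = (ring->first + 1) %
 *				      KVM_COALESCED_MMIO_MAX;
 *		}
 *	}
 */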

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};
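
/*
 * Note that no .read handler is provided: only writes are coalesced, so
 * a read from a registered zone is not claimed by this device and takes
 * the ordinary MMIO exit path to userspace.
 */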

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}
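
/*
 * Illustrative only: userspace reaches the two handlers below through the
 * KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO vm ioctls,
 * along these lines (vm_fd is an assumed, already-open VM descriptor and
 * the address is an arbitrary example):
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xfed00000,
 *		.size = 0x1000,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 */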

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}