Merge branches 'iommu/fixes', 'x86/vt-d', 'x86/amd', 'arm/smmu', 'arm/tegra' and 'core' into next
Conflicts:
drivers/iommu/amd_iommu.c
drivers/iommu/tegra-gart.c
drivers/iommu/tegra-smmu.c
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 4c35f08..ef150eb 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4737,7 +4737,7 @@
return NULL;
/* libsas case */
- if (!ap->scsi_host) {
+ if (ap->flags & ATA_FLAG_SAS_HOST) {
tag = ata_sas_allocate_tag(ap);
if (tag < 0)
return NULL;
@@ -4776,7 +4776,7 @@
tag = qc->tag;
if (likely(ata_tag_valid(tag))) {
qc->tag = ATA_TAG_POISON;
- if (!ap->scsi_host)
+ if (ap->flags & ATA_FLAG_SAS_HOST)
ata_sas_free_tag(tag, ap);
}
}
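
The two libata hunks above stop inferring "libsas owns this port" from a missing
scsi_host and key off an explicit port flag instead, which keeps working once
libsas ports carry a scsi_host of their own. The counterpart, presumably set
where libsas allocates the port (the exact location is an assumption, not part
of this diff):

	/* in the libsas port-allocation path (location assumed) */
	ap->flags |= ATA_FLAG_SAS_HOST;	/* tags for this port are libsas-managed */
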
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index beb8b27..a13587b 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -243,4 +243,12 @@
extern struct regcache_ops regcache_lzo_ops;
extern struct regcache_ops regcache_flat_ops;
+static inline const char *regmap_name(const struct regmap *map)
+{
+ if (map->dev)
+ return dev_name(map->dev);
+
+ return map->name;
+}
+
#endif
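
regmap_name() prefers the owning device's name and falls back to map->name for
regmaps without a struct device; the tracepoint changes below rely on this. A
usage sketch; the pr_debug call is illustrative and not part of the patch:

	/* name a regmap safely, with or without a backing device */
	pr_debug("syncing regmap %s\n", regmap_name(map));
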
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index da84f54..87db989 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -218,7 +218,7 @@
ret = map->cache_ops->read(map, reg, value);
if (ret == 0)
- trace_regmap_reg_read_cache(map->dev, reg, *value);
+ trace_regmap_reg_read_cache(map, reg, *value);
return ret;
}
@@ -311,7 +311,7 @@
dev_dbg(map->dev, "Syncing %s cache\n",
map->cache_ops->name);
name = map->cache_ops->name;
- trace_regcache_sync(map->dev, name, "start");
+ trace_regcache_sync(map, name, "start");
if (!map->cache_dirty)
goto out;
@@ -346,7 +346,7 @@
regmap_async_complete(map);
- trace_regcache_sync(map->dev, name, "stop");
+ trace_regcache_sync(map, name, "stop");
return ret;
}
@@ -381,7 +381,7 @@
name = map->cache_ops->name;
dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
- trace_regcache_sync(map->dev, name, "start region");
+ trace_regcache_sync(map, name, "start region");
if (!map->cache_dirty)
goto out;
@@ -401,7 +401,7 @@
regmap_async_complete(map);
- trace_regcache_sync(map->dev, name, "stop region");
+ trace_regcache_sync(map, name, "stop region");
return ret;
}
@@ -428,7 +428,7 @@
map->lock(map->lock_arg);
- trace_regcache_drop_region(map->dev, min, max);
+ trace_regcache_drop_region(map, min, max);
ret = map->cache_ops->drop(map, min, max);
@@ -455,7 +455,7 @@
map->lock(map->lock_arg);
WARN_ON(map->cache_bypass && enable);
map->cache_only = enable;
- trace_regmap_cache_only(map->dev, enable);
+ trace_regmap_cache_only(map, enable);
map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -493,7 +493,7 @@
map->lock(map->lock_arg);
WARN_ON(map->cache_only && enable);
map->cache_bypass = enable;
- trace_regmap_cache_bypass(map->dev, enable);
+ trace_regmap_cache_bypass(map, enable);
map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f99b098..dbfe6a6 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1281,7 +1281,7 @@
if (map->async && map->bus->async_write) {
struct regmap_async *async;
- trace_regmap_async_write_start(map->dev, reg, val_len);
+ trace_regmap_async_write_start(map, reg, val_len);
spin_lock_irqsave(&map->async_lock, flags);
async = list_first_entry_or_null(&map->async_free,
@@ -1339,8 +1339,7 @@
return ret;
}
- trace_regmap_hw_write_start(map->dev, reg,
- val_len / map->format.val_bytes);
+ trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
/* If we're doing a single register write we can probably just
* send the work_buf directly, otherwise try to do a gather
@@ -1372,8 +1371,7 @@
kfree(buf);
}
- trace_regmap_hw_write_done(map->dev, reg,
- val_len / map->format.val_bytes);
+ trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
return ret;
}
@@ -1407,12 +1405,12 @@
map->format.format_write(map, reg, val);
- trace_regmap_hw_write_start(map->dev, reg, 1);
+ trace_regmap_hw_write_start(map, reg, 1);
ret = map->bus->write(map->bus_context, map->work_buf,
map->format.buf_size);
- trace_regmap_hw_write_done(map->dev, reg, 1);
+ trace_regmap_hw_write_done(map, reg, 1);
return ret;
}
@@ -1470,7 +1468,7 @@
dev_info(map->dev, "%x <= %x\n", reg, val);
#endif
- trace_regmap_reg_write(map->dev, reg, val);
+ trace_regmap_reg_write(map, reg, val);
return map->reg_write(context, reg, val);
}
@@ -1773,7 +1771,7 @@
for (i = 0; i < num_regs; i++) {
int reg = regs[i].reg;
int val = regs[i].def;
- trace_regmap_hw_write_start(map->dev, reg, 1);
+ trace_regmap_hw_write_start(map, reg, 1);
map->format.format_reg(u8, reg, map->reg_shift);
u8 += reg_bytes + pad_bytes;
map->format.format_val(u8, val, 0);
@@ -1788,7 +1786,7 @@
for (i = 0; i < num_regs; i++) {
int reg = regs[i].reg;
- trace_regmap_hw_write_done(map->dev, reg, 1);
+ trace_regmap_hw_write_done(map, reg, 1);
}
return ret;
}
@@ -2059,15 +2057,13 @@
*/
u8[0] |= map->read_flag_mask;
- trace_regmap_hw_read_start(map->dev, reg,
- val_len / map->format.val_bytes);
+ trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
ret = map->bus->read(map->bus_context, map->work_buf,
map->format.reg_bytes + map->format.pad_bytes,
val, val_len);
- trace_regmap_hw_read_done(map->dev, reg,
- val_len / map->format.val_bytes);
+ trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
return ret;
}
@@ -2123,7 +2119,7 @@
dev_info(map->dev, "%x => %x\n", reg, *val);
#endif
- trace_regmap_reg_read(map->dev, reg, *val);
+ trace_regmap_reg_read(map, reg, *val);
if (!map->cache_bypass)
regcache_write(map, reg, *val);
@@ -2480,7 +2476,7 @@
struct regmap *map = async->map;
bool wake;
- trace_regmap_async_io_complete(map->dev);
+ trace_regmap_async_io_complete(map);
spin_lock(&map->async_lock);
list_move(&async->list, &map->async_free);
@@ -2525,7 +2521,7 @@
if (!map->bus || !map->bus->async_write)
return 0;
- trace_regmap_async_complete_start(map->dev);
+ trace_regmap_async_complete_start(map);
wait_event(map->async_waitq, regmap_async_is_done(map));
@@ -2534,7 +2530,7 @@
map->async_ret = 0;
spin_unlock_irqrestore(&map->async_lock, flags);
- trace_regmap_async_complete_done(map->dev);
+ trace_regmap_async_complete_done(map);
return ret;
}
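
All regmap tracepoints above now receive the struct regmap itself rather than
map->dev, so the trace events can record a name even for deviceless maps via
regmap_name(). A sketch of how such an event might capture it (schematic only;
the real definitions live in the regmap trace header):

	TRACE_EVENT(regmap_reg_write,
		TP_PROTO(struct regmap *map, unsigned int reg, unsigned int val),
		TP_ARGS(map, reg, val),
		TP_STRUCT__entry(
			__string(name, regmap_name(map))
			__field(unsigned int, reg)
			__field(unsigned int, val)
		),
		TP_fast_assign(
			__assign_str(name, regmap_name(map));
			__entry->reg = reg;
			__entry->val = val;
		),
		TP_printk("%s reg=%x val=%x",
			  __get_str(name), __entry->reg, __entry->val)
	);
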
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4bc2a5c..a98c41f 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -803,10 +803,6 @@
return -EINVAL;
}
- nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
- if (!nbd_dev)
- return -ENOMEM;
-
part_shift = 0;
if (max_part > 0) {
part_shift = fls(max_part);
@@ -828,6 +824,10 @@
if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL;
+ nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
+ if (!nbd_dev)
+ return -ENOMEM;
+
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk)
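
The nbd hunks move the nbd_dev allocation below all module-parameter checks:
previously an invalid max_part/nbds_max combination returned -EINVAL with the
freshly allocated array still live, leaking it. The result is the usual
validate-then-allocate ordering:

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;		/* nothing allocated yet, nothing to leak */

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;
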
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index ceb32dd..e23be20 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -3003,6 +3003,7 @@
}
get_device(dev->device);
+ INIT_LIST_HEAD(&dev->node);
INIT_WORK(&dev->probe_work, nvme_async_probe);
schedule_work(&dev->probe_work);
return 0;
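
Initialising dev->node before the async probe is scheduled makes the list head
valid from the moment the device is visible, so a removal that races with (or
precedes) the probe work can still operate on it safely:

	list_del_init(&dev->node);	/* no-op on a still-empty, initialised node */
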
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 68161f7..a0b036c 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -192,6 +192,7 @@
config SH_TIMER_CMT
bool "Renesas CMT timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
default SYS_SUPPORTS_SH_CMT
help
This enables build of a clocksource and clockevent driver for
@@ -201,6 +202,7 @@
config SH_TIMER_MTU2
bool "Renesas MTU2 timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
default SYS_SUPPORTS_SH_MTU2
help
This enables build of a clockevent driver for the Multi-Function
@@ -210,6 +212,7 @@
config SH_TIMER_TMU
bool "Renesas TMU timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
default SYS_SUPPORTS_SH_TMU
help
This enables build of a clocksource and clockevent driver for
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 5dcbf90..58597fb 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -17,7 +17,6 @@
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/reset.h>
-#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -137,11 +136,6 @@
.dev_id = &sun5i_clockevent,
};
-static u64 sun5i_timer_sched_read(void)
-{
- return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
-}
-
static void __init sun5i_timer_init(struct device_node *node)
{
struct reset_control *rstc;
@@ -172,7 +166,6 @@
writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
timer_base + TIMER_CTL_REG(1));
- sched_clock_register(sun5i_timer_sched_read, 32, rate);
clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
rate, 340, 32, clocksource_mmio_readl_down);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f6d04c7..679b10e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -525,17 +525,6 @@
}
EXPORT_SYMBOL(drm_framebuffer_reference);
-static void drm_framebuffer_free_bug(struct kref *kref)
-{
- BUG();
-}
-
-static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
- DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
- kref_put(&fb->refcount, drm_framebuffer_free_bug);
-}
-
/**
* drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
* @fb: fb to unregister
@@ -1320,7 +1309,7 @@
return;
}
/* disconnect the plane from the fb and crtc: */
- __drm_framebuffer_unreference(plane->old_fb);
+ drm_framebuffer_unreference(plane->old_fb);
plane->old_fb = NULL;
plane->fb = NULL;
plane->crtc = NULL;
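
__drm_framebuffer_unreference() dropped a reference through a kref release
callback that simply BUG()s, i.e. it asserted the caller could not hold the
last reference. In this plane-disable path old_fb can legitimately be the last
reference, so the plain drm_framebuffer_unreference(), whose release callback
actually frees the framebuffer, is the correct call and the trap helper goes
away. The general kref shape, with a hypothetical release function:

	static void fb_release(struct kref *kref)
	{
		struct drm_framebuffer *fb =
			container_of(kref, struct drm_framebuffer, refcount);
		/* real teardown runs here, on the final reference drop */
	}

	kref_put(&fb->refcount, fb_release);
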
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5b20586..27ea6bd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2737,24 +2737,11 @@
WARN_ON(i915_verify_lists(ring->dev));
- /* Move any buffers on the active list that are no longer referenced
- * by the ringbuffer to the flushing/inactive lists as appropriate,
- * before we free the context associated with the requests.
+ /* Retire requests first as we use it above for the early return.
+ * If we retire requests last, we may use a later seqno and so clear
+ * the requests lists without clearing the active list, leading to
+ * confusion.
*/
- while (!list_empty(&ring->active_list)) {
- struct drm_i915_gem_object *obj;
-
- obj = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list);
-
- if (!i915_gem_request_completed(obj->last_read_req, true))
- break;
-
- i915_gem_object_move_to_inactive(obj);
- }
-
-
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
struct intel_ringbuffer *ringbuf;
@@ -2789,6 +2776,23 @@
i915_gem_free_request(request);
}
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate,
+ * before we free the context associated with the requests.
+ */
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ if (!i915_gem_request_completed(obj->last_read_req, true))
+ break;
+
+ i915_gem_object_move_to_inactive(obj);
+ }
+
if (unlikely(ring->trace_irq_req &&
i915_gem_request_completed(ring->trace_irq_req, true))) {
ring->irq_put(ring);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6d22128..f75173c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2438,8 +2438,15 @@
if (!intel_crtc->base.primary->fb)
return;
- if (intel_alloc_plane_obj(intel_crtc, plane_config))
+ if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
+ struct drm_plane *primary = intel_crtc->base.primary;
+
+ primary->state->crtc = &intel_crtc->base;
+ primary->crtc = &intel_crtc->base;
+ update_state_fb(primary);
+
return;
+ }
kfree(intel_crtc->base.primary->fb);
intel_crtc->base.primary->fb = NULL;
@@ -2462,11 +2469,15 @@
continue;
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+ struct drm_plane *primary = intel_crtc->base.primary;
+
if (obj->tiling_mode != I915_TILING_NONE)
dev_priv->preserve_bios_swizzle = true;
drm_framebuffer_reference(c->primary->fb);
- intel_crtc->base.primary->fb = c->primary->fb;
+ primary->fb = c->primary->fb;
+ primary->state->crtc = &intel_crtc->base;
+ primary->crtc = &intel_crtc->base;
obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
break;
}
@@ -6663,7 +6674,6 @@
plane_config->size);
crtc->base.primary->fb = fb;
- update_state_fb(crtc->base.primary);
}
static void chv_crtc_clock_get(struct intel_crtc *crtc,
@@ -7704,7 +7714,6 @@
plane_config->size);
crtc->base.primary->fb = fb;
- update_state_fb(crtc->base.primary);
return;
error:
@@ -7798,7 +7807,6 @@
plane_config->size);
crtc->base.primary->fb = fb;
- update_state_fb(crtc->base.primary);
}
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 48882c1..e43d489 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -33,6 +33,7 @@
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
+#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@@ -126,6 +127,11 @@
*
****************************************************************************/
+static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct protection_domain, domain);
+}
+
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -1321,7 +1327,9 @@
* This function checks if there is a PTE for a given dma address. If
* there is one, it returns the pointer to it.
*/
-static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
+static u64 *fetch_pte(struct protection_domain *domain,
+ unsigned long address,
+ unsigned long *page_size)
{
int level;
u64 *pte;
@@ -1329,8 +1337,9 @@
if (address > PM_LEVEL_SIZE(domain->mode))
return NULL;
- level = domain->mode - 1;
- pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+ level = domain->mode - 1;
+ pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
@@ -1339,19 +1348,9 @@
return NULL;
/* Large PTE */
- if (PM_PTE_LEVEL(*pte) == 0x07) {
- unsigned long pte_mask, __pte;
-
- /*
- * If we have a series of large PTEs, make
- * sure to return a pointer to the first one.
- */
- pte_mask = PTE_PAGE_SIZE(*pte);
- pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
- __pte = ((unsigned long)pte) & pte_mask;
-
- return (u64 *)__pte;
- }
+ if (PM_PTE_LEVEL(*pte) == 7 ||
+ PM_PTE_LEVEL(*pte) == 0)
+ break;
/* No level skipping support yet */
if (PM_PTE_LEVEL(*pte) != level)
@@ -1360,8 +1359,21 @@
level -= 1;
/* Walk to the next level */
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[PM_LEVEL_INDEX(level, address)];
+ pte = IOMMU_PTE_PAGE(*pte);
+ pte = &pte[PM_LEVEL_INDEX(level, address)];
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
+ }
+
+ if (PM_PTE_LEVEL(*pte) == 0x07) {
+ unsigned long pte_mask;
+
+ /*
+ * If we have a series of large PTEs, make
+ * sure to return a pointer to the first one.
+ */
+ *page_size = pte_mask = PTE_PAGE_SIZE(*pte);
+ pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
+ pte = (u64 *)(((unsigned long)pte) & pte_mask);
}
return pte;
@@ -1383,13 +1395,14 @@
u64 __pte, *pte;
int i, count;
+ BUG_ON(!IS_ALIGNED(bus_addr, page_size));
+ BUG_ON(!IS_ALIGNED(phys_addr, page_size));
+
if (!(prot & IOMMU_PROT_MASK))
return -EINVAL;
- bus_addr = PAGE_ALIGN(bus_addr);
- phys_addr = PAGE_ALIGN(phys_addr);
- count = PAGE_SIZE_PTE_COUNT(page_size);
- pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
+ count = PAGE_SIZE_PTE_COUNT(page_size);
+ pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
if (!pte)
return -ENOMEM;
@@ -1398,7 +1411,7 @@
if (IOMMU_PTE_PRESENT(pte[i]))
return -EBUSY;
- if (page_size > PAGE_SIZE) {
+ if (count > 1) {
__pte = PAGE_SIZE_PTE(phys_addr, page_size);
__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
} else
@@ -1421,7 +1434,8 @@
unsigned long bus_addr,
unsigned long page_size)
{
- unsigned long long unmap_size, unmapped;
+ unsigned long long unmapped;
+ unsigned long unmap_size;
u64 *pte;
BUG_ON(!is_power_of_2(page_size));
@@ -1430,28 +1444,12 @@
while (unmapped < page_size) {
- pte = fetch_pte(dom, bus_addr);
+ pte = fetch_pte(dom, bus_addr, &unmap_size);
- if (!pte) {
- /*
- * No PTE for this address
- * move forward in 4kb steps
- */
- unmap_size = PAGE_SIZE;
- } else if (PM_PTE_LEVEL(*pte) == 0) {
- /* 4kb PTE found for this address */
- unmap_size = PAGE_SIZE;
- *pte = 0ULL;
- } else {
- int count, i;
+ if (pte) {
+ int i, count;
- /* Large PTE found which maps this address */
- unmap_size = PTE_PAGE_SIZE(*pte);
-
- /* Only unmap from the first pte in the page */
- if ((unmap_size - 1) & bus_addr)
- break;
- count = PAGE_SIZE_PTE_COUNT(unmap_size);
+ count = PAGE_SIZE_PTE_COUNT(unmap_size);
for (i = 0; i < count; i++)
pte[i] = 0ULL;
}
@@ -1599,7 +1597,7 @@
{
int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
struct amd_iommu *iommu;
- unsigned long i, old_size;
+ unsigned long i, old_size, pte_pgsize;
#ifdef CONFIG_IOMMU_STRESS
populate = false;
@@ -1672,12 +1670,13 @@
*/
for (i = dma_dom->aperture[index]->offset;
i < dma_dom->aperture_size;
- i += PAGE_SIZE) {
- u64 *pte = fetch_pte(&dma_dom->domain, i);
+ i += pte_pgsize) {
+ u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
continue;
- dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
+ dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT,
+ pte_pgsize >> 12);
}
update_domain(&dma_dom->domain);
@@ -2422,16 +2421,6 @@
dev_data = get_dev_data(dev);
switch (action) {
- case BUS_NOTIFY_UNBOUND_DRIVER:
-
- domain = domain_for_device(dev);
-
- if (!domain)
- goto out;
- if (dev_data->passthrough)
- break;
- detach_device(dev);
- break;
case BUS_NOTIFY_ADD_DEVICE:
iommu_init_device(dev);
@@ -2467,7 +2456,7 @@
dev->archdata.dma_ops = &amd_iommu_dma_ops;
break;
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_REMOVED_DEVICE:
iommu_uninit_device(dev);
@@ -2923,38 +2912,42 @@
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs)
{
- unsigned long flags;
- void *virt_addr;
- struct protection_domain *domain;
- phys_addr_t paddr;
u64 dma_mask = dev->coherent_dma_mask;
+ struct protection_domain *domain;
+ unsigned long flags;
+ struct page *page;
INC_STATS_COUNTER(cnt_alloc_coherent);
domain = get_domain(dev);
if (PTR_ERR(domain) == -EINVAL) {
- virt_addr = (void *)__get_free_pages(flag, get_order(size));
- *dma_addr = __pa(virt_addr);
- return virt_addr;
+ page = alloc_pages(flag, get_order(size));
+ *dma_addr = page_to_phys(page);
+ return page_address(page);
} else if (IS_ERR(domain))
return NULL;
+ size = PAGE_ALIGN(size);
dma_mask = dev->coherent_dma_mask;
flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
- flag |= __GFP_ZERO;
- virt_addr = (void *)__get_free_pages(flag, get_order(size));
- if (!virt_addr)
- return NULL;
+ page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
+ if (!page) {
+ if (!(flag & __GFP_WAIT))
+ return NULL;
- paddr = virt_to_phys(virt_addr);
+ page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+ get_order(size));
+ if (!page)
+ return NULL;
+ }
if (!dma_mask)
dma_mask = *dev->dma_mask;
spin_lock_irqsave(&domain->lock, flags);
- *dma_addr = __map_single(dev, domain->priv, paddr,
+ *dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
size, DMA_BIDIRECTIONAL, true, dma_mask);
if (*dma_addr == DMA_ERROR_CODE) {
@@ -2966,11 +2959,12 @@
spin_unlock_irqrestore(&domain->lock, flags);
- return virt_addr;
+ return page_address(page);
out_free:
- free_pages((unsigned long)virt_addr, get_order(size));
+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+ __free_pages(page, get_order(size));
return NULL;
}
@@ -2982,11 +2976,15 @@
void *virt_addr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
- unsigned long flags;
struct protection_domain *domain;
+ unsigned long flags;
+ struct page *page;
INC_STATS_COUNTER(cnt_free_coherent);
+ page = virt_to_page(virt_addr);
+ size = PAGE_ALIGN(size);
+
domain = get_domain(dev);
if (IS_ERR(domain))
goto free_mem;
@@ -3000,7 +2998,8 @@
spin_unlock_irqrestore(&domain->lock, flags);
free_mem:
- free_pages((unsigned long)virt_addr, get_order(size));
+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+ __free_pages(page, get_order(size));
}
/*
@@ -3236,42 +3235,45 @@
return 0;
}
-static int amd_iommu_domain_init(struct iommu_domain *dom)
+
+static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+{
+ struct protection_domain *pdomain;
+
+ /* We only support unmanaged domains for now */
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ pdomain = protection_domain_alloc();
+ if (!pdomain)
+ goto out_free;
+
+ pdomain->mode = PAGE_MODE_3_LEVEL;
+ pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pdomain->pt_root)
+ goto out_free;
+
+ pdomain->domain.geometry.aperture_start = 0;
+ pdomain->domain.geometry.aperture_end = ~0ULL;
+ pdomain->domain.geometry.force_aperture = true;
+
+ return &pdomain->domain;
+
+out_free:
+ protection_domain_free(pdomain);
+
+ return NULL;
+}
+
+static void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
- domain = protection_domain_alloc();
- if (!domain)
- goto out_free;
-
- domain->mode = PAGE_MODE_3_LEVEL;
- domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!domain->pt_root)
- goto out_free;
-
- domain->iommu_domain = dom;
-
- dom->priv = domain;
-
- dom->geometry.aperture_start = 0;
- dom->geometry.aperture_end = ~0ULL;
- dom->geometry.force_aperture = true;
-
- return 0;
-
-out_free:
- protection_domain_free(domain);
-
- return -ENOMEM;
-}
-
-static void amd_iommu_domain_destroy(struct iommu_domain *dom)
-{
- struct protection_domain *domain = dom->priv;
-
- if (!domain)
+ if (!dom)
return;
+ domain = to_pdomain(dom);
+
if (domain->dev_cnt > 0)
cleanup_domain(domain);
@@ -3284,8 +3286,6 @@
free_gcr3_table(domain);
protection_domain_free(domain);
-
- dom->priv = NULL;
}
static void amd_iommu_detach_device(struct iommu_domain *dom,
@@ -3313,7 +3313,7 @@
static int amd_iommu_attach_device(struct iommu_domain *dom,
struct device *dev)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
struct iommu_dev_data *dev_data;
struct amd_iommu *iommu;
int ret;
@@ -3340,7 +3340,7 @@
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
phys_addr_t paddr, size_t page_size, int iommu_prot)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
int prot = 0;
int ret;
@@ -3362,7 +3362,7 @@
static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
size_t page_size)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
size_t unmap_size;
if (domain->mode == PAGE_MODE_NONE)
@@ -3380,28 +3380,22 @@
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
dma_addr_t iova)
{
- struct protection_domain *domain = dom->priv;
- unsigned long offset_mask;
- phys_addr_t paddr;
+ struct protection_domain *domain = to_pdomain(dom);
+ unsigned long offset_mask, pte_pgsize;
u64 *pte, __pte;
if (domain->mode == PAGE_MODE_NONE)
return iova;
- pte = fetch_pte(domain, iova);
+ pte = fetch_pte(domain, iova, &pte_pgsize);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
return 0;
- if (PM_PTE_LEVEL(*pte) == 0)
- offset_mask = PAGE_SIZE - 1;
- else
- offset_mask = PTE_PAGE_SIZE(*pte) - 1;
+ offset_mask = pte_pgsize - 1;
+ __pte = *pte & PM_ADDR_MASK;
- __pte = *pte & PM_ADDR_MASK;
- paddr = (__pte & ~offset_mask) | (iova & offset_mask);
-
- return paddr;
+ return (__pte & ~offset_mask) | (iova & offset_mask);
}
static bool amd_iommu_capable(enum iommu_cap cap)
@@ -3420,8 +3414,8 @@
static const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
- .domain_init = amd_iommu_domain_init,
- .domain_destroy = amd_iommu_domain_destroy,
+ .domain_alloc = amd_iommu_domain_alloc,
+ .domain_free = amd_iommu_domain_free,
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map,
@@ -3483,7 +3477,7 @@
void amd_iommu_domain_direct_map(struct iommu_domain *dom)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
@@ -3504,7 +3498,7 @@
int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int levels, ret;
@@ -3616,7 +3610,7 @@
int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
u64 address)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@@ -3638,7 +3632,7 @@
int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@@ -3718,7 +3712,7 @@
int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
unsigned long cr3)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@@ -3732,7 +3726,7 @@
int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
{
- struct protection_domain *domain = dom->priv;
+ struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@@ -3765,17 +3759,17 @@
struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
{
- struct protection_domain *domain;
+ struct protection_domain *pdomain;
- domain = get_domain(&pdev->dev);
- if (IS_ERR(domain))
+ pdomain = get_domain(&pdev->dev);
+ if (IS_ERR(pdomain))
return NULL;
/* Only return IOMMUv2 domains */
- if (!(domain->flags & PD_IOMMUV2_MASK))
+ if (!(pdomain->flags & PD_IOMMUV2_MASK))
return NULL;
- return domain->iommu_domain;
+ return &pdomain->domain;
}
EXPORT_SYMBOL(amd_iommu_get_v2_domain);
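
to_pdomain() above is one instance of the conversion that recurs throughout
this merge: each IOMMU driver now embeds the generic struct iommu_domain inside
its private domain structure and recovers the outer type with container_of(),
replacing the old dom->priv back-pointer. The pattern in isolation, with
hypothetical names:

	struct my_domain {
		/* driver-private state ... */
		struct iommu_domain domain;	/* embedded generic handle */
	};

	static struct my_domain *to_my_domain(struct iommu_domain *dom)
	{
		return container_of(dom, struct my_domain, domain);
	}
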
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c4fffb7..05030e5 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -282,6 +282,12 @@
#define PTE_PAGE_SIZE(pte) \
(1ULL << (1 + ffz(((pte) | 0xfffULL))))
+/*
+ * Takes a page-table level and returns the default page-size for this level
+ */
+#define PTE_LEVEL_PAGE_SIZE(level) \
+ (1ULL << (12 + (9 * (level))))
+
#define IOMMU_PTE_P (1ULL << 0)
#define IOMMU_PTE_TV (1ULL << 1)
#define IOMMU_PTE_U (1ULL << 59)
@@ -400,6 +406,8 @@
struct protection_domain {
struct list_head list; /* for list of all protection domains */
struct list_head dev_list; /* List of all devices in this domain */
+ struct iommu_domain domain; /* generic domain handle used by
+ iommu core code */
spinlock_t lock; /* mostly used to lock the page table*/
struct mutex api_lock; /* protect page tables in the iommu-api path */
u16 id; /* the domain id written to the device table */
@@ -411,10 +419,7 @@
bool updated; /* complete domain flush required */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
- void *priv; /* private data */
- struct iommu_domain *iommu_domain; /* Pointer to generic
- domain structure */
-
+ void *priv; /* private data */
};
/*
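
PTE_LEVEL_PAGE_SIZE() encodes the AMD IOMMU page-table geometry: 4 KiB leaves
and 9 index bits per level, so one PTE at level n covers 2^(12 + 9n) bytes:

	PTE_LEVEL_PAGE_SIZE(0)	/* 1ULL << 12 = 4 KiB */
	PTE_LEVEL_PAGE_SIZE(1)	/* 1ULL << 21 = 2 MiB */
	PTE_LEVEL_PAGE_SIZE(2)	/* 1ULL << 30 = 1 GiB */
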
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 6d5a5c4..a1cbba9 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -417,7 +417,7 @@
dev_state = pasid_state->device_state;
run_inv_ctx_cb = !pasid_state->invalid;
- if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb)
+ if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
unbind_pasid(pasid_state);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a3adde6..9f7e1d3 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -343,6 +343,7 @@
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
struct mutex init_mutex; /* Protects smmu pointer */
+ struct iommu_domain domain;
};
static struct iommu_ops arm_smmu_ops;
@@ -360,6 +361,11 @@
{ 0, NULL},
};
+static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct arm_smmu_domain, domain);
+}
+
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@@ -645,7 +651,7 @@
u32 fsr, far, fsynr, resume;
unsigned long iova;
struct iommu_domain *domain = dev;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *cb_base;
@@ -730,6 +736,20 @@
stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ if (smmu->version > ARM_SMMU_V1) {
+ /*
+ * CBA2R.
+ * *Must* be initialised before CBAR thanks to VMID16
+ * architectural oversight affecting some implementations.
+ */
+#ifdef CONFIG_64BIT
+ reg = CBA2R_RW64_64BIT;
+#else
+ reg = CBA2R_RW64_32BIT;
+#endif
+ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
+ }
+
/* CBAR */
reg = cfg->cbar;
if (smmu->version == ARM_SMMU_V1)
@@ -747,16 +767,6 @@
}
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
- if (smmu->version > ARM_SMMU_V1) {
- /* CBA2R */
-#ifdef CONFIG_64BIT
- reg = CBA2R_RW64_64BIT;
-#else
- reg = CBA2R_RW64_32BIT;
-#endif
- writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
- }
-
/* TTBRs */
if (stage1) {
reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
@@ -836,7 +846,7 @@
struct io_pgtable_ops *pgtbl_ops;
struct io_pgtable_cfg pgtbl_cfg;
enum io_pgtable_fmt fmt;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
mutex_lock(&smmu_domain->init_mutex);
@@ -958,7 +968,7 @@
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
void __iomem *cb_base;
@@ -985,10 +995,12 @@
__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
-static int arm_smmu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a master.
@@ -996,17 +1008,17 @@
*/
smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
if (!smmu_domain)
- return -ENOMEM;
+ return NULL;
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->pgtbl_lock);
- domain->priv = smmu_domain;
- return 0;
+
+ return &smmu_domain->domain;
}
-static void arm_smmu_domain_destroy(struct iommu_domain *domain)
+static void arm_smmu_domain_free(struct iommu_domain *domain)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
/*
* Free the domain resources. We assume that all devices have
@@ -1143,7 +1155,7 @@
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu;
struct arm_smmu_master_cfg *cfg;
@@ -1187,7 +1199,7 @@
static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_master_cfg *cfg;
cfg = find_smmu_master_cfg(dev);
@@ -1203,7 +1215,7 @@
{
int ret;
unsigned long flags;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
if (!ops)
@@ -1220,7 +1232,7 @@
{
size_t ret;
unsigned long flags;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
if (!ops)
@@ -1235,7 +1247,7 @@
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
@@ -1281,7 +1293,7 @@
{
phys_addr_t ret;
unsigned long flags;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
if (!ops)
@@ -1329,61 +1341,83 @@
kfree(data);
}
-static int arm_smmu_add_device(struct device *dev)
+static int arm_smmu_add_pci_device(struct pci_dev *pdev)
{
- struct arm_smmu_device *smmu;
- struct arm_smmu_master_cfg *cfg;
+ int i, ret;
+ u16 sid;
struct iommu_group *group;
- void (*releasefn)(void *) = NULL;
- int ret;
+ struct arm_smmu_master_cfg *cfg;
- smmu = find_smmu_for_device(dev);
- if (!smmu)
- return -ENODEV;
-
- group = iommu_group_alloc();
- if (IS_ERR(group)) {
- dev_err(dev, "Failed to allocate IOMMU group\n");
+ group = iommu_group_get_for_dev(&pdev->dev);
+ if (IS_ERR(group))
return PTR_ERR(group);
- }
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
+ cfg = iommu_group_get_iommudata(group);
+ if (!cfg) {
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
ret = -ENOMEM;
goto out_put_group;
}
- cfg->num_streamids = 1;
- /*
- * Assume Stream ID == Requester ID for now.
- * We need a way to describe the ID mappings in FDT.
- */
- pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
- &cfg->streamids[0]);
- releasefn = __arm_smmu_release_pci_iommudata;
- } else {
- struct arm_smmu_master *master;
-
- master = find_smmu_master(smmu, dev->of_node);
- if (!master) {
- ret = -ENODEV;
- goto out_put_group;
- }
-
- cfg = &master->cfg;
+ iommu_group_set_iommudata(group, cfg,
+ __arm_smmu_release_pci_iommudata);
}
- iommu_group_set_iommudata(group, cfg, releasefn);
- ret = iommu_group_add_device(group, dev);
+ if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) {
+ ret = -ENOSPC;
+ goto out_put_group;
+ }
+ /*
+ * Assume Stream ID == Requester ID for now.
+ * We need a way to describe the ID mappings in FDT.
+ */
+ pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
+ for (i = 0; i < cfg->num_streamids; ++i)
+ if (cfg->streamids[i] == sid)
+ break;
+
+ /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
+ if (i == cfg->num_streamids)
+ cfg->streamids[cfg->num_streamids++] = sid;
+
+ return 0;
out_put_group:
iommu_group_put(group);
return ret;
}
+static int arm_smmu_add_platform_device(struct device *dev)
+{
+ struct iommu_group *group;
+ struct arm_smmu_master *master;
+ struct arm_smmu_device *smmu = find_smmu_for_device(dev);
+
+ if (!smmu)
+ return -ENODEV;
+
+ master = find_smmu_master(smmu, dev->of_node);
+ if (!master)
+ return -ENODEV;
+
+ /* No automatic group creation for platform devices */
+ group = iommu_group_alloc();
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ iommu_group_set_iommudata(group, &master->cfg, NULL);
+ return iommu_group_add_device(group, dev);
+}
+
+static int arm_smmu_add_device(struct device *dev)
+{
+ if (dev_is_pci(dev))
+ return arm_smmu_add_pci_device(to_pci_dev(dev));
+
+ return arm_smmu_add_platform_device(dev);
+}
+
static void arm_smmu_remove_device(struct device *dev)
{
iommu_group_remove_device(dev);
@@ -1392,7 +1426,7 @@
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
switch (attr) {
case DOMAIN_ATTR_NESTING:
@@ -1407,7 +1441,7 @@
enum iommu_attr attr, void *data)
{
int ret = 0;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
mutex_lock(&smmu_domain->init_mutex);
@@ -1435,8 +1469,8 @@
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
- .domain_init = arm_smmu_domain_init,
- .domain_destroy = arm_smmu_domain_destroy,
+ .domain_alloc = arm_smmu_domain_alloc,
+ .domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
.detach_dev = arm_smmu_detach_dev,
.map = arm_smmu_map,
@@ -1633,6 +1667,15 @@
size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
smmu->pa_size = size;
+ /*
+ * What the page table walker can address actually depends on which
+ * descriptor format is in use, but since a) we don't know that yet,
+ * and b) it can vary per context bank, this will have to do...
+ */
+ if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
+ dev_warn(smmu->dev,
+ "failed to set DMA mask for table walker\n");
+
if (smmu->version == ARM_SMMU_V1) {
smmu->va_size = smmu->ipa_size;
size = SZ_4K | SZ_2M | SZ_1G;
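
Two independent arm-smmu fixes sit above. The context-bank hunks move the CBA2R
write ahead of CBAR because of the VMID16 ordering issue noted in the comment;
the resulting program order, sketched with the CBAR register build elided:

	if (smmu->version > ARM_SMMU_V1) {
	#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
	#else
		reg = CBA2R_RW64_32BIT;
	#endif
		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}
	/* only now is CBAR programmed ... */

The PCI path meanwhile switches to iommu_group_get_for_dev(), so DMA aliases
share one group and one master cfg, with duplicate stream IDs filtered before
being appended (duplicates would otherwise cause stream-match-register
conflicts).
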
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index dc14fec4..3e89850 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -200,6 +200,7 @@
short *lv2entcnt; /* free lv2 entry counter for each section */
spinlock_t lock; /* lock for this structure */
spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
+ struct iommu_domain domain; /* generic domain data structure */
};
struct sysmmu_drvdata {
@@ -214,6 +215,11 @@
phys_addr_t pgtable;
};
+static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct exynos_iommu_domain, domain);
+}
+
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
/* return true if the System MMU was not active previously
@@ -696,58 +702,60 @@
virt_to_phys(vaend));
}
-static int exynos_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
- struct exynos_iommu_domain *priv;
+ struct exynos_iommu_domain *exynos_domain;
int i;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
- priv->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
- if (!priv->pgtable)
+ exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL);
+ if (!exynos_domain)
+ return NULL;
+
+ exynos_domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
+ if (!exynos_domain->pgtable)
goto err_pgtable;
- priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
- if (!priv->lv2entcnt)
+ exynos_domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!exynos_domain->lv2entcnt)
goto err_counter;
/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
- priv->pgtable[i + 0] = ZERO_LV2LINK;
- priv->pgtable[i + 1] = ZERO_LV2LINK;
- priv->pgtable[i + 2] = ZERO_LV2LINK;
- priv->pgtable[i + 3] = ZERO_LV2LINK;
- priv->pgtable[i + 4] = ZERO_LV2LINK;
- priv->pgtable[i + 5] = ZERO_LV2LINK;
- priv->pgtable[i + 6] = ZERO_LV2LINK;
- priv->pgtable[i + 7] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 0] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 1] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 2] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 3] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 4] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 5] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 6] = ZERO_LV2LINK;
+ exynos_domain->pgtable[i + 7] = ZERO_LV2LINK;
}
- pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
+ pgtable_flush(exynos_domain->pgtable, exynos_domain->pgtable + NUM_LV1ENTRIES);
- spin_lock_init(&priv->lock);
- spin_lock_init(&priv->pgtablelock);
- INIT_LIST_HEAD(&priv->clients);
+ spin_lock_init(&exynos_domain->lock);
+ spin_lock_init(&exynos_domain->pgtablelock);
+ INIT_LIST_HEAD(&exynos_domain->clients);
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = ~0UL;
- domain->geometry.force_aperture = true;
+ exynos_domain->domain.geometry.aperture_start = 0;
+ exynos_domain->domain.geometry.aperture_end = ~0UL;
+ exynos_domain->domain.geometry.force_aperture = true;
- domain->priv = priv;
- return 0;
+ return &exynos_domain->domain;
err_counter:
- free_pages((unsigned long)priv->pgtable, 2);
+ free_pages((unsigned long)exynos_domain->pgtable, 2);
err_pgtable:
- kfree(priv);
- return -ENOMEM;
+ kfree(exynos_domain);
+ return NULL;
}
-static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
+static void exynos_iommu_domain_free(struct iommu_domain *domain)
{
- struct exynos_iommu_domain *priv = domain->priv;
+ struct exynos_iommu_domain *priv = to_exynos_domain(domain);
struct exynos_iommu_owner *owner;
unsigned long flags;
int i;
@@ -773,15 +781,14 @@
free_pages((unsigned long)priv->pgtable, 2);
free_pages((unsigned long)priv->lv2entcnt, 1);
- kfree(domain->priv);
- domain->priv = NULL;
+ kfree(priv);
}
static int exynos_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
- struct exynos_iommu_domain *priv = domain->priv;
+ struct exynos_iommu_domain *priv = to_exynos_domain(domain);
phys_addr_t pagetable = virt_to_phys(priv->pgtable);
unsigned long flags;
int ret;
@@ -812,7 +819,7 @@
struct device *dev)
{
struct exynos_iommu_owner *owner;
- struct exynos_iommu_domain *priv = domain->priv;
+ struct exynos_iommu_domain *priv = to_exynos_domain(domain);
phys_addr_t pagetable = virt_to_phys(priv->pgtable);
unsigned long flags;
@@ -988,7 +995,7 @@
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
phys_addr_t paddr, size_t size, int prot)
{
- struct exynos_iommu_domain *priv = domain->priv;
+ struct exynos_iommu_domain *priv = to_exynos_domain(domain);
sysmmu_pte_t *entry;
sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
unsigned long flags;
@@ -1042,7 +1049,7 @@
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
unsigned long l_iova, size_t size)
{
- struct exynos_iommu_domain *priv = domain->priv;
+ struct exynos_iommu_domain *priv = to_exynos_domain(domain);
sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
sysmmu_pte_t *ent;
size_t err_pgsize;
@@ -1119,7 +1126,7 @@
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct exynos_iommu_domain *priv = domain->priv;
+ struct exynos_iommu_domain *priv = to_exynos_domain(domain);
sysmmu_pte_t *entry;
unsigned long flags;
phys_addr_t phys = 0;
@@ -1171,8 +1178,8 @@
}
static const struct iommu_ops exynos_iommu_ops = {
- .domain_init = exynos_iommu_domain_init,
- .domain_destroy = exynos_iommu_domain_destroy,
+ .domain_alloc = exynos_iommu_domain_alloc,
+ .domain_free = exynos_iommu_domain_free,
.attach_dev = exynos_iommu_attach_device,
.detach_dev = exynos_iommu_detach_device,
.map = exynos_iommu_map,
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index ceebd28..1d45293 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -33,6 +33,11 @@
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);
+static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct fsl_dma_domain, iommu_domain);
+}
+
static int __init iommu_init_mempool(void)
{
fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
@@ -65,7 +70,7 @@
struct dma_window *win_ptr = &dma_domain->win_arr[0];
struct iommu_domain_geometry *geom;
- geom = &dma_domain->iommu_domain->geometry;
+ geom = &dma_domain->iommu_domain.geometry;
if (!win_cnt || !dma_domain->geom_size) {
pr_debug("Number of windows/geometry not configured for the domain\n");
@@ -123,7 +128,7 @@
{
int ret;
struct dma_window *wnd = &dma_domain->win_arr[0];
- phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
+ phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
unsigned long flags;
spin_lock_irqsave(&iommu_lock, flags);
@@ -172,7 +177,7 @@
} else {
phys_addr_t wnd_addr;
- wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
+ wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
ret = pamu_config_ppaace(liodn, wnd_addr,
wnd->size,
@@ -384,7 +389,7 @@
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
if (iova < domain->geometry.aperture_start ||
iova > domain->geometry.aperture_end)
@@ -398,11 +403,9 @@
return cap == IOMMU_CAP_CACHE_COHERENCY;
}
-static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
+static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
-
- domain->priv = NULL;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
/* remove all the devices from the device list */
detach_device(NULL, dma_domain);
@@ -413,23 +416,24 @@
kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}
-static int fsl_pamu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
struct fsl_dma_domain *dma_domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
dma_domain = iommu_alloc_dma_domain();
if (!dma_domain) {
pr_debug("dma_domain allocation failed\n");
- return -ENOMEM;
+ return NULL;
}
- domain->priv = dma_domain;
- dma_domain->iommu_domain = domain;
/* default geometry 64 GB, i.e. maximum system address */
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = (1ULL << 36) - 1;
- domain->geometry.force_aperture = true;
+ dma_domain->iommu_domain.geometry.aperture_start = 0;
+ dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
+ dma_domain->iommu_domain.geometry.force_aperture = true;
- return 0;
+ return &dma_domain->iommu_domain;
}
/* Configure geometry settings for all LIODNs associated with domain */
@@ -499,7 +503,7 @@
static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
unsigned long flags;
int ret;
@@ -530,7 +534,7 @@
static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
struct dma_window *wnd;
int pamu_prot = 0;
int ret;
@@ -607,7 +611,7 @@
int num)
{
unsigned long flags;
- struct iommu_domain *domain = dma_domain->iommu_domain;
+ struct iommu_domain *domain = &dma_domain->iommu_domain;
int ret = 0;
int i;
@@ -653,7 +657,7 @@
static int fsl_pamu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
const u32 *liodn;
u32 liodn_cnt;
int len, ret = 0;
@@ -691,7 +695,7 @@
static void fsl_pamu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
const u32 *prop;
int len;
struct pci_dev *pdev = NULL;
@@ -723,7 +727,7 @@
static int configure_domain_geometry(struct iommu_domain *domain, void *data)
{
struct iommu_domain_geometry *geom_attr = data;
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
dma_addr_t geom_size;
unsigned long flags;
@@ -813,7 +817,7 @@
static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
enum iommu_attr attr_type, void *data)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
int ret = 0;
switch (attr_type) {
@@ -838,7 +842,7 @@
static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
enum iommu_attr attr_type, void *data)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
int ret = 0;
switch (attr_type) {
@@ -999,7 +1003,7 @@
static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
unsigned long flags;
int ret;
@@ -1048,15 +1052,15 @@
static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
return dma_domain->win_cnt;
}
static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
- .domain_init = fsl_pamu_domain_init,
- .domain_destroy = fsl_pamu_domain_destroy,
+ .domain_alloc = fsl_pamu_domain_alloc,
+ .domain_free = fsl_pamu_domain_free,
.attach_dev = fsl_pamu_attach_device,
.detach_dev = fsl_pamu_detach_device,
.domain_window_enable = fsl_pamu_window_enable,
diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h
index c90293f..f2b0f74 100644
--- a/drivers/iommu/fsl_pamu_domain.h
+++ b/drivers/iommu/fsl_pamu_domain.h
@@ -71,7 +71,7 @@
u32 stash_id;
struct pamu_stash_attribute dma_stash;
u32 snoop_id;
- struct iommu_domain *iommu_domain;
+ struct iommu_domain iommu_domain;
spinlock_t domain_lock;
};
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2d1e05b..d42a3f3 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -339,7 +339,7 @@
DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
/* bitmap of iommus this domain uses */
- struct list_head devices; /* all devices' list */
+ struct list_head devices; /* all devices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
struct dma_pte *pgd; /* virtual address */
@@ -358,6 +358,9 @@
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
spinlock_t iommu_lock; /* protect iommu set in domain */
u64 max_addr; /* maximum mapped address */
+
+ struct iommu_domain domain; /* generic domain data structure for
+ iommu core */
};
/* PCI domain-device relationship */
@@ -449,6 +452,12 @@
static const struct iommu_ops intel_iommu_ops;
+/* Convert generic 'struct iommu_domain' to private struct dmar_domain */
+static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct dmar_domain, domain);
+}
+
static int __init intel_iommu_setup(char *str)
{
if (!str)
@@ -595,12 +604,13 @@
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- int i, found = 0;
+ bool found = false;
+ int i;
domain->iommu_coherency = 1;
for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
- found = 1;
+ found = true;
if (!ecap_coherent(g_iommus[i]->ecap)) {
domain->iommu_coherency = 0;
break;
@@ -1267,7 +1277,7 @@
iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
- int found = 0;
+ bool found = false;
unsigned long flags;
struct device_domain_info *info;
struct pci_dev *pdev;
@@ -1282,7 +1292,7 @@
list_for_each_entry(info, &domain->devices, link)
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
- found = 1;
+ found = true;
break;
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -4269,7 +4279,7 @@
struct device_domain_info *info, *tmp;
struct intel_iommu *iommu;
unsigned long flags;
- int found = 0;
+ bool found = false;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
@@ -4301,7 +4311,7 @@
* update iommu count and coherency
*/
if (info->iommu == iommu)
- found = 1;
+ found = true;
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -4339,44 +4349,45 @@
return 0;
}
-static int intel_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
struct dmar_domain *dmar_domain;
+ struct iommu_domain *domain;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
if (!dmar_domain) {
printk(KERN_ERR
"intel_iommu_domain_init: dmar_domain == NULL\n");
- return -ENOMEM;
+ return NULL;
}
if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
printk(KERN_ERR
"intel_iommu_domain_init() failed\n");
domain_exit(dmar_domain);
- return -ENOMEM;
+ return NULL;
}
domain_update_iommu_cap(dmar_domain);
- domain->priv = dmar_domain;
+ domain = &dmar_domain->domain;
domain->geometry.aperture_start = 0;
domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
domain->geometry.force_aperture = true;
- return 0;
+ return domain;
}
-static void intel_iommu_domain_destroy(struct iommu_domain *domain)
+static void intel_iommu_domain_free(struct iommu_domain *domain)
{
- struct dmar_domain *dmar_domain = domain->priv;
-
- domain->priv = NULL;
- domain_exit(dmar_domain);
+ domain_exit(to_dmar_domain(domain));
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct dmar_domain *dmar_domain = domain->priv;
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct intel_iommu *iommu;
int addr_width;
u8 bus, devfn;
@@ -4441,16 +4452,14 @@
static void intel_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct dmar_domain *dmar_domain = domain->priv;
-
- domain_remove_one_dev_info(dmar_domain, dev);
+ domain_remove_one_dev_info(to_dmar_domain(domain), dev);
}
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
size_t size, int iommu_prot)
{
- struct dmar_domain *dmar_domain = domain->priv;
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr;
int prot = 0;
int ret;
@@ -4487,7 +4496,7 @@
static size_t intel_iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
- struct dmar_domain *dmar_domain = domain->priv;
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct page *freelist = NULL;
struct intel_iommu *iommu;
unsigned long start_pfn, last_pfn;
@@ -4535,7 +4544,7 @@
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct dmar_domain *dmar_domain = domain->priv;
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct dma_pte *pte;
int level = 0;
u64 phys = 0;
@@ -4594,8 +4603,8 @@
static const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
- .domain_init = intel_iommu_domain_init,
- .domain_destroy = intel_iommu_domain_destroy,
+ .domain_alloc = intel_iommu_domain_alloc,
+ .domain_free = intel_iommu_domain_free,
.attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device,
.map = intel_iommu_map,
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 14de1ab..6c25b3c 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -631,7 +631,7 @@
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- int setup = 0;
+ bool setup = false;
int eim = 0;
if (x2apic_supported()) {
@@ -697,7 +697,7 @@
*/
for_each_iommu(iommu, drhd) {
iommu_set_irq_remapping(iommu, eim);
- setup = 1;
+ setup = true;
}
if (!setup)
@@ -856,7 +856,7 @@
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- int ir_supported = 0;
+ bool ir_supported = false;
int ioapic_idx;
for_each_iommu(iommu, drhd)
@@ -864,7 +864,7 @@
if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
return -1;
- ir_supported = 1;
+ ir_supported = true;
}
if (!ir_supported)
@@ -917,7 +917,7 @@
static int reenable_irq_remapping(int eim)
{
struct dmar_drhd_unit *drhd;
- int setup = 0;
+ bool setup = false;
struct intel_iommu *iommu = NULL;
for_each_iommu(iommu, drhd)
@@ -933,7 +933,7 @@
/* Set up interrupt remapping for iommu.*/
iommu_set_irq_remapping(iommu, eim);
- setup = 1;
+ setup = true;
}
if (!setup)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index b610a8d..4e46021 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -116,6 +116,8 @@
#define ARM_32_LPAE_TCR_EAE (1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
+#define ARM_LPAE_TCR_EPD1 (1 << 23)
+
#define ARM_LPAE_TCR_TG0_4K (0 << 14)
#define ARM_LPAE_TCR_TG0_64K (1 << 14)
#define ARM_LPAE_TCR_TG0_16K (2 << 14)
@@ -621,6 +623,9 @@
}
reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
+
+ /* Disable speculative walks through TTBR1 */
+ reg |= ARM_LPAE_TCR_EPD1;
cfg->arm_lpae_s1_cfg.tcr = reg;
/* MAIRs */
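
Bit 23 of the stage-1 TCR (EPD1) tells the walker never to fetch translation tables through TTBR1, which this code leaves unprogrammed; without it the hardware may speculatively walk through whatever stale value sits in that register. A minimal sketch of the TCR composition, reusing the macro values above; tcr_for_ias() is an illustrative helper, not driver code:

	#include <stdint.h>

	#define ARM_LPAE_TCR_T0SZ_SHIFT	0
	#define ARM_LPAE_TCR_EPD1	(1u << 23)	/* no walks via TTBR1 */

	/* Illustrative: build a stage-1 TCR for a given input address size */
	static uint32_t tcr_for_ias(unsigned int ias)
	{
		uint32_t reg = 0;

		reg |= (64u - ias) << ARM_LPAE_TCR_T0SZ_SHIFT; /* T0SZ = 64 - IAS */
		reg |= ARM_LPAE_TCR_EPD1; /* TTBR1 is never set up: fence it off */
		return reg;
	}
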
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 72e683d..d4f527e 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -901,36 +901,24 @@
struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
struct iommu_domain *domain;
- int ret;
if (bus == NULL || bus->iommu_ops == NULL)
return NULL;
- domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
if (!domain)
return NULL;
- domain->ops = bus->iommu_ops;
-
- ret = domain->ops->domain_init(domain);
- if (ret)
- goto out_free;
+ domain->ops = bus->iommu_ops;
+ domain->type = IOMMU_DOMAIN_UNMANAGED;
return domain;
-
-out_free:
- kfree(domain);
-
- return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);
void iommu_domain_free(struct iommu_domain *domain)
{
- if (likely(domain->ops->domain_destroy != NULL))
- domain->ops->domain_destroy(domain);
-
- kfree(domain);
+ domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
@@ -1049,6 +1037,9 @@
domain->ops->pgsize_bitmap == 0UL))
return -ENODEV;
+ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
+ return -EINVAL;
+
/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
@@ -1100,6 +1091,9 @@
domain->ops->pgsize_bitmap == 0UL))
return -ENODEV;
+ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
+ return -EINVAL;
+
/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
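
The conversions in this series all follow the same recipe: the generic struct iommu_domain stops being a separately allocated object reached through ->priv and becomes a member embedded in the driver's own domain structure, recovered with container_of(). A minimal sketch of the pattern, with foo_domain as a made-up stand-in for any of the drivers below:

	#include <linux/iommu.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct foo_domain {
		spinlock_t lock;		/* driver-private state */
		struct iommu_domain domain;	/* embedded generic handle */
	};

	static struct foo_domain *to_foo_domain(struct iommu_domain *dom)
	{
		return container_of(dom, struct foo_domain, domain);
	}

	static struct iommu_domain *foo_domain_alloc(unsigned type)
	{
		struct foo_domain *foo;

		if (type != IOMMU_DOMAIN_UNMANAGED)
			return NULL;

		foo = kzalloc(sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return NULL;

		spin_lock_init(&foo->lock);
		return &foo->domain;	/* the core only sees the handle */
	}

	static void foo_domain_free(struct iommu_domain *dom)
	{
		kfree(to_foo_domain(dom));	/* frees the whole container */
	}
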
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index bc39bdf..1a67c53 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -38,7 +38,7 @@
struct ipmmu_vmsa_domain {
struct ipmmu_vmsa_device *mmu;
- struct iommu_domain *io_domain;
+ struct iommu_domain io_domain;
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
@@ -56,6 +56,11 @@
static DEFINE_SPINLOCK(ipmmu_devices_lock);
static LIST_HEAD(ipmmu_devices);
+static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
+}
+
#define TLB_LOOP_TIMEOUT 100 /* 100us */
/* -----------------------------------------------------------------------------
@@ -428,7 +433,7 @@
* TODO: We need to look up the faulty device based on the I/O VA. Use
* the IOMMU device for now.
*/
- if (!report_iommu_fault(domain->io_domain, mmu->dev, iova, 0))
+ if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
return IRQ_HANDLED;
dev_err_ratelimited(mmu->dev,
@@ -448,7 +453,7 @@
return IRQ_NONE;
io_domain = mmu->mapping->domain;
- domain = io_domain->priv;
+ domain = to_vmsa_domain(io_domain);
return ipmmu_domain_irq(domain);
}
@@ -457,25 +462,25 @@
* IOMMU Operations
*/
-static int ipmmu_domain_init(struct iommu_domain *io_domain)
+static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
struct ipmmu_vmsa_domain *domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
- return -ENOMEM;
+ return NULL;
spin_lock_init(&domain->lock);
- io_domain->priv = domain;
- domain->io_domain = io_domain;
-
- return 0;
+ return &domain->io_domain;
}
-static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
+static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
- struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
/*
* Free the domain resources. We assume that all devices have already
@@ -491,7 +496,7 @@
{
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
struct ipmmu_vmsa_device *mmu = archdata->mmu;
- struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
unsigned long flags;
unsigned int i;
int ret = 0;
@@ -532,7 +537,7 @@
struct device *dev)
{
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
- struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
unsigned int i;
for (i = 0; i < archdata->num_utlbs; ++i)
@@ -546,7 +551,7 @@
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
- struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
if (!domain)
return -ENODEV;
@@ -557,7 +562,7 @@
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
size_t size)
{
- struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
return domain->iop->unmap(domain->iop, iova, size);
}
@@ -565,7 +570,7 @@
static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
dma_addr_t iova)
{
- struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
/* TODO: Is locking needed? */
@@ -737,8 +742,8 @@
}
static const struct iommu_ops ipmmu_ops = {
- .domain_init = ipmmu_domain_init,
- .domain_destroy = ipmmu_domain_destroy,
+ .domain_alloc = ipmmu_domain_alloc,
+ .domain_free = ipmmu_domain_free,
.attach_dev = ipmmu_attach_device,
.detach_dev = ipmmu_detach_device,
.map = ipmmu_map,
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index e1b0537..15a2063 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -52,8 +52,14 @@
struct msm_priv {
unsigned long *pgtable;
struct list_head list_attached;
+ struct iommu_domain domain;
};
+static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
+{
+ return container_of(dom, struct msm_priv, domain);
+}
+
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
int ret;
@@ -79,7 +85,7 @@
static int __flush_iotlb(struct iommu_domain *domain)
{
- struct msm_priv *priv = domain->priv;
+ struct msm_priv *priv = to_msm_priv(domain);
struct msm_iommu_drvdata *iommu_drvdata;
struct msm_iommu_ctx_drvdata *ctx_drvdata;
int ret = 0;
@@ -209,10 +215,14 @@
SET_M(base, ctx, 1);
}
-static int msm_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
- struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ struct msm_priv *priv;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto fail_nomem;
@@ -224,20 +234,19 @@
goto fail_nomem;
memset(priv->pgtable, 0, SZ_16K);
- domain->priv = priv;
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = (1ULL << 32) - 1;
- domain->geometry.force_aperture = true;
+ priv->domain.geometry.aperture_start = 0;
+ priv->domain.geometry.aperture_end = (1ULL << 32) - 1;
+ priv->domain.geometry.force_aperture = true;
- return 0;
+ return &priv->domain;
fail_nomem:
kfree(priv);
- return -ENOMEM;
+ return NULL;
}
-static void msm_iommu_domain_destroy(struct iommu_domain *domain)
+static void msm_iommu_domain_free(struct iommu_domain *domain)
{
struct msm_priv *priv;
unsigned long flags;
@@ -245,20 +254,17 @@
int i;
spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = domain->priv;
- domain->priv = NULL;
+ priv = to_msm_priv(domain);
- if (priv) {
- fl_table = priv->pgtable;
+ fl_table = priv->pgtable;
- for (i = 0; i < NUM_FL_PTE; i++)
- if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
- free_page((unsigned long) __va(((fl_table[i]) &
- FL_BASE_MASK)));
+ for (i = 0; i < NUM_FL_PTE; i++)
+ if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
+ free_page((unsigned long) __va(((fl_table[i]) &
+ FL_BASE_MASK)));
- free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
- priv->pgtable = NULL;
- }
+ free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
+ priv->pgtable = NULL;
kfree(priv);
spin_unlock_irqrestore(&msm_iommu_lock, flags);
@@ -276,9 +282,9 @@
spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = domain->priv;
+ priv = to_msm_priv(domain);
- if (!priv || !dev) {
+ if (!dev) {
ret = -EINVAL;
goto fail;
}
@@ -330,9 +336,9 @@
int ret;
spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = domain->priv;
+ priv = to_msm_priv(domain);
- if (!priv || !dev)
+ if (!dev)
goto fail;
iommu_drvdata = dev_get_drvdata(dev->parent);
@@ -382,11 +388,7 @@
goto fail;
}
- priv = domain->priv;
- if (!priv) {
- ret = -EINVAL;
- goto fail;
- }
+ priv = to_msm_priv(domain);
fl_table = priv->pgtable;
@@ -484,10 +486,7 @@
spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = domain->priv;
-
- if (!priv)
- goto fail;
+ priv = to_msm_priv(domain);
fl_table = priv->pgtable;
@@ -566,7 +565,7 @@
spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = domain->priv;
+ priv = to_msm_priv(domain);
if (list_empty(&priv->list_attached))
goto fail;
@@ -674,8 +673,8 @@
static const struct iommu_ops msm_iommu_ops = {
.capable = msm_iommu_capable,
- .domain_init = msm_iommu_domain_init,
- .domain_destroy = msm_iommu_domain_destroy,
+ .domain_alloc = msm_iommu_domain_alloc,
+ .domain_free = msm_iommu_domain_free,
.attach_dev = msm_iommu_attach_dev,
.detach_dev = msm_iommu_detach_dev,
.map = msm_iommu_map,
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index a4ba851..a22c33d 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -59,6 +59,7 @@
struct omap_iommu *iommu_dev;
struct device *dev;
spinlock_t lock;
+ struct iommu_domain domain;
};
#define MMU_LOCK_BASE_SHIFT 10
@@ -80,6 +81,15 @@
static struct kmem_cache *iopte_cachep;
/**
+ * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
+ * @dom: generic iommu domain handle
+ **/
+static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct omap_iommu_domain, domain);
+}
+
+/**
* omap_iommu_save_ctx - Save registers for pm off-mode support
* @dev: client device
**/
@@ -901,7 +911,7 @@
u32 *iopgd, *iopte;
struct omap_iommu *obj = data;
struct iommu_domain *domain = obj->domain;
- struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
if (!omap_domain->iommu_dev)
return IRQ_NONE;
@@ -1113,7 +1123,7 @@
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
phys_addr_t pa, size_t bytes, int prot)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
struct iotlb_entry e;
@@ -1140,7 +1150,7 @@
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
size_t size)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
@@ -1152,7 +1162,7 @@
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu;
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
int ret = 0;
@@ -1212,17 +1222,20 @@
static void omap_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
spin_lock(&omap_domain->lock);
_omap_iommu_detach_dev(omap_domain, dev);
spin_unlock(&omap_domain->lock);
}
-static int omap_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
struct omap_iommu_domain *omap_domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
if (!omap_domain) {
pr_err("kzalloc failed\n");
@@ -1244,25 +1257,21 @@
clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
spin_lock_init(&omap_domain->lock);
- domain->priv = omap_domain;
+ omap_domain->domain.geometry.aperture_start = 0;
+ omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
+ omap_domain->domain.geometry.force_aperture = true;
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = (1ULL << 32) - 1;
- domain->geometry.force_aperture = true;
-
- return 0;
+ return &omap_domain->domain;
fail_nomem:
kfree(omap_domain);
out:
- return -ENOMEM;
+ return NULL;
}
-static void omap_iommu_domain_destroy(struct iommu_domain *domain)
+static void omap_iommu_domain_free(struct iommu_domain *domain)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
-
- domain->priv = NULL;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
/*
* An iommu device is still attached
@@ -1278,7 +1287,7 @@
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t da)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
u32 *pgd, *pte;
@@ -1358,8 +1367,8 @@
}
static const struct iommu_ops omap_iommu_ops = {
- .domain_init = omap_iommu_domain_init,
- .domain_destroy = omap_iommu_domain_destroy,
+ .domain_alloc = omap_iommu_domain_alloc,
+ .domain_free = omap_iommu_domain_free,
.attach_dev = omap_iommu_attach_dev,
.detach_dev = omap_iommu_detach_dev,
.map = omap_iommu_map,
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 9f74fdd..4015560 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -80,6 +80,8 @@
u32 *dt; /* page directory table */
spinlock_t iommus_lock; /* lock for iommus list */
spinlock_t dt_lock; /* lock for modifying page directory table */
+
+ struct iommu_domain domain;
};
struct rk_iommu {
@@ -100,6 +102,11 @@
outer_flush_range(pa_start, pa_end);
}
+static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct rk_iommu_domain, domain);
+}
+
/**
* Inspired by _wait_for in intel_drv.h
* This is NOT safe for use in interrupt context.
@@ -503,7 +510,7 @@
static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct rk_iommu_domain *rk_domain = domain->priv;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
phys_addr_t pt_phys, phys = 0;
u32 dte, pte;
@@ -639,7 +646,7 @@
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
phys_addr_t paddr, size_t size, int prot)
{
- struct rk_iommu_domain *rk_domain = domain->priv;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
dma_addr_t iova = (dma_addr_t)_iova;
u32 *page_table, *pte_addr;
@@ -670,7 +677,7 @@
static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
size_t size)
{
- struct rk_iommu_domain *rk_domain = domain->priv;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
dma_addr_t iova = (dma_addr_t)_iova;
phys_addr_t pt_phys;
@@ -726,7 +733,7 @@
struct device *dev)
{
struct rk_iommu *iommu;
- struct rk_iommu_domain *rk_domain = domain->priv;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
int ret;
phys_addr_t dte_addr;
@@ -778,7 +785,7 @@
struct device *dev)
{
struct rk_iommu *iommu;
- struct rk_iommu_domain *rk_domain = domain->priv;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
/* Allow 'virtual devices' (e.g. drm) to detach from domain */
@@ -804,13 +811,16 @@
dev_info(dev, "Detached from iommu domain\n");
}
-static int rk_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
struct rk_iommu_domain *rk_domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
- return -ENOMEM;
+ return NULL;
/*
* rk32xx iommus use a 2 level pagetable.
@@ -827,17 +837,16 @@
spin_lock_init(&rk_domain->dt_lock);
INIT_LIST_HEAD(&rk_domain->iommus);
- domain->priv = rk_domain;
+ return &rk_domain->domain;
- return 0;
err_dt:
kfree(rk_domain);
- return -ENOMEM;
+ return NULL;
}
-static void rk_iommu_domain_destroy(struct iommu_domain *domain)
+static void rk_iommu_domain_free(struct iommu_domain *domain)
{
- struct rk_iommu_domain *rk_domain = domain->priv;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
int i;
WARN_ON(!list_empty(&rk_domain->iommus));
@@ -852,8 +861,7 @@
}
free_page((unsigned long)rk_domain->dt);
- kfree(domain->priv);
- domain->priv = NULL;
+ kfree(rk_domain);
}
static bool rk_iommu_is_dev_iommu_master(struct device *dev)
@@ -952,8 +960,8 @@
}
static const struct iommu_ops rk_iommu_ops = {
- .domain_init = rk_iommu_domain_init,
- .domain_destroy = rk_iommu_domain_destroy,
+ .domain_alloc = rk_iommu_domain_alloc,
+ .domain_free = rk_iommu_domain_free,
.attach_dev = rk_iommu_attach_device,
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index f1b0077..a028751 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -42,11 +42,17 @@
spinlock_t map_lock;
spinlock_t attached_list_lock;
struct list_head attached_list;
+ struct iommu_domain domain;
};
static struct shmobile_iommu_archdata *ipmmu_archdata;
static struct kmem_cache *l1cache, *l2cache;
+static struct shmobile_iommu_domain *to_sh_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct shmobile_iommu_domain, domain);
+}
+
static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable,
struct kmem_cache *cache, size_t size)
{
@@ -82,31 +88,33 @@
sizeof(val) * count, DMA_TO_DEVICE);
}
-static int shmobile_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *shmobile_iommu_domain_alloc(unsigned type)
{
struct shmobile_iommu_domain *sh_domain;
int i, ret;
- sh_domain = kmalloc(sizeof(*sh_domain), GFP_KERNEL);
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ sh_domain = kzalloc(sizeof(*sh_domain), GFP_KERNEL);
if (!sh_domain)
- return -ENOMEM;
+ return NULL;
ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE);
if (ret < 0) {
kfree(sh_domain);
- return ret;
+ return NULL;
}
for (i = 0; i < L1_LEN; i++)
sh_domain->l2[i].pgtable = NULL;
spin_lock_init(&sh_domain->map_lock);
spin_lock_init(&sh_domain->attached_list_lock);
INIT_LIST_HEAD(&sh_domain->attached_list);
- domain->priv = sh_domain;
- return 0;
+ return &sh_domain->domain;
}
-static void shmobile_iommu_domain_destroy(struct iommu_domain *domain)
+static void shmobile_iommu_domain_free(struct iommu_domain *domain)
{
- struct shmobile_iommu_domain *sh_domain = domain->priv;
+ struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
int i;
for (i = 0; i < L1_LEN; i++) {
@@ -115,14 +123,13 @@
}
pgtable_free(&sh_domain->l1, l1cache, L1_SIZE);
kfree(sh_domain);
- domain->priv = NULL;
}
static int shmobile_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
- struct shmobile_iommu_domain *sh_domain = domain->priv;
+ struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
int ret = -EBUSY;
if (!archdata)
@@ -151,7 +158,7 @@
struct device *dev)
{
struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
- struct shmobile_iommu_domain *sh_domain = domain->priv;
+ struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
if (!archdata)
return;
@@ -214,7 +221,7 @@
phys_addr_t paddr, size_t size, int prot)
{
struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
- struct shmobile_iommu_domain *sh_domain = domain->priv;
+ struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
unsigned int l1index, l2index;
int ret;
@@ -258,7 +265,7 @@
unsigned long iova, size_t size)
{
struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
- struct shmobile_iommu_domain *sh_domain = domain->priv;
+ struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
unsigned int l1index, l2index;
uint32_t l2entry = 0;
size_t ret = 0;
@@ -298,7 +305,7 @@
static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct shmobile_iommu_domain *sh_domain = domain->priv;
+ struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
uint32_t l1entry = 0, l2entry = 0;
unsigned int l1index, l2index;
@@ -355,8 +362,8 @@
}
static const struct iommu_ops shmobile_iommu_ops = {
- .domain_init = shmobile_iommu_domain_init,
- .domain_destroy = shmobile_iommu_domain_destroy,
+ .domain_alloc = shmobile_iommu_domain_alloc,
+ .domain_free = shmobile_iommu_domain_free,
.attach_dev = shmobile_iommu_attach_device,
.detach_dev = shmobile_iommu_detach_device,
.map = shmobile_iommu_map,
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index c48da05..37e708f 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -63,11 +63,21 @@
struct device *dev;
};
+struct gart_domain {
+ struct iommu_domain domain; /* generic domain handle */
+ struct gart_device *gart; /* link to gart device */
+};
+
static struct gart_device *gart_handle; /* unique for a system */
#define GART_PTE(_pfn) \
(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
+static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct gart_domain, domain);
+}
+
/*
* Any interaction between any block on PPSB and a block on APB or AHB
* must have these read-back to ensure the APB/AHB bus transaction is
@@ -156,20 +166,11 @@
static int gart_iommu_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct gart_device *gart;
+ struct gart_domain *gart_domain = to_gart_domain(domain);
+ struct gart_device *gart = gart_domain->gart;
struct gart_client *client, *c;
int err = 0;
- gart = gart_handle;
- if (!gart)
- return -EINVAL;
- domain->priv = gart;
-
- domain->geometry.aperture_start = gart->iovmm_base;
- domain->geometry.aperture_end = gart->iovmm_base +
- gart->page_count * GART_PAGE_SIZE - 1;
- domain->geometry.force_aperture = true;
-
client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
if (!client)
return -ENOMEM;
@@ -198,7 +199,8 @@
static void gart_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct gart_device *gart = domain->priv;
+ struct gart_domain *gart_domain = to_gart_domain(domain);
+ struct gart_device *gart = gart_domain->gart;
struct gart_client *c;
spin_lock(&gart->client_lock);
@@ -216,33 +218,55 @@
spin_unlock(&gart->client_lock);
}
-static int gart_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
- return 0;
+ struct gart_domain *gart_domain;
+ struct gart_device *gart;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ gart = gart_handle;
+ if (!gart)
+ return NULL;
+
+ gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
+ if (!gart_domain)
+ return NULL;
+
+ gart_domain->gart = gart;
+ gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
+ gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
+ gart->page_count * GART_PAGE_SIZE - 1;
+ gart_domain->domain.geometry.force_aperture = true;
+
+ return &gart_domain->domain;
}
-static void gart_iommu_domain_destroy(struct iommu_domain *domain)
+static void gart_iommu_domain_free(struct iommu_domain *domain)
{
- struct gart_device *gart = domain->priv;
+ struct gart_domain *gart_domain = to_gart_domain(domain);
+ struct gart_device *gart = gart_domain->gart;
- if (!gart)
- return;
+ if (gart) {
+ spin_lock(&gart->client_lock);
+ if (!list_empty(&gart->client)) {
+ struct gart_client *c;
- spin_lock(&gart->client_lock);
- if (!list_empty(&gart->client)) {
- struct gart_client *c;
-
- list_for_each_entry(c, &gart->client, list)
- gart_iommu_detach_dev(domain, c->dev);
+ list_for_each_entry(c, &gart->client, list)
+ gart_iommu_detach_dev(domain, c->dev);
+ }
+ spin_unlock(&gart->client_lock);
}
- spin_unlock(&gart->client_lock);
- domain->priv = NULL;
+
+ kfree(gart_domain);
}
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t bytes, int prot)
{
- struct gart_device *gart = domain->priv;
+ struct gart_domain *gart_domain = to_gart_domain(domain);
+ struct gart_device *gart = gart_domain->gart;
unsigned long flags;
unsigned long pfn;
@@ -265,7 +289,8 @@
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t bytes)
{
- struct gart_device *gart = domain->priv;
+ struct gart_domain *gart_domain = to_gart_domain(domain);
+ struct gart_device *gart = gart_domain->gart;
unsigned long flags;
if (!gart_iova_range_valid(gart, iova, bytes))
@@ -281,7 +306,8 @@
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct gart_device *gart = domain->priv;
+ struct gart_domain *gart_domain = to_gart_domain(domain);
+ struct gart_device *gart = gart_domain->gart;
unsigned long pte;
phys_addr_t pa;
unsigned long flags;
@@ -310,8 +336,8 @@
static const struct iommu_ops gart_iommu_ops = {
.capable = gart_iommu_capable,
- .domain_init = gart_iommu_domain_init,
- .domain_destroy = gart_iommu_domain_destroy,
+ .domain_alloc = gart_iommu_domain_alloc,
+ .domain_free = gart_iommu_domain_free,
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
.map = gart_iommu_map,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 6e134c7..c845d99 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -6,6 +6,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
@@ -24,6 +25,8 @@
struct tegra_mc *mc;
const struct tegra_smmu_soc *soc;
+ unsigned long pfn_mask;
+
unsigned long *asids;
struct mutex lock;
@@ -31,7 +34,7 @@
};
struct tegra_smmu_as {
- struct iommu_domain *domain;
+ struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
struct page *count;
@@ -40,6 +43,11 @@
u32 attr;
};
+static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
+{
+ return container_of(dom, struct tegra_smmu_as, domain);
+}
+
static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
unsigned long offset)
{
@@ -105,8 +113,6 @@
#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12
-#define SMMU_PFN_MASK 0x000fffff
-
#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)
@@ -224,30 +230,32 @@
return false;
}
-static int tegra_smmu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
struct tegra_smmu_as *as;
unsigned int i;
uint32_t *pd;
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
as = kzalloc(sizeof(*as), GFP_KERNEL);
if (!as)
- return -ENOMEM;
+ return NULL;
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
- as->domain = domain;
as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
if (!as->pd) {
kfree(as);
- return -ENOMEM;
+ return NULL;
}
as->count = alloc_page(GFP_KERNEL);
if (!as->count) {
__free_page(as->pd);
kfree(as);
- return -ENOMEM;
+ return NULL;
}
/* clear PDEs */
@@ -264,14 +272,17 @@
for (i = 0; i < SMMU_NUM_PDE; i++)
pd[i] = 0;
- domain->priv = as;
+ /* setup aperture */
+ as->domain.geometry.aperture_start = 0;
+ as->domain.geometry.aperture_end = 0xffffffff;
+ as->domain.geometry.force_aperture = true;
- return 0;
+ return &as->domain;
}
-static void tegra_smmu_domain_destroy(struct iommu_domain *domain)
+static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
- struct tegra_smmu_as *as = domain->priv;
+ struct tegra_smmu_as *as = to_smmu_as(domain);
/* TODO: free page directory and page tables */
ClearPageReserved(as->pd);
@@ -395,7 +406,7 @@
struct device *dev)
{
struct tegra_smmu *smmu = dev->archdata.iommu;
- struct tegra_smmu_as *as = domain->priv;
+ struct tegra_smmu_as *as = to_smmu_as(domain);
struct device_node *np = dev->of_node;
struct of_phandle_args args;
unsigned int index = 0;
@@ -428,7 +439,7 @@
static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct tegra_smmu_as *as = domain->priv;
+ struct tegra_smmu_as *as = to_smmu_as(domain);
struct device_node *np = dev->of_node;
struct tegra_smmu *smmu = as->smmu;
struct of_phandle_args args;
@@ -481,7 +492,7 @@
smmu_flush_tlb_section(smmu, as->id, iova);
smmu_flush(smmu);
} else {
- page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
+ page = pfn_to_page(pd[pde] & smmu->pfn_mask);
pt = page_address(page);
}
@@ -503,7 +514,7 @@
u32 *pd = page_address(as->pd), *pt;
struct page *page;
- page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
+ page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
pt = page_address(page);
/*
@@ -524,7 +535,7 @@
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
- struct tegra_smmu_as *as = domain->priv;
+ struct tegra_smmu_as *as = to_smmu_as(domain);
struct tegra_smmu *smmu = as->smmu;
unsigned long offset;
struct page *page;
@@ -548,7 +559,7 @@
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size)
{
- struct tegra_smmu_as *as = domain->priv;
+ struct tegra_smmu_as *as = to_smmu_as(domain);
struct tegra_smmu *smmu = as->smmu;
unsigned long offset;
struct page *page;
@@ -572,13 +583,13 @@
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct tegra_smmu_as *as = domain->priv;
+ struct tegra_smmu_as *as = to_smmu_as(domain);
struct page *page;
unsigned long pfn;
u32 *pte;
pte = as_get_pte(as, iova, &page);
- pfn = *pte & SMMU_PFN_MASK;
+ pfn = *pte & as->smmu->pfn_mask;
return PFN_PHYS(pfn);
}
@@ -633,8 +644,8 @@
static const struct iommu_ops tegra_smmu_ops = {
.capable = tegra_smmu_capable,
- .domain_init = tegra_smmu_domain_init,
- .domain_destroy = tegra_smmu_domain_destroy,
+ .domain_alloc = tegra_smmu_domain_alloc,
+ .domain_free = tegra_smmu_domain_free,
.attach_dev = tegra_smmu_attach_dev,
.detach_dev = tegra_smmu_detach_dev,
.add_device = tegra_smmu_add_device,
@@ -702,6 +713,10 @@
smmu->dev = dev;
smmu->mc = mc;
+ smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
+ dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
+ mc->soc->num_address_bits, smmu->pfn_mask);
+
value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
if (soc->supports_request_limit)
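
The hardcoded 20-bit SMMU_PFN_MASK silently assumed 32 address bits; deriving the mask from the SoC's num_address_bits lets the same driver cover parts that decode wider addresses. A quick userspace check of the arithmetic, assuming 4 KiB pages; the 34-bit value is illustrative of newer SoCs:

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* 4 KiB pages */

	/* same arithmetic as BIT_MASK(bits - PAGE_SHIFT) - 1 above */
	static unsigned long pfn_mask(unsigned int num_address_bits)
	{
		return (1UL << (num_address_bits - PAGE_SHIFT)) - 1;
	}

	int main(void)
	{
		printf("%#lx\n", pfn_mask(32));	/* 0xfffff: the old constant */
		printf("%#lx\n", pfn_mask(34));	/* 0x3fffff */
		return 0;
	}
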
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9b641b3..8001fe9 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -433,7 +433,6 @@
dm_get(md);
atomic_inc(&md->open_count);
-
out:
spin_unlock(&_minor_lock);
@@ -442,16 +441,20 @@
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
- struct mapped_device *md = disk->private_data;
+ struct mapped_device *md;
spin_lock(&_minor_lock);
+ md = disk->private_data;
+ if (WARN_ON(!md))
+ goto out;
+
if (atomic_dec_and_test(&md->open_count) &&
(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
queue_work(deferred_remove_workqueue, &deferred_remove_work);
dm_put(md);
-
+out:
spin_unlock(&_minor_lock);
}
@@ -2241,7 +2244,6 @@
int minor = MINOR(disk_devt(md->disk));
unlock_fs(md);
- bdput(md->bdev);
destroy_workqueue(md->wq);
if (md->kworker_task)
@@ -2252,19 +2254,22 @@
mempool_destroy(md->rq_pool);
if (md->bs)
bioset_free(md->bs);
- blk_integrity_unregister(md->disk);
- del_gendisk(md->disk);
+
cleanup_srcu_struct(&md->io_barrier);
free_table_devices(&md->table_devices);
- free_minor(minor);
+ dm_stats_cleanup(&md->stats);
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
spin_unlock(&_minor_lock);
-
+ if (blk_get_integrity(md->disk))
+ blk_integrity_unregister(md->disk);
+ del_gendisk(md->disk);
put_disk(md->disk);
blk_cleanup_queue(md->queue);
- dm_stats_cleanup(&md->stats);
+ bdput(md->bdev);
+ free_minor(minor);
+
module_put(THIS_MODULE);
kfree(md);
}
@@ -2642,8 +2647,9 @@
might_sleep();
- spin_lock(&_minor_lock);
map = dm_get_live_table(md, &srcu_idx);
+
+ spin_lock(&_minor_lock);
idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
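
dm_blk_close() can now race with free_dev(), which clears disk->private_data under _minor_lock before tearing the disk down, so the close path must load the pointer under that same lock and tolerate NULL. The shape of the pattern, sketched with placeholder names:

	#include <linux/bug.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(lookup_lock);

	struct obj { int open_count; };

	/* teardown clears the backpointer under the lock ... */
	static void obj_detach(struct obj **slot)
	{
		spin_lock(&lookup_lock);
		*slot = NULL;
		spin_unlock(&lookup_lock);
	}

	/* ... so release must load it under the same lock and check it */
	static void obj_release(struct obj **slot)
	{
		struct obj *o;

		spin_lock(&lookup_lock);
		o = *slot;
		if (!WARN_ON(!o))	/* teardown may have won the race */
			o->open_count--;
		spin_unlock(&lookup_lock);
	}
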
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index f38ec42..5615522 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -739,7 +739,7 @@
for (id = kempld_dmi_table;
id->matches[0].slot != DMI_NONE; id++)
if (strstr(id->ident, force_device_id))
- if (id->callback && id->callback(id))
+ if (id->callback && !id->callback(id))
break;
if (id->matches[0].slot == DMI_NONE)
return -ENODEV;
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index ede5024..dbd907d 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -196,18 +196,27 @@
int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
{
u16 value;
+ u8 *buf;
+ int ret;
if (!data)
return -EINVAL;
- *data = 0;
+
+ buf = kzalloc(sizeof(u8), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
value = swab16(addr);
- return usb_control_msg(ucr->pusb_dev,
+ ret = usb_control_msg(ucr->pusb_dev,
usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, 0, data, 1, 100);
+ value, 0, buf, 1, 100);
+ *data = *buf;
+
+ kfree(buf);
+ return ret;
}
EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
@@ -288,18 +297,27 @@
int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
{
int ret;
+ u16 *buf;
if (!status)
return -EINVAL;
- if (polling_pipe == 0)
+ if (polling_pipe == 0) {
+ buf = kzalloc(sizeof(u16), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(ucr->pusb_dev,
usb_rcvctrlpipe(ucr->pusb_dev, 0),
RTSX_USB_REQ_POLL,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0, 0, status, 2, 100);
- else
+ 0, 0, buf, 2, 100);
+ *status = *buf;
+
+ kfree(buf);
+ } else {
ret = rtsx_usb_get_status_with_bulk(ucr, status);
+ }
/* usb_control_msg may return a positive value on success */
if (ret < 0)
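
usb_control_msg() hands its buffer to the host controller for DMA, so the buffer must be a standalone heap allocation; the caller's pointer may live on the stack or inside a larger structure and is not safe to map. A sketch of the bounce-buffer idiom; the request number 0x01 is arbitrary:

	#include <linux/slab.h>
	#include <linux/usb.h>

	/* illustrative wrapper: bounce through a DMA-safe allocation */
	static int read_one_byte(struct usb_device *udev, u16 value, u8 *data)
	{
		u8 *buf;
		int ret;

		buf = kmalloc(1, GFP_KERNEL);	/* never pass 'data' itself */
		if (!buf)
			return -ENOMEM;

		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				      0x01,	/* arbitrary vendor request */
				      USB_DIR_IN | USB_TYPE_VENDOR |
				      USB_RECIP_DEVICE,
				      value, 0, buf, 1, 100);
		*data = *buf;			/* copy out, then free */
		kfree(buf);
		return ret;
	}
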
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 11d6e65..15a8190 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1543,7 +1543,7 @@
{
struct pcnet32_private *lp;
int i, media;
- int fdx, mii, fset, dxsuflo;
+ int fdx, mii, fset, dxsuflo, sram;
int chip_version;
char *chipname;
struct net_device *dev;
@@ -1580,7 +1580,7 @@
}
/* initialize variables */
- fdx = mii = fset = dxsuflo = 0;
+ fdx = mii = fset = dxsuflo = sram = 0;
chip_version = (chip_version >> 12) & 0xffff;
switch (chip_version) {
@@ -1613,6 +1613,7 @@
chipname = "PCnet/FAST III 79C973"; /* PCI */
fdx = 1;
mii = 1;
+ sram = 1;
break;
case 0x2626:
chipname = "PCnet/Home 79C978"; /* PCI */
@@ -1636,6 +1637,7 @@
chipname = "PCnet/FAST III 79C975"; /* PCI */
fdx = 1;
mii = 1;
+ sram = 1;
break;
case 0x2628:
chipname = "PCnet/PRO 79C976";
@@ -1664,6 +1666,31 @@
dxsuflo = 1;
}
+ /*
+ * The Am79C973/Am79C975 controllers come with 12K of SRAM
+ * which we can use for the Tx/Rx buffers but most importantly,
+ * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid
+ * Tx FIFO underflows.
+ */
+ if (sram) {
+ /*
+ * The SRAM is configured in two steps. First we
+ * set the SRAM size in the BCR25:SRAM_SIZE bits. According
+ * to the datasheet, each bit corresponds to a 512-byte
+ * page so we can have at most 24 pages. The SRAM_SIZE
+ * holds the value of the upper 8 bits of the 16-bit SRAM size.
+ * The low 8 bits start at 0x00 and end at 0xff. So the
+ * address range is from 0x0000 up to 0x17ff. Therefore,
+ * the SRAM_SIZE is set to 0x17. The next step is to set
+ * the BCR26:SRAM_BND midway through so the Tx and Rx
+ * buffers can share the SRAM equally.
+ */
+ a->write_bcr(ioaddr, 25, 0x17);
+ a->write_bcr(ioaddr, 26, 0xc);
+ /* And finally enable the NOUFLO bit */
+ a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
+ }
+
dev = alloc_etherdev(sizeof(*lp));
if (!dev) {
ret = -ENOMEM;
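
The magic numbers check out as follows: SRAM_SIZE = 0x17 makes the last SRAM address 0x17ff, i.e. 0x1800 (6144) words, which at the 16-bit word size implied by the 12K figure is 12288 bytes; SRAM_BND = 0xc places the Tx/Rx boundary at 0xc00, exactly half. A userspace check of that arithmetic:

	#include <stdio.h>

	int main(void)
	{
		/* BCR25:SRAM_SIZE holds the upper 8 bits of the 16-bit size */
		unsigned int sram_size = 0x17;
		unsigned int last = (sram_size << 8) | 0xff;	/* 0x17ff */
		unsigned int words = last + 1;			/* 0x1800 */

		printf("words=%u bytes=%u\n", words, words * 2);	/* 6144, 12288 */
		printf("boundary=%#x\n", 0xc << 8);	/* 0xc00 = words / 2 */
		return 0;
	}
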
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 27de37a..27b9fe9 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -354,6 +354,7 @@
u16 vlan_tag;
u32 tx_rate;
u32 plink_tracking;
+ u32 privileges;
};
enum vf_state {
@@ -423,6 +424,7 @@
u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
u8 __iomem *db; /* Door Bell */
+ u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 36916cf..7f05f30 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1902,15 +1902,11 @@
{
int num_eqs, i = 0;
- if (lancer_chip(adapter) && num > 8) {
- while (num) {
- num_eqs = min(num, 8);
- __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
- i += num_eqs;
- num -= num_eqs;
- }
- } else {
- __be_cmd_modify_eqd(adapter, set_eqd, num);
+ while (num) {
+ num_eqs = min(num, 8);
+ __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
+ i += num_eqs;
+ num -= num_eqs;
}
return 0;
@@ -1918,7 +1914,7 @@
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num)
+ u32 num, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_vlan_config *req;
@@ -1936,6 +1932,7 @@
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
wrb, NULL);
+ req->hdr.domain = domain;
req->interface_id = if_id;
req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
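
The EQ-delay rework above drops the Lancer-only special case: the firmware takes at most 8 EQ descriptors per command on every chip, so one batching loop serves all of them. The generic shape of that loop, with process() standing in for __be_cmd_modify_eqd():

	#define CHUNK 8	/* firmware limit per command */

	static void process_in_chunks(int *items, int num,
				      void (*process)(int *batch, int n))
	{
		int i = 0, n;

		while (num) {
			n = num < CHUNK ? num : CHUNK;	/* min(num, 8) */
			process(&items[i], n);
			i += n;
			num -= n;
		}
	}
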
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index db761e8e..a7634a3 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2256,7 +2256,7 @@
int be_cmd_get_fw_ver(struct be_adapter *adapter);
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num);
+ u32 num, u32 domain);
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0a81685..e6b790f 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1171,7 +1171,7 @@
for_each_set_bit(i, adapter->vids, VLAN_N_VID)
vids[num++] = cpu_to_le16(i);
- status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
+ status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
if (status) {
dev_err(dev, "Setting HW VLAN filtering failed\n");
/* Set to VLAN promisc mode as setting VLAN filter failed */
@@ -1380,11 +1380,67 @@
return 0;
}
+static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
+{
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ u16 vids[BE_NUM_VLANS_SUPPORTED];
+ int vf_if_id = vf_cfg->if_handle;
+ int status;
+
+ /* Enable Transparent VLAN Tagging */
+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
+ if (status)
+ return status;
+
+ /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
+ vids[0] = 0;
+ status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
+ if (!status)
+ dev_info(&adapter->pdev->dev,
+ "Cleared guest VLANs on VF%d", vf);
+
+ /* After TVT is enabled, disallow VFs to program VLAN filters */
+ if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
+ status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
+ ~BE_PRIV_FILTMGMT, vf + 1);
+ if (!status)
+ vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
+ }
+ return 0;
+}
+
+static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
+{
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ /* Reset Transparent VLAN Tagging. */
+ status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
+ vf_cfg->if_handle, 0);
+ if (status)
+ return status;
+
+ /* Allow VFs to program VLAN filtering */
+ if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
+ status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
+ BE_PRIV_FILTMGMT, vf + 1);
+ if (!status) {
+ vf_cfg->privileges |= BE_PRIV_FILTMGMT;
+ dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
+ }
+ }
+
+ dev_info(dev,
+ "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
+ return 0;
+}
+
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
- int status = 0;
+ int status;
if (!sriov_enabled(adapter))
return -EPERM;
@@ -1394,24 +1450,19 @@
if (vlan || qos) {
vlan |= qos << VLAN_PRIO_SHIFT;
- if (vf_cfg->vlan_tag != vlan)
- status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
- vf_cfg->if_handle, 0);
+ status = be_set_vf_tvt(adapter, vf, vlan);
} else {
- /* Reset Transparent Vlan Tagging. */
- status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
- vf + 1, vf_cfg->if_handle, 0);
+ status = be_clear_vf_tvt(adapter, vf);
}
if (status) {
dev_err(&adapter->pdev->dev,
- "VLAN %d config on VF %d failed : %#x\n", vlan,
- vf, status);
+ "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
+ status);
return be_cmd_status(status);
}
vf_cfg->vlan_tag = vlan;
-
return 0;
}
@@ -2772,14 +2823,12 @@
}
}
} else {
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW, &ue_lo);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HIGH, &ue_hi);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
+ ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
+ ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
+ ue_lo_mask = ioread32(adapter->pcicfg +
+ PCICFG_UE_STATUS_LOW_MASK);
+ ue_hi_mask = ioread32(adapter->pcicfg +
+ PCICFG_UE_STATUS_HI_MASK);
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
@@ -3339,7 +3388,6 @@
u32 cap_flags, u32 vf)
{
u32 en_flags;
- int status;
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
@@ -3347,10 +3395,7 @@
en_flags &= cap_flags;
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- if_handle, vf);
-
- return status;
+ return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
}
static int be_vfs_if_create(struct be_adapter *adapter)
@@ -3368,8 +3413,13 @@
if (!BE3_chip(adapter)) {
status = be_cmd_get_profile_config(adapter, &res,
vf + 1);
- if (!status)
+ if (!status) {
cap_flags = res.if_cap_flags;
+ /* Prevent VFs from enabling VLAN promiscuous
+ * mode
+ */
+ cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ }
}
status = be_if_create(adapter, &vf_cfg->if_handle,
@@ -3403,7 +3453,6 @@
struct device *dev = &adapter->pdev->dev;
struct be_vf_cfg *vf_cfg;
int status, old_vfs, vf;
- u32 privileges;
old_vfs = pci_num_vf(adapter->pdev);
@@ -3433,15 +3482,18 @@
for_all_vfs(adapter, vf_cfg, vf) {
/* Allow VFs to program MAC/VLAN filters */
- status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
- if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
+ status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
+ vf + 1);
+ if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
status = be_cmd_set_fn_privileges(adapter,
- privileges |
+ vf_cfg->privileges |
BE_PRIV_FILTMGMT,
vf + 1);
- if (!status)
+ if (!status) {
+ vf_cfg->privileges |= BE_PRIV_FILTMGMT;
dev_info(dev, "VF%d has FILTMGMT privilege\n",
vf);
+ }
}
/* Allow full available bandwidth */
@@ -4820,24 +4872,37 @@
static int be_map_pci_bars(struct be_adapter *adapter)
{
+ struct pci_dev *pdev = adapter->pdev;
u8 __iomem *addr;
if (BEx_chip(adapter) && be_physfn(adapter)) {
- adapter->csr = pci_iomap(adapter->pdev, 2, 0);
+ adapter->csr = pci_iomap(pdev, 2, 0);
if (!adapter->csr)
return -ENOMEM;
}
- addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
+ addr = pci_iomap(pdev, db_bar(adapter), 0);
if (!addr)
goto pci_map_err;
adapter->db = addr;
+ if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
+ if (be_physfn(adapter)) {
+ /* PCICFG is the 2nd BAR in BE2 */
+ addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
+ if (!addr)
+ goto pci_map_err;
+ adapter->pcicfg = addr;
+ } else {
+ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
+ }
+ }
+
be_roce_map_pci_bars(adapter);
return 0;
pci_map_err:
- dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
+ dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
be_unmap_pci_bars(adapter);
return -ENOMEM;
}
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index fe48f4c..1762ad3 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -46,8 +46,7 @@
};
#define CMD_PACKET_SIZE 64
-/* first command after power on can take around 8 seconds */
-#define CMD_TIMEOUT 15000
+#define CMD_TIMEOUT 100
#define CMD_REPLY_RETRY 5
#define CX82310_MTU 1514
@@ -78,8 +77,9 @@
ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
if (ret < 0) {
- dev_err(&dev->udev->dev, "send command %#x: error %d\n",
- cmd, ret);
+ if (cmd != CMD_GET_LINK_STATUS)
+ dev_err(&dev->udev->dev, "send command %#x: error %d\n",
+ cmd, ret);
goto end;
}
@@ -90,8 +90,10 @@
buf, CMD_PACKET_SIZE, &actual_len,
CMD_TIMEOUT);
if (ret < 0) {
- dev_err(&dev->udev->dev,
- "reply receive error %d\n", ret);
+ if (cmd != CMD_GET_LINK_STATUS)
+ dev_err(&dev->udev->dev,
+ "reply receive error %d\n",
+ ret);
goto end;
}
if (actual_len > 0)
@@ -134,6 +136,8 @@
int ret;
char buf[15];
struct usb_device *udev = dev->udev;
+ u8 link[3];
+ int timeout = 50;
/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -160,6 +164,20 @@
if (!dev->partial_data)
return -ENOMEM;
+ /* wait for firmware to become ready (indicated by the link being up) */
+ while (--timeout) {
+ ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0,
+ link, sizeof(link));
+ /* the command can time out during boot - it's not an error */
+ if (!ret && link[0] == 1 && link[2] == 1)
+ break;
+ msleep(500);
+ }
+ if (!timeout) {
+ dev_err(&udev->dev, "firmware not ready in time\n");
+ return -ETIMEDOUT;
+ }
+
/* enable ethernet mode (?) */
ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
if (ret) {
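
With the command timeout cut from 15 s to 100 ms, readiness is handled by an explicit counted poll instead: the predecrementing while runs at most 49 iterations of roughly 500 ms each (about 24 s of budget, well past the ~8 s worst-case first command noted in the removed comment), and a counter that reaches zero means timeout. The bare shape, with device_ready() standing in for the link-status query:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	static bool device_ready(void);	/* stand-in for the status command */

	static int wait_for_firmware(void)
	{
		int timeout = 50;

		while (--timeout) {	/* at most 49 polls */
			if (device_ready())
				return 0;
			msleep(500);
		}
		return -ETIMEDOUT;	/* counter reached zero: give up */
	}
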
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 9205f43..1819831 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -1572,6 +1572,10 @@
if (!pmic)
return -ENOMEM;
+ if (of_device_is_compatible(node, "ti,tps659038-pmic"))
+ palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr =
+ TPS659038_REGEN2_CTRL;
+
pmic->dev = &pdev->dev;
pmic->palmas = palmas;
palmas->pmic = pmic;
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index e2436d1..3a6fd3a 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -413,8 +413,8 @@
mrst->dev = NULL;
}
-#ifdef CONFIG_PM
-static int mrst_suspend(struct device *dev, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int mrst_suspend(struct device *dev)
{
struct mrst_rtc *mrst = dev_get_drvdata(dev);
unsigned char tmp;
@@ -453,7 +453,7 @@
*/
static inline int mrst_poweroff(struct device *dev)
{
- return mrst_suspend(dev, PMSG_HIBERNATE);
+ return mrst_suspend(dev);
}
static int mrst_resume(struct device *dev)
@@ -490,9 +490,11 @@
return 0;
}
+static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume);
+#define MRST_PM_OPS (&mrst_pm_ops)
+
#else
-#define mrst_suspend NULL
-#define mrst_resume NULL
+#define MRST_PM_OPS NULL
static inline int mrst_poweroff(struct device *dev)
{
@@ -529,9 +531,8 @@
.remove = vrtc_mrst_platform_remove,
.shutdown = vrtc_mrst_platform_shutdown,
.driver = {
- .name = (char *) driver_name,
- .suspend = mrst_suspend,
- .resume = mrst_resume,
+ .name = driver_name,
+ .pm = MRST_PM_OPS,
}
};
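
The legacy driver-model suspend/resume hooks give way to a dev_pm_ops table; under CONFIG_PM_SLEEP, SIMPLE_DEV_PM_OPS wires the single suspend/resume pair into every system-sleep slot, roughly as in this simplified expansion:

	static const struct dev_pm_ops mrst_pm_ops = {
		.suspend  = mrst_suspend,
		.resume   = mrst_resume,
		.freeze   = mrst_suspend,	/* hibernation reuses the pair */
		.thaw     = mrst_resume,
		.poweroff = mrst_suspend,
		.restore  = mrst_resume,
	};
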
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 9219953..d9afc51a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6815,7 +6815,8 @@
};
static struct ata_port_info sata_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_SAS_HOST,
.pio_mask = ATA_PIO4_ONLY,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 932d9cc..9c706d8 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -547,7 +547,8 @@
};
static struct ata_port_info sata_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
+ ATA_FLAG_SAS_HOST,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 3ce39d1..4f8c798 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -108,7 +108,8 @@
{
struct dw_spi *dws = arg;
- if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
+ clear_bit(TX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(RX_BUSY, &dws->dma_chan_busy))
return;
dw_spi_xfer_done(dws);
}
@@ -156,7 +157,8 @@
{
struct dw_spi *dws = arg;
- if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
+ clear_bit(RX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(TX_BUSY, &dws->dma_chan_busy))
return;
dw_spi_xfer_done(dws);
}
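
The removed condition could never fire: test_and_clear_bit() returns the old value of that single bit (0 or 1), not the whole word, so ANDing it with the positional mask BIT(RX_BUSY) is always zero and the transfer was marked done while the other channel might still be busy. The arithmetic, checked in isolation:

	#include <stdio.h>

	#define BIT(n)	(1UL << (n))
	#define TX_BUSY	0
	#define RX_BUSY	1

	int main(void)
	{
		/* test_and_clear_bit(TX_BUSY, ...) yields 0 or 1 */
		unsigned long old_tx = 1;	/* TX_BUSY was set */

		/* old test: 1 & BIT(RX_BUSY) == 1 & 2 == 0 -- never true */
		printf("%lu\n", old_tx & BIT(RX_BUSY));
		return 0;
	}
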
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index ff9cdbd..2b2c359 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -498,7 +498,7 @@
struct resource *res;
struct device *dev;
void __iomem *base;
- u32 max_freq, iomode;
+ u32 max_freq, iomode, num_cs;
int ret, irq, size;
dev = &pdev->dev;
@@ -550,10 +550,11 @@
}
/* use num-cs from DT unless it is absent or out of range */
- if (of_property_read_u16(dev->of_node, "num-cs",
- &master->num_chipselect) ||
- (master->num_chipselect > SPI_NUM_CHIPSELECTS))
+ if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
+ num_cs > SPI_NUM_CHIPSELECTS)
master->num_chipselect = SPI_NUM_CHIPSELECTS;
+ else
+ master->num_chipselect = num_cs;
master->bus_num = pdev->id;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
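
A device-tree cell is 32 bits wide and stored big-endian, so reading num-cs with of_property_read_u16() returned only the most-significant half, i.e. zero for any sane chip-select count; the fix reads into a u32 and range-checks before assigning the narrower field. A sketch of that idiom; MAX_CS and init_num_cs() are illustrative:

	#include <linux/of.h>

	#define MAX_CS 4	/* illustrative hardware limit */

	static void init_num_cs(struct device_node *np, u16 *num_chipselect)
	{
		u32 num_cs;

		/* property absent or out of range: fall back to the maximum */
		if (of_property_read_u32(np, "num-cs", &num_cs) ||
		    num_cs > MAX_CS)
			*num_chipselect = MAX_CS;
		else
			*num_chipselect = num_cs;
	}
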
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c64a3e5..57a1950 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1105,13 +1105,14 @@
"failed to unprepare message: %d\n", ret);
}
}
+
+ trace_spi_message_done(mesg);
+
master->cur_msg_prepared = false;
mesg->state = NULL;
if (mesg->complete)
mesg->complete(mesg->context);
-
- trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index c8def68..0deaa4f 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -42,10 +42,10 @@
#define PDC_WDT_MIN_TIMEOUT 1
#define PDC_WDT_DEF_TIMEOUT 64
-static int heartbeat;
+static int heartbeat = PDC_WDT_DEF_TIMEOUT;
module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
- "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds "
+ "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
@@ -191,6 +191,7 @@
pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK;
pdc_wdt->wdt_dev.parent = &pdev->dev;
+ watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
if (ret < 0) {
@@ -232,7 +233,6 @@
watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);
platform_set_drvdata(pdev, pdc_wdt);
- watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
ret = watchdog_register_device(&pdc_wdt->wdt_dev);
if (ret)
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index a87f6df..938b987 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -133,7 +133,7 @@
u32 reg;
struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
void __iomem *wdt_base = mtk_wdt->wdt_base;
- u32 ret;
+ int ret;
ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
if (ret < 0)