blob: b8514d7895d1c8a326945bffd4c19b479c404f7d [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * Platform driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * Some parts of this driver are derived from the original dw_dmac.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>

#include "internal.h"

#define DRV_NAME	"dw_dmac"

Andy Shevchenko9cade1a2013-06-05 15:26:45 +030028static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
29 struct of_dma *ofdma)
30{
31 struct dw_dma *dw = ofdma->of_dma_data;
Andy Shevchenko4d130de2014-08-19 20:29:16 +030032 struct dw_dma_slave slave = {
33 .dma_dev = dw->dma.dev,
Andy Shevchenko9cade1a2013-06-05 15:26:45 +030034 };
35 dma_cap_mask_t cap;
36
37 if (dma_spec->args_count != 3)
38 return NULL;
39
Andy Shevchenko4d130de2014-08-19 20:29:16 +030040 slave.src_id = dma_spec->args[0];
41 slave.dst_id = dma_spec->args[0];
Andy Shevchenkoc4220252016-03-18 16:24:41 +020042 slave.m_master = dma_spec->args[1];
43 slave.p_master = dma_spec->args[2];
Andy Shevchenko9cade1a2013-06-05 15:26:45 +030044
Andy Shevchenko4d130de2014-08-19 20:29:16 +030045 if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
46 slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
Andy Shevchenko161c3d02016-04-27 14:15:39 +030047 slave.m_master >= dw->pdata->nr_masters ||
48 slave.p_master >= dw->pdata->nr_masters))
Andy Shevchenko9cade1a2013-06-05 15:26:45 +030049 return NULL;
50
51 dma_cap_zero(cap);
52 dma_cap_set(DMA_SLAVE, cap);
53
54 /* TODO: there should be a simpler way to do this */
Andy Shevchenko4d130de2014-08-19 20:29:16 +030055 return dma_request_channel(cap, dw_dma_filter, &slave);
Andy Shevchenko9cade1a2013-06-05 15:26:45 +030056}
57
58#ifdef CONFIG_ACPI
59static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
60{
Andy Shevchenko9cade1a2013-06-05 15:26:45 +030061 struct acpi_dma_spec *dma_spec = param;
Andy Shevchenko4d130de2014-08-19 20:29:16 +030062 struct dw_dma_slave slave = {
63 .dma_dev = dma_spec->dev,
64 .src_id = dma_spec->slave_id,
65 .dst_id = dma_spec->slave_id,
Andy Shevchenkoc4220252016-03-18 16:24:41 +020066 .m_master = 0,
67 .p_master = 1,
Andy Shevchenko4d130de2014-08-19 20:29:16 +030068 };
Andy Shevchenko9cade1a2013-06-05 15:26:45 +030069
Andy Shevchenko4d130de2014-08-19 20:29:16 +030070 return dw_dma_filter(chan, &slave);
Andy Shevchenko9cade1a2013-06-05 15:26:45 +030071}
72
73static void dw_dma_acpi_controller_register(struct dw_dma *dw)
74{
75 struct device *dev = dw->dma.dev;
76 struct acpi_dma_filter_info *info;
77 int ret;
78
Andy Shevchenko84da0422019-08-20 16:15:44 +030079 if (!has_acpi_companion(dev))
80 return;
81
Andy Shevchenko9cade1a2013-06-05 15:26:45 +030082 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
83 if (!info)
84 return;
85
86 dma_cap_zero(info->dma_cap);
87 dma_cap_set(DMA_SLAVE, info->dma_cap);
88 info->filter_fn = dw_dma_acpi_filter;
89
Andy Shevchenkoe7b85142019-08-20 16:15:43 +030090 ret = acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info);
Andy Shevchenko9cade1a2013-06-05 15:26:45 +030091 if (ret)
92 dev_err(dev, "could not register acpi_dma_controller\n");
93}
Andy Shevchenkoe7b85142019-08-20 16:15:43 +030094
95static void dw_dma_acpi_controller_free(struct dw_dma *dw)
96{
97 struct device *dev = dw->dma.dev;
98
Andy Shevchenko84da0422019-08-20 16:15:44 +030099 if (!has_acpi_companion(dev))
100 return;
101
Andy Shevchenkoe7b85142019-08-20 16:15:43 +0300102 acpi_dma_controller_free(dev);
103}
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300104#else /* !CONFIG_ACPI */
105static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
Andy Shevchenkoe7b85142019-08-20 16:15:43 +0300106static inline void dw_dma_acpi_controller_free(struct dw_dma *dw) {}
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300107#endif /* !CONFIG_ACPI */
108
109#ifdef CONFIG_OF
110static struct dw_dma_platform_data *
111dw_dma_parse_dt(struct platform_device *pdev)
112{
113 struct device_node *np = pdev->dev.of_node;
114 struct dw_dma_platform_data *pdata;
Eugeniy Paltsevbd2c6632016-11-25 17:59:07 +0300115 u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
Andy Shevchenko969f7502016-04-27 14:15:37 +0300116 u32 nr_masters;
Mans Rullgard2b574ba2015-12-17 23:30:57 +0000117 u32 nr_channels;
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300118
119 if (!np) {
120 dev_err(&pdev->dev, "Missing DT data\n");
121 return NULL;
122 }
123
Andy Shevchenko969f7502016-04-27 14:15:37 +0300124 if (of_property_read_u32(np, "dma-masters", &nr_masters))
125 return NULL;
126 if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
127 return NULL;
128
Mans Rullgard2b574ba2015-12-17 23:30:57 +0000129 if (of_property_read_u32(np, "dma-channels", &nr_channels))
130 return NULL;
Eugeniy Paltsevbd2c6632016-11-25 17:59:07 +0300131 if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
132 return NULL;
Mans Rullgard2b574ba2015-12-17 23:30:57 +0000133
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300134 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
135 if (!pdata)
136 return NULL;
137
Andy Shevchenko969f7502016-04-27 14:15:37 +0300138 pdata->nr_masters = nr_masters;
Mans Rullgard2b574ba2015-12-17 23:30:57 +0000139 pdata->nr_channels = nr_channels;
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300140
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300141 if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
142 pdata->chan_allocation_order = (unsigned char)tmp;
143
144 if (!of_property_read_u32(np, "chan_priority", &tmp))
145 pdata->chan_priority = tmp;
146
147 if (!of_property_read_u32(np, "block_size", &tmp))
148 pdata->block_size = tmp;
149
Andy Shevchenko2e650602016-04-27 14:15:38 +0300150 if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
Andy Shevchenko969f7502016-04-27 14:15:37 +0300151 for (tmp = 0; tmp < nr_masters; tmp++)
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300152 pdata->data_width[tmp] = arr[tmp];
Andy Shevchenko2e650602016-04-27 14:15:38 +0300153 } else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
154 for (tmp = 0; tmp < nr_masters; tmp++)
155 pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
Andy Shevchenko969f7502016-04-27 14:15:37 +0300156 }
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300157
Eugeniy Paltsevbd2c6632016-11-25 17:59:07 +0300158 if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
159 for (tmp = 0; tmp < nr_channels; tmp++)
160 pdata->multi_block[tmp] = mb[tmp];
161 } else {
162 for (tmp = 0; tmp < nr_channels; tmp++)
163 pdata->multi_block[tmp] = 1;
164 }
165
Christian Lamparter7b0c03e2018-11-17 17:17:21 +0100166 if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
167 if (tmp > CHAN_PROTCTL_MASK)
168 return NULL;
169 pdata->protctl = tmp;
170 }
171
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300172 return pdata;
173}
174#else
175static inline struct dw_dma_platform_data *
176dw_dma_parse_dt(struct platform_device *pdev)
177{
178 return NULL;
179}
180#endif
181
182static int dw_probe(struct platform_device *pdev)
183{
Andy Shevchenkob3757412019-08-20 16:15:40 +0300184 const struct dw_dma_chip_pdata *match;
185 struct dw_dma_chip_pdata *data;
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300186 struct dw_dma_chip *chip;
187 struct device *dev = &pdev->dev;
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300188 int err;
189
Andy Shevchenkob3757412019-08-20 16:15:40 +0300190 match = device_get_match_data(dev);
191 if (!match)
192 return -ENODEV;
193
194 data = devm_kmemdup(&pdev->dev, match, sizeof(*match), GFP_KERNEL);
195 if (!data)
196 return -ENOMEM;
197
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300198 chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
199 if (!chip)
200 return -ENOMEM;
201
202 chip->irq = platform_get_irq(pdev, 0);
203 if (chip->irq < 0)
204 return chip->irq;
205
Andy Shevchenkoa9c56722019-08-20 16:15:42 +0300206 chip->regs = devm_platform_ioremap_resource(pdev, 0);
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300207 if (IS_ERR(chip->regs))
208 return PTR_ERR(chip->regs);
209
Russell King24353b82013-06-27 13:37:21 +0100210 err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
211 if (err)
212 return err;
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300213
Andy Shevchenkof8d9ddb2019-08-20 16:15:41 +0300214 if (!data->pdata)
215 data->pdata = dev_get_platdata(dev);
216 if (!data->pdata)
217 data->pdata = dw_dma_parse_dt(pdev);
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300218
219 chip->dev = dev;
Andy Shevchenko08d62f52017-01-17 13:57:26 +0200220 chip->id = pdev->id;
Andy Shevchenkof8d9ddb2019-08-20 16:15:41 +0300221 chip->pdata = data->pdata;
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300222
Andy Shevchenkob3757412019-08-20 16:15:40 +0300223 data->chip = chip;
224
Andy Shevchenkoa15636e2014-08-19 20:29:17 +0300225 chip->clk = devm_clk_get(chip->dev, "hclk");
226 if (IS_ERR(chip->clk))
227 return PTR_ERR(chip->clk);
228 err = clk_prepare_enable(chip->clk);
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300229 if (err)
230 return err;
231
Andy Shevchenko6acf3992015-01-13 18:57:15 +0200232 pm_runtime_enable(&pdev->dev);
233
Andy Shevchenkob3757412019-08-20 16:15:40 +0300234 err = data->probe(chip);
Andy Shevchenkoa15636e2014-08-19 20:29:17 +0300235 if (err)
236 goto err_dw_dma_probe;
237
Andy Shevchenkob3757412019-08-20 16:15:40 +0300238 platform_set_drvdata(pdev, data);
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300239
240 if (pdev->dev.of_node) {
241 err = of_dma_controller_register(pdev->dev.of_node,
242 dw_dma_of_xlate, chip->dw);
243 if (err)
244 dev_err(&pdev->dev,
245 "could not register of_dma_controller\n");
246 }
247
Andy Shevchenko84da0422019-08-20 16:15:44 +0300248 dw_dma_acpi_controller_register(chip->dw);
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300249
250 return 0;
Andy Shevchenkoa15636e2014-08-19 20:29:17 +0300251
252err_dw_dma_probe:
Andy Shevchenko6acf3992015-01-13 18:57:15 +0200253 pm_runtime_disable(&pdev->dev);
Andy Shevchenkoa15636e2014-08-19 20:29:17 +0300254 clk_disable_unprepare(chip->clk);
255 return err;
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300256}
257
258static int dw_remove(struct platform_device *pdev)
259{
Andy Shevchenkob3757412019-08-20 16:15:40 +0300260 struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev);
261 struct dw_dma_chip *chip = data->chip;
262 int ret;
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300263
Andy Shevchenko84da0422019-08-20 16:15:44 +0300264 dw_dma_acpi_controller_free(chip->dw);
Andy Shevchenkoe7b85142019-08-20 16:15:43 +0300265
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300266 if (pdev->dev.of_node)
267 of_dma_controller_free(pdev->dev.of_node);
268
Andy Shevchenkob3757412019-08-20 16:15:40 +0300269 ret = data->remove(chip);
270 if (ret)
271 dev_warn(chip->dev, "can't remove device properly: %d\n", ret);
272
Andy Shevchenko6acf3992015-01-13 18:57:15 +0200273 pm_runtime_disable(&pdev->dev);
Andy Shevchenkoa15636e2014-08-19 20:29:17 +0300274 clk_disable_unprepare(chip->clk);
275
276 return 0;
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300277}
278
279static void dw_shutdown(struct platform_device *pdev)
280{
Andy Shevchenkob3757412019-08-20 16:15:40 +0300281 struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev);
282 struct dw_dma_chip *chip = data->chip;
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300283
Andy Shevchenko32146582015-12-04 23:49:23 +0200284 /*
Andy Shevchenko69da8be2019-01-07 13:07:38 +0200285 * We have to call do_dw_dma_disable() to stop any ongoing transfer. On
Andy Shevchenko32146582015-12-04 23:49:23 +0200286 * some platforms we can't do that since DMA device is powered off.
287 * Moreover we have no possibility to check if the platform is affected
288 * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put()
289 * unconditionally. On the other hand we can't use
290 * pm_runtime_suspended() because runtime PM framework is not fully
291 * used by the driver.
292 */
293 pm_runtime_get_sync(chip->dev);
Andy Shevchenko69da8be2019-01-07 13:07:38 +0200294 do_dw_dma_disable(chip);
Andy Shevchenko32146582015-12-04 23:49:23 +0200295 pm_runtime_put_sync_suspend(chip->dev);
296
Andy Shevchenkoa15636e2014-08-19 20:29:17 +0300297 clk_disable_unprepare(chip->clk);
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300298}
299
#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
	{ "INTL9C60", (kernel_ulong_t)&dw_dma_chip_pdata },
	{ "80862286", (kernel_ulong_t)&dw_dma_chip_pdata },
	{ "808622C0", (kernel_ulong_t)&dw_dma_chip_pdata },

	/* Elkhart Lake iDMA 32-bit (PSE DMA) */
	{ "80864BB4", (kernel_ulong_t)&idma32_chip_pdata },
	{ "80864BB5", (kernel_ulong_t)&idma32_chip_pdata },
	{ "80864BB6", (kernel_ulong_t)&idma32_chip_pdata },

	{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif

#ifdef CONFIG_PM_SLEEP

/* Late system suspend: quiesce the controller, then gate its clock. */
static int dw_suspend_late(struct device *dev)
{
	struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
	struct dw_dma_chip *chip = data->chip;

	do_dw_dma_disable(chip);
	clk_disable_unprepare(chip->clk);

	return 0;
}

/* Early system resume: re-enable the clock before touching the hardware. */
static int dw_resume_early(struct device *dev)
{
	struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
	struct dw_dma_chip *chip = data->chip;
	int ret;

	ret = clk_prepare_enable(chip->clk);
	if (ret)
		return ret;

	return do_dw_dma_enable(chip);
}

#endif /* CONFIG_PM_SLEEP */

352static const struct dev_pm_ops dw_dev_pm_ops = {
Andy Shevchenko067bd4f2014-04-15 16:18:41 +0300353 SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300354};
355
356static struct platform_driver dw_driver = {
357 .probe = dw_probe,
358 .remove = dw_remove,
Andy Shevchenko2540f742014-09-23 17:18:13 +0300359 .shutdown = dw_shutdown,
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300360 .driver = {
Andy Shevchenkoa104a452015-03-09 12:16:42 +0200361 .name = DRV_NAME,
Andy Shevchenko9cade1a2013-06-05 15:26:45 +0300362 .pm = &dw_dev_pm_ops,
363 .of_match_table = of_match_ptr(dw_dma_of_id_table),
364 .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
365 },
366};
367
368static int __init dw_init(void)
369{
370 return platform_driver_register(&dw_driver);
371}
372subsys_initcall(dw_init);
373
374static void __exit dw_exit(void)
375{
376 platform_driver_unregister(&dw_driver);
377}
378module_exit(dw_exit);
379
380MODULE_LICENSE("GPL v2");
381MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
Andy Shevchenkoa104a452015-03-09 12:16:42 +0200382MODULE_ALIAS("platform:" DRV_NAME);