spi: Provide core support for DMA mapping transfers

The process of DMA mapping buffers for SPI transfers does not vary between
devices, so in order to avoid duplicating code in drivers it can be factored
out into the core. This also allows it to be integrated with the ongoing work
on factoring the common elements out of the data path, including wider
sharing of dmaengine code.

In order to use this, masters need to provide a can_dma() operation and,
while the hardware is being prepared, ensure that DMA channels are provided
in dma_tx and dma_rx. The core will then ensure that the buffers are mapped
for DMA prior to calling transfer_one_message().

Currently the cleanup on error is not complete: if mapping fails part way
through a message, buffers already mapped for earlier transfers are not
unmapped. This needs to be improved.

Signed-off-by: Mark Brown <broonie@linaro.org>
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 23756b0..bcdaa74 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -24,6 +24,8 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/mutex.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
@@ -580,6 +582,77 @@
 		spi->master->set_cs(spi, !enable);
 }
 
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct device *dev = master->dev.parent;
+	struct device *tx_dev, *rx_dev;
+	struct spi_transfer *xfer;
+
+	if (msg->is_dma_mapped || !master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		if (xfer->tx_buf != NULL) {
+			xfer->tx_dma = dma_map_single(tx_dev,
+						      (void *)xfer->tx_buf,
+						      xfer->len,
+						      DMA_TO_DEVICE);
+			if (dma_mapping_error(dev, xfer->tx_dma)) {
+				dev_err(dev, "dma_map_single Tx failed\n");
+				return -ENOMEM;
+			}
+		}
+
+		if (xfer->rx_buf != NULL) {
+			xfer->rx_dma = dma_map_single(rx_dev,
+						      xfer->rx_buf, xfer->len,
+						      DMA_FROM_DEVICE);
+			if (dma_mapping_error(dev, xfer->rx_dma)) {
+				dev_err(dev, "dma_map_single Rx failed\n");
+				dma_unmap_single(tx_dev, xfer->tx_dma,
+						 xfer->len, DMA_TO_DEVICE);
+				return -ENOMEM;
+			}
+		}
+	}
+
+	master->cur_msg_mapped = true;
+
+	return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct spi_transfer *xfer;
+	struct device *tx_dev, *rx_dev;
+
+	if (!master->cur_msg_mapped || msg->is_dma_mapped || !master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		if (xfer->rx_buf)
+			dma_unmap_single(rx_dev, xfer->rx_dma, xfer->len,
+					 DMA_FROM_DEVICE);
+		if (xfer->tx_buf)
+			dma_unmap_single(tx_dev, xfer->tx_dma, xfer->len,
+					 DMA_TO_DEVICE);
+	}
+
+	return 0;
+}
+
 /*
  * spi_transfer_one_message - Default implementation of transfer_one_message()
  *
@@ -752,6 +825,13 @@
 		master->cur_msg_prepared = true;
 	}
 
+	ret = spi_map_msg(master, master->cur_msg);
+	if (ret) {
+		master->cur_msg->status = ret;
+		spi_finalize_current_message(master);
+		return;
+	}
+
 	ret = master->transfer_one_message(master, master->cur_msg);
 	if (ret) {
 		dev_err(&master->dev,
@@ -841,6 +921,8 @@
 	queue_kthread_work(&master->kworker, &master->pump_messages);
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
+	spi_unmap_msg(master, mesg);
+
 	if (master->cur_msg_prepared && master->unprepare_message) {
 		ret = master->unprepare_message(master, mesg);
 		if (ret) {