spi: Fix cache corruption due to DMA/PIO overlap
The SPI core DMA mapping support performs cache management once for the
entire message and not between transfers, and this leads to cache
corruption if a message has two or more RX transfers with both
transfers targeting the same cache line, and the controller driver
decides to handle one using DMA and the other using PIO (for example,
because one is much larger than the other).
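
A minimal sketch of a client message that could trigger this, assuming a hypothetical device whose response has one tiny field and one larger one (the struct, helper, and sizes below are illustrative, not from the patch). Because the two RX buffers are adjacent members of one allocation they typically share a cache line, so if the controller driver handles one transfer with PIO and the other with DMA, the message-wide cache maintenance can invalidate the PIO-written bytes.

#include <linux/spi/spi.h>

/* Illustrative response layout: both buffers usually share a cache line. */
struct demo_resp {
	u8 status;	/* transfer 1: tiny, likely done with PIO  */
	u8 data[64];	/* transfer 2: larger, likely done with DMA */
};

/* Hypothetical helper, not from the patch; resp must be DMA-safe (kmalloc'd). */
static int demo_read(struct spi_device *spi, struct demo_resp *resp)
{
	struct spi_transfer xfers[2] = {
		{ .rx_buf = &resp->status, .len = 1                  },
		{ .rx_buf = resp->data,    .len = sizeof(resp->data) },
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));

	/*
	 * Before this fix, cache maintenance happened once for the whole
	 * message at map/unmap time, so the invalidate covering xfers[1]
	 * could also discard the CPU-written 'status' byte of xfers[0].
	 */
	return spi_sync(spi, &msg);
}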

Fix it by syncing before/after the actual transfers.  This also means
that we can skip the sync during the map/unmap of the message.

Fixes: 99adef3 ("spi: Provide core support for DMA mapping transfers")
Signed-off-by: Vincent Whitchurch <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Mark Brown <[email protected]>
vwax authored and broonie committed Sep 28, 2022
1 parent f25723d commit 0c17ba7
Showing 1 changed file with 88 additions and 21 deletions.
109 changes: 88 additions & 21 deletions drivers/spi/spi.c
@@ -1010,9 +1010,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
 }

 #ifdef CONFIG_HAS_DMA
-int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
-		struct sg_table *sgt, void *buf, size_t len,
-		enum dma_data_direction dir)
+static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
+			     struct sg_table *sgt, void *buf, size_t len,
+			     enum dma_data_direction dir, unsigned long attrs)
 {
 	const bool vmalloced_buf = is_vmalloc_addr(buf);
 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
@@ -1078,28 +1078,39 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
 		sg = sg_next(sg);
 	}

-	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
-	if (!ret)
-		ret = -ENOMEM;
+	ret = dma_map_sgtable(dev, sgt, dir, attrs);
 	if (ret < 0) {
 		sg_free_table(sgt);
 		return ret;
 	}

-	sgt->nents = ret;
-
 	return 0;
 }

-void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
-		   struct sg_table *sgt, enum dma_data_direction dir)
+int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+		struct sg_table *sgt, void *buf, size_t len,
+		enum dma_data_direction dir)
+{
+	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
+}
+
+static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
+				struct device *dev, struct sg_table *sgt,
+				enum dma_data_direction dir,
+				unsigned long attrs)
 {
 	if (sgt->orig_nents) {
-		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+		dma_unmap_sgtable(dev, sgt, dir, attrs);
 		sg_free_table(sgt);
 	}
 }

+void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
+		   struct sg_table *sgt, enum dma_data_direction dir)
+{
+	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
+}
+
 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
 	struct device *tx_dev, *rx_dev;
@@ -1124,24 +1135,30 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 		rx_dev = ctlr->dev.parent;

 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		/* The sync is done before each transfer. */
+		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 			continue;

 		if (xfer->tx_buf != NULL) {
-			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
-					  (void *)xfer->tx_buf, xfer->len,
-					  DMA_TO_DEVICE);
+			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+						(void *)xfer->tx_buf,
+						xfer->len, DMA_TO_DEVICE,
+						attrs);
 			if (ret != 0)
 				return ret;
 		}

 		if (xfer->rx_buf != NULL) {
-			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
-					  xfer->rx_buf, xfer->len,
-					  DMA_FROM_DEVICE);
+			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+						xfer->rx_buf, xfer->len,
+						DMA_FROM_DEVICE, attrs);
 			if (ret != 0) {
-				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
-					      DMA_TO_DEVICE);
+				spi_unmap_buf_attrs(ctlr, tx_dev,
+						    &xfer->tx_sg, DMA_TO_DEVICE,
+						    attrs);
+
 				return ret;
 			}
 		}
@@ -1164,17 +1181,52 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 		return 0;

 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		/* The sync has already been done after each transfer. */
+		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 			continue;

-		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
-		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+				    DMA_FROM_DEVICE, attrs);
+		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+				    DMA_TO_DEVICE, attrs);
 	}

 	ctlr->cur_msg_mapped = false;

 	return 0;
 }

+static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+				    struct spi_transfer *xfer)
+{
+	struct device *rx_dev = ctlr->cur_rx_dma_dev;
+	struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+	if (!ctlr->cur_msg_mapped)
+		return;
+
+	if (xfer->tx_sg.orig_nents)
+		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+	if (xfer->rx_sg.orig_nents)
+		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+				 struct spi_transfer *xfer)
+{
+	struct device *rx_dev = ctlr->cur_rx_dma_dev;
+	struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+	if (!ctlr->cur_msg_mapped)
+		return;
+
+	if (xfer->rx_sg.orig_nents)
+		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+	if (xfer->tx_sg.orig_nents)
+		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+}
 #else /* !CONFIG_HAS_DMA */
 static inline int __spi_map_msg(struct spi_controller *ctlr,
 				struct spi_message *msg)
@@ -1187,6 +1239,16 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 {
 	return 0;
 }
+
+static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+				    struct spi_transfer *xfer)
+{
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+				 struct spi_transfer *xfer)
+{
+}
 #endif /* !CONFIG_HAS_DMA */

 static inline int spi_unmap_msg(struct spi_controller *ctlr,
@@ -1445,8 +1507,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 			reinit_completion(&ctlr->xfer_completion);

 fallback_pio:
+			spi_dma_sync_for_device(ctlr, xfer);
 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
 			if (ret < 0) {
+				spi_dma_sync_for_cpu(ctlr, xfer);
+
 				if (ctlr->cur_msg_mapped &&
 				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
 					__spi_unmap_msg(ctlr, msg);
@@ -1469,6 +1534,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 				if (ret < 0)
 					msg->status = ret;
 			}
+
+			spi_dma_sync_for_cpu(ctlr, xfer);
 		} else {
 			if (xfer->len)
 				dev_err(&msg->spi->dev,
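
The core DMA-API pattern the fix relies on, condensed into one hypothetical helper (the function name and error handling are illustrative, not from the patch): the mapping is still created once, but DMA_ATTR_SKIP_CPU_SYNC defers all cache maintenance to explicit syncs that bracket only the transfers that actually go out over DMA, so buffers handled by PIO are never invalidated behind the CPU's back.

#include <linux/dma-mapping.h>

/* Hypothetical sketch of the map/sync/unmap sequence around a DMA RX transfer. */
static int demo_rx_dma(struct device *dev, struct sg_table *sgt)
{
	int ret;

	/* Map once, but skip the implicit CPU cache maintenance. */
	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		return ret;

	/* Hand the buffer to the device only for this transfer... */
	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);

	/* ... the hardware performs the DMA transfer here ... */

	/* ...and give it back to the CPU right after the transfer. */
	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);

	/* Unmap likewise skips the (already performed) CPU sync. */
	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}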
