author    Stephen Warren <swarren@nvidia.com>     2012-11-06 11:27:29 +0000
committer Andy Fleming <afleming@freescale.com>   2012-11-27 17:26:48 -0600
commit    84d35b2863455bedb9986c2b076241e8a441fc3e (patch)
tree      9cb3af890b370091b0460d930b5829fe3d2abb62 /drivers/mmc
parent    4ea7a09fafdf0592cb99428090946bf15128ea44 (diff)
common: rework bouncebuf implementation
The current bouncebuf API requires all parameters to be passed to both
bounce_buffer_start() and bounce_buffer_stop(). Modify the bouncebuf start
function to accept a state structure as a parameter, and only require that
state struct to be passed to the stop function. This simplifies usage of
the bounce buffer by clients.

Don't modify the data pointer, but rather store the temporary buffer in
this state struct. The bouncebuf code ensures that client code can always
use a single buffer pointer in the state structure, irrespective of
whether a bounce buffer actually had to be allocated.

Move cache management logic into the bounce buffer code, so that each
client doesn't have to duplicate this. I believe there's no need to
invalidate the buffer before a DMA operation, since flushing the cache
should prevent any write-backs.

Update the MXS MMC driver for this change.

Signed-off-by: Stephen Warren <swarren@nvidia.com>
Acked-by: Simon Glass <sjg@chromium.org>
Tested-by: Simon Glass <sjg@chromium.org>
Signed-off-by: Andy Fleming <afleming@freescale.com>
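To make the new contract concrete, here is a minimal caller-side sketch of the
pattern described above. bounce_buffer_start(), bounce_buffer_stop(), the
bounce_buffer field, and GEN_BB_READ are taken from the diff below; the
GEN_BB_WRITE flag, the int return of the start call, and the my_dma_transfer()
helper are assumptions for illustration, not part of this commit.

    #include <common.h>
    #include <bouncebuf.h>

    /* Hypothetical DMA engine hook, assumed for this sketch */
    extern int my_dma_transfer(void *dma_addr, size_t len, int to_device);

    /*
     * Minimal sketch of the reworked client pattern: all bookkeeping
     * lives in one struct bounce_buffer on the stack, so only &bbstate
     * needs to be passed back to bounce_buffer_stop().
     */
    static int my_dma_xfer(void *data, size_t len, int to_device)
    {
        struct bounce_buffer bbstate;
        /* GEN_BB_READ: DMA reads the buffer; GEN_BB_WRITE: DMA writes it */
        unsigned int flags = to_device ? GEN_BB_READ : GEN_BB_WRITE;
        int ret;

        /* Allocates an aligned temporary only if one is actually needed */
        ret = bounce_buffer_start(&bbstate, data, len, flags);
        if (ret)
            return ret;

        /*
         * Always hand bbstate.bounce_buffer to the hardware: it aliases
         * the caller's buffer when no bounce was needed, or the aligned
         * temporary otherwise. Cache maintenance has already happened
         * inside bounce_buffer_start().
         */
        ret = my_dma_transfer(bbstate.bounce_buffer, len, to_device);

        /* Copies data back and frees the temporary, if one was in use */
        bounce_buffer_stop(&bbstate);

        return ret;
    }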
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/mxsmmc.c | 30 +++++++-----------------------
1 file changed, 7 insertions(+), 23 deletions(-)
diff --git a/drivers/mmc/mxsmmc.c b/drivers/mmc/mxsmmc.c
index 109acbf..024df59 100644
--- a/drivers/mmc/mxsmmc.c
+++ b/drivers/mmc/mxsmmc.c
@@ -96,11 +96,11 @@ static int mxsmmc_send_cmd_pio(struct mxsmmc_priv *priv, struct mmc_data *data)
 static int mxsmmc_send_cmd_dma(struct mxsmmc_priv *priv, struct mmc_data *data)
 {
 	uint32_t data_count = data->blocksize * data->blocks;
-	uint32_t cache_data_count = roundup(data_count, ARCH_DMA_MINALIGN);
 	int dmach;
 	struct mxs_dma_desc *desc = priv->desc;
-	void *addr, *backup;
-	uint8_t flags;
+	void *addr;
+	unsigned int flags;
+	struct bounce_buffer bbstate;
 
 	memset(desc, 0, sizeof(struct mxs_dma_desc));
 	desc->address = (dma_addr_t)desc;
@@ -115,19 +115,9 @@ static int mxsmmc_send_cmd_dma(struct mxsmmc_priv *priv, struct mmc_data *data)
 		flags = GEN_BB_READ;
 	}
 
-	bounce_buffer_start(&addr, data_count, &backup, flags);
+	bounce_buffer_start(&bbstate, addr, data_count, flags);
 
-	priv->desc->cmd.address = (dma_addr_t)addr;
-
-	if (data->flags & MMC_DATA_WRITE) {
-		/* Flush data to DRAM so DMA can pick them up */
-		flush_dcache_range((uint32_t)addr,
-				   (uint32_t)(addr) + cache_data_count);
-	}
-
-	/* Invalidate the area, so no writeback into the RAM races with DMA */
-	invalidate_dcache_range((uint32_t)priv->desc->cmd.address,
-			(uint32_t)(priv->desc->cmd.address + cache_data_count));
+	priv->desc->cmd.address = (dma_addr_t)bbstate.bounce_buffer;
 
 	priv->desc->cmd.data |= MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM |
 				(data_count << MXS_DMA_DESC_BYTES_OFFSET);
@@ -135,17 +125,11 @@ static int mxsmmc_send_cmd_dma(struct mxsmmc_priv *priv, struct mmc_data *data)
 	dmach = MXS_DMA_CHANNEL_AHB_APBH_SSP0 + priv->id;
 	mxs_dma_desc_append(dmach, priv->desc);
 	if (mxs_dma_go(dmach)) {
-		bounce_buffer_stop(&addr, data_count, &backup, flags);
+		bounce_buffer_stop(&bbstate);
 		return COMM_ERR;
 	}
 
-	/* The data arrived into DRAM, invalidate cache over them */
-	if (data->flags & MMC_DATA_READ) {
-		invalidate_dcache_range((uint32_t)addr,
-				(uint32_t)(addr) + cache_data_count);
-	}
-
-	bounce_buffer_stop(&addr, data_count, &backup, flags);
+	bounce_buffer_stop(&bbstate);
 
 	return 0;
 }
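For completeness, a hedged sketch of roughly what the consolidated common code
has to do with that state struct, pieced together from the commit message
(single usable pointer regardless of allocation, copy-in/copy-out, flush
before DMA, no pre-DMA invalidate). The field names other than bounce_buffer,
the alignment test, and the error value are assumptions, not quoted from this
commit.

    #include <common.h>
    #include <malloc.h>
    #include <errno.h>
    #include <bouncebuf.h>

    /* Assumed suitability test: address and length cache-line aligned */
    static int addr_ok_for_dma(void *data, size_t len)
    {
        return !((unsigned long)data % ARCH_DMA_MINALIGN) &&
               !(len % ARCH_DMA_MINALIGN);
    }

    int bounce_buffer_start(struct bounce_buffer *state, void *data,
                            size_t len, unsigned int flags)
    {
        /* user_buffer/len/len_aligned are assumed field names */
        state->user_buffer = data;
        state->bounce_buffer = data;    /* alias by default: no bounce */
        state->len = len;
        state->len_aligned = roundup(len, ARCH_DMA_MINALIGN);
        state->flags = flags;

        if (!addr_ok_for_dma(data, len)) {
            state->bounce_buffer = memalign(ARCH_DMA_MINALIGN,
                                            state->len_aligned);
            if (!state->bounce_buffer)
                return -ENOMEM;
            /* DMA will read this memory: stage the caller's data */
            if (flags & GEN_BB_READ)
                memcpy(state->bounce_buffer, data, len);
        }

        /*
         * Flush so an outgoing DMA sees the data and no dirty line is
         * written back over an incoming DMA; per the commit message, no
         * separate pre-DMA invalidate is needed once this flush is done.
         */
        flush_dcache_range((unsigned long)state->bounce_buffer,
                           (unsigned long)state->bounce_buffer +
                           state->len_aligned);
        return 0;
    }

    int bounce_buffer_stop(struct bounce_buffer *state)
    {
        if (state->flags & GEN_BB_WRITE) {
            /* DMA wrote to RAM: drop stale cache lines over it */
            invalidate_dcache_range((unsigned long)state->bounce_buffer,
                                    (unsigned long)state->bounce_buffer +
                                    state->len_aligned);
        }

        if (state->bounce_buffer != state->user_buffer) {
            /* Copy results back, then release the temporary */
            if (state->flags & GEN_BB_WRITE)
                memcpy(state->user_buffer, state->bounce_buffer,
                       state->len);
            free(state->bounce_buffer);
        }

        return 0;
    }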