From: Girish K S girishks2...@gmail.com
The 64xx spi driver supports partial polling mode.
Only the last chunk of the transfer length is transferred
or received in polling mode.
Some SoC's that adopt this controller might not have a dma
interface. This patch adds support for complete polling mode
and gives flexibility for the user to select poll/dma mode.
Signed-off-by: Girish K S ks.g...@samsung.com
---
changes in v4:
Handled the dma allocation failure and switching to poll
if dma resource allocation failed
changes in v3:
Separated the polling mode and gpio handling into separate patches
changes in v2:
changed the logic to handle the buffer from the user space.
moved out the timeout code as a separate function.
drivers/spi/spi-s3c64xx.c | 133 ++---
1 files changed, 89 insertions(+), 44 deletions(-)
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 4188b2f..a6fdc71 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -35,6 +35,7 @@
#include <linux/platform_data/spi-s3c64xx.h>
#define MAX_SPI_PORTS 3
+#define S3C64XX_SPI_QUIRK_POLL	(1 << 0)
/* Registers and bit-fields */
@@ -126,6 +127,7 @@
#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+#define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
 #define RXBUSY	(1<<2)
 #define TXBUSY	(1<<3)
@@ -154,6 +156,7 @@ struct s3c64xx_spi_port_config {
int fifo_lvl_mask[MAX_SPI_PORTS];
int rx_lvl_offset;
int tx_st_done;
+	int	quirks;
 	bool	high_speed;
 	bool	clk_from_cmu;
};
@@ -419,6 +422,27 @@ static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
 	cs = spi->controller_data;
 	gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
+
+ /* Start the signals */
+	writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+}
+
+static u32 wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
+				int timeout_ms)
+{
+	void __iomem *regs = sdd->regs;
+	unsigned long val;
+	u32 status;
+	/* max fifo depth available */
+	u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
+
+	val = msecs_to_loops(timeout_ms);
+	do {
+		status = readl(regs + S3C64XX_SPI_STATUS);
+	} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
+
+	/* return the actual received data length */
+	return RX_FIFO_LVL(status, sdd);
}
static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
@@ -443,20 +467,19 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
 	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
}
- if (!val)
- return -EIO;
-
if (dma_mode) {
u32 status;
/*
+* If the previous xfer was completed within timeout, then
+* proceed further else return -EIO.
* DmaTx returns after simply writing data in the FIFO,
* w/o waiting for real transmission on the bus to finish.
* DmaRx returns only after Dma read data from FIFO which
* needs bus transmission to finish, so we don't worry if
* Xfer involved Rx(with or without Tx).
*/
-		if (xfer->rx_buf == NULL) {
+		if (val && !xfer->rx_buf) {
val = msecs_to_loops(10);
status = readl(regs + S3C64XX_SPI_STATUS);
while ((TX_FIFO_LVL(status, sdd)
@@ -466,30 +489,53 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
status = readl(regs + S3C64XX_SPI_STATUS);
}
- if (!val)
- return -EIO;
}
+
+ /* If timed out while checking rx/tx status return error */
+ if (!val)
+ return -EIO;
} else {
+ int loops;
+ u32 cpy_len;
+ u8 *buf;
+
/* If it was only Tx */
-		if (xfer->rx_buf == NULL) {
+		if (!xfer->rx_buf) {
 			sdd->state &= ~TXBUSY;
return 0;
}
-		switch (sdd->cur_bpw) {
-		case 32:
-			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
-				xfer->rx_buf, xfer->len / 4);
-			break;
-		case 16:
-			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
-				xfer->rx_buf, xfer->len / 2);
-			break;
-		default:
-			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
-