Patch against 2.6.30

Use the scatterlist mapping iterator to map sequential pages, and use half
of the MMC data memory for reading and the other half for writing. Open
issue: the slow clock setting and resetting during power-up. Tested on the
Android framework.
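
For context, here is a minimal sketch (not part of the patch) of the sg_miter
read path it introduces. The function name and the data_base/data_mem_size
parameters are placeholders for the driver's host->data_base and
resource_size(host->data_mem); only the scatterlist calls are the real
kernel API.

#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/types.h>

/*
 * Sketch only: copy 'blocks' scatterlist segments out of a shared device
 * RAM window whose upper half is reserved for reads.
 */
static void sketch_pio_read(u16 __iomem *data_base, size_t data_mem_size,
			    struct scatterlist *sgl, unsigned int sg_len,
			    size_t blocks)
{
	struct sg_mapping_iter miter;
	/* start halfway into the window: size/4 in u16 units == size/2 bytes */
	const u16 __iomem *from = data_base + data_mem_size / 4;

	sg_miter_start(&miter, sgl, sg_len, SG_MITER_TO_SG);

	while (blocks-- > 0 && sg_miter_next(&miter)) {
		/* miter.addr is a kernel mapping of the current segment */
		memcpy_fromio(miter.addr, from, miter.length);
		from += miter.length >> 1;	/* advance in u16 steps */
	}

	sg_miter_stop(&miter);
}

The patch itself keeps the driver's plain memcpy() on the __iomem pointer;
memcpy_fromio() above is only the sparse-clean equivalent.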

---
Not-really-signed-off-by: Michael Trimarchi <[email protected]>

--- openmoko/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-mci.c	2009-09-10 09:40:30.000000000 +0200
+++ drivers/mfd/glamo/glamo-mci.c	2009-09-24 18:34:09.000000000 +0200
@@ -11,6 +11,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/bio.h>
 #include <linux/mmc/mmc.h>
 #include <linux/mmc/sd.h>
 #include <linux/mmc/host.h>
@@ -24,6 +25,7 @@
 #include <linux/io.h>
 #include <linux/regulator/consumer.h>
 #include <linux/mfd/glamo.h>
+#include <asm/unaligned.h>
 
 #include "glamo-core.h"
 #include "glamo-regs.h"
@@ -55,13 +57,16 @@
 	struct work_struct irq_work;
 	struct work_struct read_work;
 
-	unsigned clk_enabled : 1;
+	unsigned clk_enabled:1;
+	int force_slow_during_powerup;
 
 };
 
-static void glamo_mci_send_request(struct mmc_host *mmc, struct mmc_request* mrq);
+static void glamo_mci_send_request(struct mmc_host *mmc,
+				   struct mmc_request *mrq);
+
 static void glamo_mci_send_command(struct glamo_mci_host *host,
-				  struct mmc_command *cmd);
+				   struct mmc_command *cmd);
 
 /*
  * Max SD clock rate
@@ -71,7 +76,7 @@
  *
  * you can override this on kernel commandline using
  *
- *   glamo_mci.sd_max_clk=10000000
+ * glamo_mci.sd_max_clk = 10000000
  *
  * for example
  */
@@ -84,7 +89,7 @@
  *
  * you can override this on kernel commandline using
  *
- *   glamo_mci.sd_slow_ratio=8
+ *   glamo_mci.sd_slow_ratio = 8
  *
  * for example
  *
@@ -139,14 +144,16 @@
 	glamo_reg_write(glamo, reg, tmp);
 }
 
-static void glamo_mci_clock_disable(struct glamo_mci_host *host) {
+static void glamo_mci_clock_disable(struct glamo_mci_host *host)
+{
 	if (host->clk_enabled) {
 		glamo_engine_div_disable(host->core, GLAMO_ENGINE_MMC);
 		host->clk_enabled = 0;
 	}
 }
 
-static void glamo_mci_clock_enable(struct glamo_mci_host *host) {
+static void glamo_mci_clock_enable(struct glamo_mci_host *host)
+{
 	del_timer_sync(&host->disable_timer);
 
 	if (!host->clk_enabled) {
@@ -155,50 +162,78 @@
 	}
 }
 
-static void glamo_mci_disable_timer(unsigned long data) {
-	struct glamo_mci_host *host = (struct glamo_mci_host *)data;
+static void glamo_mci_disable_timer(unsigned long data)
+{
+	struct glamo_mci_host *host = (struct glamo_mci_host *) data;
+
 	glamo_mci_clock_disable(host);
 }
 
 
 static void do_pio_read(struct glamo_mci_host *host, struct mmc_data *data)
 {
-	struct scatterlist *sg;
-	u16 __iomem *from_ptr = host->data_base;
+	struct sg_mapping_iter miter;
+	u16 __iomem *from_ptr;
+	size_t blocks = data->blocks;
 	void *sg_pointer;
 
-	dev_dbg(&host->pdev->dev, "pio_read():\n");
-	for (sg = data->sg; sg; sg = sg_next(sg)) {
-		sg_pointer = page_address(sg_page(sg)) + sg->offset;
 
+	from_ptr = host->data_base + (resource_size(host->data_mem) / 4);
+
+	sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_TO_SG);
+	dev_dbg(&host->pdev->dev, "pio_read(): data len %d\n", data->sg_len);
+
+	while (blocks-- > 0) {
+		if (!sg_miter_next(&miter))
+			break;
 
-		memcpy(sg_pointer, from_ptr, sg->length);
-		from_ptr += sg->length >> 1;
+		sg_pointer = miter.addr;
 
-		data->bytes_xfered += sg->length;
+		memcpy(sg_pointer, from_ptr, miter.length);
+		from_ptr += miter.length >> 1;
+
+		data->bytes_xfered += miter.length;
 	}
 
 	dev_dbg(&host->pdev->dev, "pio_read(): "
 			"complete (no more data).\n");
+
+	sg_miter_stop(&miter);
+
+	if (host->force_slow_during_powerup)
+		host->force_slow_during_powerup = 0;
 }
 
 static void do_pio_write(struct glamo_mci_host *host, struct mmc_data *data)
 {
-	struct scatterlist *sg;
+	struct sg_mapping_iter miter;
+	size_t blocks = data->blocks;
 	u16 __iomem *to_ptr = host->data_base;
 	void *sg_pointer;
 
 	dev_dbg(&host->pdev->dev, "pio_write():\n");
-	for (sg = data->sg; sg; sg = sg_next(sg)) {
-		sg_pointer = page_address(sg_page(sg)) + sg->offset;
 
-		data->bytes_xfered += sg->length;
+	sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_FROM_SG);
+
+	while (blocks-- > 0) {
+
+		if (!sg_miter_next(&miter))
+			break;
+
+		sg_pointer = miter.addr;
+
+		memcpy(to_ptr, sg_pointer, miter.length);
+		to_ptr += miter.length >> 1;
 
-		memcpy(to_ptr, sg_pointer, sg->length);
-		to_ptr += sg->length >> 1;
+		data->bytes_xfered += miter.length;
 	}
 
+	sg_miter_stop(&miter);
+
 	dev_dbg(&host->pdev->dev, "pio_write(): complete\n");
+
+	if (host->force_slow_during_powerup)
+		host->force_slow_during_powerup = 0;
 }
 
 static int glamo_mci_set_card_clock(struct glamo_mci_host *host, int freq)
@@ -207,7 +242,8 @@
 
 	if (freq) {
 		glamo_mci_clock_enable(host);
-		real_rate = glamo_engine_reclock(host->core, GLAMO_ENGINE_MMC, freq);
+		real_rate = glamo_engine_reclock(host->core,
+						 GLAMO_ENGINE_MMC, freq);
 	} else {
 		glamo_mci_clock_disable(host);
 	}
@@ -215,51 +251,51 @@
 	return real_rate;
 }
 
-static void glamo_mci_request_done(struct glamo_mci_host *host, struct
-mmc_request *mrq) {
+static void glamo_mci_request_done(struct glamo_mci_host *host,
+				   struct mmc_request *mrq)
+{
 	mod_timer(&host->disable_timer, jiffies + HZ / 16);
+	/*
+	 * disable the initial slow start after first bulk transfer
+	 */
 	mmc_request_done(host->mmc, mrq);
 }
 
 
 static void glamo_mci_irq_worker(struct work_struct *work)
 {
-	struct glamo_mci_host *host = container_of(work, struct glamo_mci_host,
-												irq_work);
+	struct glamo_mci_host *host =
+			container_of(work, struct glamo_mci_host, irq_work);
+
 	struct mmc_command *cmd;
 	uint16_t status;
+
 	if (!host->mrq || !host->mrq->cmd)
 		return;
 
 	cmd = host->mrq->cmd;
 
-#if 0
-	if (cmd->data->flags & MMC_DATA_READ) {
-		return;
-	}
-#endif
-
 	status = glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1);
 	dev_dbg(&host->pdev->dev, "status = 0x%04x\n", status);
 
-	/* we ignore a data timeout report if we are also told the data came */
+	/* We ignore a data timeout report if we are also told the data came. */
 	if (status & GLAMO_STAT1_MMC_RB_DRDY)
 		status &= ~GLAMO_STAT1_MMC_DTOUT;
 
 	if (status & (GLAMO_STAT1_MMC_RTOUT | GLAMO_STAT1_MMC_DTOUT))
 		cmd->error = -ETIMEDOUT;
-	if (status & (GLAMO_STAT1_MMC_BWERR | GLAMO_STAT1_MMC_BRERR)) {
+
+	if (status & (GLAMO_STAT1_MMC_BWERR | GLAMO_STAT1_MMC_BRERR))
 		cmd->error = -EILSEQ;
-	}
+
 	if (cmd->error) {
 		dev_info(&host->pdev->dev, "Error after cmd: 0x%x\n", status);
 		goto done;
 	}
 
-	/* issue STOP if we have been given one to use */
-	if (host->mrq->stop) {
+	/* Issue STOP if we have been given one to use. */
+	if (host->mrq->stop)
 		glamo_mci_send_command(host, host->mrq->stop);
-	}
 
 	if (cmd->data->flags & MMC_DATA_READ)
 		do_pio_read(host, cmd->data);
@@ -271,8 +307,9 @@
 
 static void glamo_mci_read_worker(struct work_struct *work)
 {
-	struct glamo_mci_host *host = container_of(work, struct glamo_mci_host,
-												read_work);
+	struct glamo_mci_host *host =
+			container_of(work, struct glamo_mci_host, read_work);
+
 	struct mmc_command *cmd;
 	uint16_t status;
 	uint16_t blocks_ready;
@@ -293,7 +330,8 @@
 		if (status & (GLAMO_STAT1_MMC_BWERR | GLAMO_STAT1_MMC_BRERR))
 			cmd->error = -EILSEQ;
 		if (cmd->error) {
-			dev_info(&host->pdev->dev, "Error after cmd: 0x%x\n", status);
+			dev_info(&host->pdev->dev,
+				 "Error after cmd: 0x%x\n", status);
 			goto done;
 		}
 
@@ -303,16 +341,19 @@
 		if (data_ready == data_read)
 			yield();
 
-		while(sg && data_read + sg->length <= data_ready) {
-			sg_pointer = page_address(sg_page(sg)) + sg->offset;
+		while (sg && data_read + sg->length <= data_ready) {
+			sg_pointer = kmap(sg_page(sg)) + sg->offset;
+
 			memcpy(sg_pointer, from_ptr, sg->length);
+			kunmap(sg_page(sg));
 			from_ptr += sg->length >> 1;
 
 			data_read += sg->length;
 			sg = sg_next(sg);
 		}
 
-	} while(sg);
+	} while (sg);
+
 	cmd->data->bytes_xfered = data_read;
 
 	do {
@@ -326,20 +367,20 @@
 		status = glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1);
 	} while (!(status & GLAMO_STAT1_MMC_IDLE));
 done:
-	host->mrq = NULL;
 	glamo_mci_request_done(host, cmd->mrq);
+	host->mrq = NULL;
 }
 
 static irqreturn_t glamo_mci_irq(int irq, void *devid)
 {
-	struct glamo_mci_host *host = (struct glamo_mci_host*)devid;
+	struct glamo_mci_host *host = (struct glamo_mci_host *)devid;
 	schedule_work(&host->irq_work);
 
 	return IRQ_HANDLED;
 }
 
 static void glamo_mci_send_command(struct glamo_mci_host *host,
-				  struct mmc_command *cmd)
+				   struct mmc_command *cmd)
 {
 	u8 u8a[6];
 	u16 fire = 0;
@@ -361,7 +402,8 @@
 	u8a[2] = (u8)(cmd->arg >> 16);
 	u8a[3] = (u8)(cmd->arg >> 8);
 	u8a[4] = (u8)cmd->arg;
-	u8a[5] = (crc7(0, u8a, 5) << 1) | 0x01; /* crc7 on first 5 bytes of packet */
+	/* crc7 on first 5 bytes of packet */
+	u8a[5] = (crc7(0, u8a, 5) << 1) | 0x01;
 
 	/* issue the wire-order array including CRC in register order */
 	glamo_reg_write(host, GLAMO_REG_MMC_CMD_REG1, ((u8a[4] << 8) | u8a[5]));
@@ -429,23 +471,25 @@
 	case SD_APP_SEND_SCR:
 	case MMC_READ_MULTIPLE_BLOCK:
 		/* we will get an interrupt off this */
-		if (!cmd->mrq->stop)
+		if (!cmd->mrq->stop) {
 			/* multiblock no stop */
 			fire |= GLAMO_FIRE_MMC_CC_MBRNS;
-		else
+		} else {
 			 /* multiblock with stop */
 			fire |= GLAMO_FIRE_MMC_CC_MBRS;
+		}
 		break;
 	case MMC_WRITE_BLOCK:
 		fire |= GLAMO_FIRE_MMC_CC_SBW; /* single block write */
 		break;
 	case MMC_WRITE_MULTIPLE_BLOCK:
-		if (cmd->mrq->stop)
+		if (cmd->mrq->stop) {
 			 /* multiblock with stop */
 			fire |= GLAMO_FIRE_MMC_CC_MBWS;
-		else
+		} else {
 			 /* multiblock NO stop-- 'RESERVED'? */
 			fire |= GLAMO_FIRE_MMC_CC_MBWNS;
+		}
 		break;
 	case MMC_STOP_TRANSMISSION:
 		fire |= GLAMO_FIRE_MMC_CC_STOP; /* STOP */
@@ -503,24 +547,26 @@
 	if (cmd->flags & MMC_RSP_PRESENT) {
 		if (cmd->flags & MMC_RSP_136) {
 			cmd->resp[3] = readw(&reg_resp[0]) |
-						   (readw(&reg_resp[1]) << 16);
+					     (readw(&reg_resp[1]) << 16);
 			cmd->resp[2] = readw(&reg_resp[2]) |
-						   (readw(&reg_resp[3]) << 16);
+					     (readw(&reg_resp[3]) << 16);
 			cmd->resp[1] = readw(&reg_resp[4]) |
-						   (readw(&reg_resp[5]) << 16);
+					     (readw(&reg_resp[5]) << 16);
 			cmd->resp[0] = readw(&reg_resp[6]) |
-						   (readw(&reg_resp[7]) << 16);
+					     (readw(&reg_resp[7]) << 16);
 		} else {
 			cmd->resp[0] = (readw(&reg_resp[0]) >> 8) |
-						   (readw(&reg_resp[1]) << 8) |
-						   ((readw(&reg_resp[2])) << 24);
+					(readw(&reg_resp[1]) << 8) |
+					((readw(&reg_resp[2])) << 24);
 		}
 	}
 
 #if 0
-	/* We'll only get an interrupt when all data has been transfered.
-	   By starting to copy data when it's avaiable we can increase throughput by
-	   up to 30%. */
+	/*
+	 * We'll only get an interrupt when all data has been transferred.
+	 * By starting to copy data when it's available we can increase
+	 * throughput by up to 30%.
+	 */
 	if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
 		schedule_work(&host->read_work);
 #endif
@@ -537,9 +583,8 @@
 	data->bytes_xfered = 0;
 
 	/* if write, prep the write into the shared RAM before the command */
-	if (data->flags & MMC_DATA_WRITE) {
+	if (data->flags & MMC_DATA_WRITE)
 		do_pio_write(host, data);
-	}
 
 	dev_dbg(&host->pdev->dev, "(blksz=%d, count=%d)\n",
 				   data->blksz, data->blocks);
@@ -547,22 +592,22 @@
 }
 
 static int glamo_mci_irq_poll(struct glamo_mci_host *host,
-				struct mmc_command *cmd)
+			      struct mmc_command *cmd)
 {
 	int timeout = 1000000;
 	/*
-	 * if the glamo INT# line isn't wired (*cough* it can happen)
+	 * If the glamo INT# line isn't wired (*cough* it can happen)
 	 * I'm afraid we have to spin on the IRQ status bit and "be
-	 * our own INT# line"
+	 * our own INT# line".
 	 */
+
 	/*
 	 * we have faith we will get an "interrupt"...
 	 * but something insane like suspend problems can mean
 	 * we spin here forever, so we timeout after a LONG time
 	 */
-	while ((!(readw(host->core->base +
-		 GLAMO_REG_IRQ_STATUS) & GLAMO_IRQ_MMC)) &&
-		   (timeout--));
+	while ((!(readw(host->core->base + GLAMO_REG_IRQ_STATUS) &
+		GLAMO_IRQ_MMC)) && (timeout--));
 
 	if (timeout < 0) {
 		if (cmd->data->error)
@@ -570,9 +615,10 @@
 		dev_err(&host->pdev->dev, "Payload timeout\n");
 		return -ETIMEDOUT;
 	}
+
 	/* ack this interrupt source */
 	writew(GLAMO_IRQ_MMC, host->core->base +
-		   GLAMO_REG_IRQ_CLEAR);
+	       GLAMO_REG_IRQ_CLEAR);
 
 	/* yay we are an interrupt controller! -- call the ISR
 	 * it will stop clock to card
@@ -582,7 +628,8 @@
 	return 0;
 }
 
-static void glamo_mci_send_request(struct mmc_host *mmc, struct mmc_request *mrq)
+static void glamo_mci_send_request(struct mmc_host *mmc,
+				   struct mmc_request *mrq)
 {
 	struct glamo_mci_host *host = mmc_priv(mmc);
 	struct mmc_command *cmd = mrq->cmd;
@@ -590,17 +637,17 @@
 	glamo_mci_clock_enable(host);
 	host->request_counter++;
 	if (cmd->data) {
-		if(glamo_mci_prepare_pio(host, cmd->data)) {
+		if (glamo_mci_prepare_pio(host, cmd->data)) {
 			cmd->error = -EIO;
 			cmd->data->error = -EIO;
 			goto done;
 		}
 	}
 
-	dev_dbg(&host->pdev->dev,"cmd 0x%x, "
-		 "arg 0x%x data=%p mrq->stop=%p flags 0x%x\n",
-		 cmd->opcode, cmd->arg, cmd->data, cmd->mrq->stop,
-		 cmd->flags);
+	dev_dbg(&host->pdev->dev, "cmd 0x%x, "
+		"arg 0x%x data=%p mrq->stop=%p flags 0x%x\n",
+		cmd->opcode, cmd->arg, cmd->data, cmd->mrq->stop,
+		cmd->flags);
 
 	glamo_mci_send_command(host, cmd);
 
@@ -629,18 +676,20 @@
 }
 
 static void glamo_mci_set_power_mode(struct glamo_mci_host *host,
-				unsigned char power_mode) {
+				     unsigned char power_mode)
+{
 	int ret;
 
 	if (power_mode == host->power_mode)
 		return;
 
-	switch(power_mode) {
+	switch (power_mode) {
 	case MMC_POWER_UP:
 		if (host->power_mode == MMC_POWER_OFF) {
 			ret = regulator_enable(host->regulator);
 			if (ret)
-				dev_err(&host->pdev->dev, "Failed to enable regulator: %d\n", ret);
+				dev_err(&host->pdev->dev,
+					"Failed to enable regulator: %d\n", ret);
 		}
 		break;
 	case MMC_POWER_ON:
@@ -652,10 +701,13 @@
 
 		ret = regulator_disable(host->regulator);
 		if (ret)
-			dev_warn(&host->pdev->dev, "Failed to disable regulator: %d\n", ret);
+			dev_warn(&host->pdev->dev,
+				 "Failed to disable regulator: %d\n", ret);
 		break;
 	}
 	host->power_mode = power_mode;
+	if (host->force_slow_during_powerup)
+		mdelay(1);
 }
 
 static void glamo_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -672,11 +724,26 @@
 	if (host->vdd != ios->vdd) {
 		ret = mmc_regulator_set_ocr(host->regulator, ios->vdd);
 		if (ret)
-			dev_err(&host->pdev->dev, "Failed to set regulator voltage: %d\n", ret);
+			dev_err(&host->pdev->dev,
+				"Failed to set regulator voltage: %d\n", ret);
 		else
 			host->vdd = ios->vdd;
 	}
-	rate = glamo_mci_set_card_clock(host, ios->clock);
+
+	if ((ios->power_mode == MMC_POWER_ON) ||
+	    (ios->power_mode == MMC_POWER_UP)) {
+		/*
+		 * we should use very slow clock until first bulk
+		 * transfer completes OK
+		 */
+		host->force_slow_during_powerup = 1;
+		dev_dbg(&host->pdev->dev, "Set slow during power up\n");
+	}
+
+	if (host->force_slow_during_powerup)
+		rate = glamo_mci_set_card_clock(host, 1000000);
+	else
+		rate = glamo_mci_set_card_clock(host, ios->clock);
 
 	if ((ios->power_mode == MMC_POWER_ON) ||
 	    (ios->power_mode == MMC_POWER_UP)) {
@@ -694,12 +761,13 @@
 		bus_width = GLAMO_BASIC_MMC_EN_4BIT_DATA;
 
 	sd_drive = (rate * 4) / host->clk_rate;
+
 	if (sd_drive > 3)
 		sd_drive = 3;
 
 	glamo_reg_set_bit_mask(host, GLAMO_REG_MMC_BASIC,
-					       GLAMO_BASIC_MMC_EN_4BIT_DATA | 0xb0,
-						   bus_width | sd_drive << 6);
+			       GLAMO_BASIC_MMC_EN_4BIT_DATA | 0xb0,
+			       bus_width | sd_drive << 6);
 }
 
 
@@ -754,25 +822,25 @@
 	host->mmio_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!host->mmio_mem) {
 		dev_err(&pdev->dev,
-			"failed to get io memory region resouce.\n");
+			"Failed to get io memory region resource.\n");
 		ret = -ENOENT;
 		goto probe_regulator_put;
 	}
 
 	host->mmio_mem = request_mem_region(host->mmio_mem->start,
-	                                    resource_size(host->mmio_mem),
-	                                    pdev->name);
+					    resource_size(host->mmio_mem),
+					    pdev->name);
 
 	if (!host->mmio_mem) {
-		dev_err(&pdev->dev, "failed to request io memory region.\n");
+		dev_err(&pdev->dev, "Failed to request io memory region.\n");
 		ret = -ENOENT;
 		goto probe_regulator_put;
 	}
 
 	host->mmio_base = ioremap(host->mmio_mem->start,
-	                          resource_size(host->mmio_mem));
+				  resource_size(host->mmio_mem));
 	if (!host->mmio_base) {
-		dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
+		dev_err(&pdev->dev, "Failed to ioremap() io memory region.\n");
 		ret = -EINVAL;
 		goto probe_free_mem_region_mmio;
 	}
@@ -788,27 +856,28 @@
 	}
 
 	host->data_mem = request_mem_region(host->data_mem->start,
-	                                    resource_size(host->data_mem),
-										pdev->name);
+					    resource_size(host->data_mem),
+					    pdev->name);
 
 	if (!host->data_mem) {
-		dev_err(&pdev->dev, "failed to request io memory region.\n");
+		dev_err(&pdev->dev, "Failed to request io memory region.\n");
 		ret = -ENOENT;
 		goto probe_iounmap_mmio;
 	}
+
 	host->data_base = ioremap(host->data_mem->start,
-	                          resource_size(host->data_mem));
+				  resource_size(host->data_mem));
 
 	if (host->data_base == 0) {
-		dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
+		dev_err(&pdev->dev, "Failed to ioremap() io memory region.\n");
 		ret = -EINVAL;
 		goto probe_free_mem_region_data;
 	}
 
-	ret = request_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC), glamo_mci_irq, IRQF_SHARED,
-	               pdev->name, host);
+	ret = request_irq(IRQ_GLAMO_MMC,
+			  glamo_mci_irq, IRQF_SHARED, pdev->name, host);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to register irq.\n");
+		dev_err(&pdev->dev, "Failed to register irq.\n");
 		goto probe_iounmap_data;
 	}
 
@@ -817,23 +886,24 @@
 	host->clk_rate = glamo_pll_rate(host->core, GLAMO_PLL1);
 
 	/* explain our host controller capabilities */
-	mmc->ops       = &glamo_mci_ops;
-	mmc->ocr_avail = mmc_regulator_get_ocrmask(host->regulator);
-	mmc->caps      = MMC_CAP_4_BIT_DATA |
-	                 MMC_CAP_MMC_HIGHSPEED |
-	                 MMC_CAP_SD_HIGHSPEED;
-	mmc->f_min     = host->clk_rate / 256;
-	mmc->f_max     = sd_max_clk;
+	mmc->ops	= &glamo_mci_ops;
+	mmc->ocr_avail	= mmc_regulator_get_ocrmask(host->regulator);
+	mmc->caps	= MMC_CAP_4_BIT_DATA |
+			  MMC_CAP_MMC_HIGHSPEED |
+			  MMC_CAP_SD_HIGHSPEED;
+	mmc->f_min	= host->clk_rate / 256;
+	mmc->f_max	= sd_max_clk;
 
 	mmc->max_blk_count = (1 << 16) - 1; /* GLAMO_REG_MMC_RB_BLKCNT */
 	mmc->max_blk_size  = (1 << 12) - 1; /* GLAMO_REG_MMC_RB_BLKLEN */
-	mmc->max_req_size  = resource_size(host->data_mem);
+	mmc->max_req_size  = resource_size(host->data_mem) / 2;
 	mmc->max_seg_size  = mmc->max_req_size;
 	mmc->max_phys_segs = 128;
 	mmc->max_hw_segs   = 128;
 
 	if (mmc->ocr_avail < 0) {
-		dev_warn(&pdev->dev, "Failed to get ocr list for regulator: %d.\n",
+		dev_warn(&pdev->dev,
+			 "Failed to get ocr list for regulator: %d.\n",
 				mmc->ocr_avail);
 		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 	}
@@ -844,9 +914,11 @@
 	glamo_engine_reset(host->core, GLAMO_ENGINE_MMC);
 
 	glamo_reg_write(host, GLAMO_REG_MMC_WDATADS1,
-			(u16)(host->data_mem->start));
+			(u16)(host->data_mem->start +
+			      (resource_size(host->data_mem) / 2)));
 	glamo_reg_write(host, GLAMO_REG_MMC_WDATADS2,
-			(u16)(host->data_mem->start >> 16));
+			(u16)((host->data_mem->start +
+			      (resource_size(host->data_mem) / 2)) >> 16));
 
 	glamo_reg_write(host, GLAMO_REG_MMC_RDATADS1,
 			(u16)(host->data_mem->start));
@@ -854,14 +926,15 @@
 			(u16)(host->data_mem->start >> 16));
 
 	setup_timer(&host->disable_timer, glamo_mci_disable_timer,
-				(unsigned long)host);
+		    (unsigned long) host);
 
-	if ((ret = mmc_add_host(mmc))) {
+	ret = mmc_add_host(mmc);
+	if (ret) {
 		dev_err(&pdev->dev, "failed to add mmc host.\n");
 		goto probe_freeirq;
 	}
 
-	dev_info(&pdev->dev,"initialisation done.\n");
+	dev_info(&pdev->dev, "Initialisation done.\n");
 	return 0;
 
 probe_freeirq:
@@ -869,11 +942,13 @@
 probe_iounmap_data:
 	iounmap(host->data_base);
 probe_free_mem_region_data:
-	release_mem_region(host->data_mem->start, resource_size(host->data_mem));
+	release_mem_region(host->data_mem->start,
+			   resource_size(host->data_mem));
 probe_iounmap_mmio:
 	iounmap(host->mmio_base);
 probe_free_mem_region_mmio:
-	release_mem_region(host->mmio_mem->start, resource_size(host->mmio_mem));
+	release_mem_region(host->mmio_mem->start,
+			   resource_size(host->mmio_mem));
 probe_regulator_put:
 	regulator_put(host->regulator);
 probe_free_host:
@@ -892,8 +967,11 @@
 	mmc_remove_host(mmc);
 	iounmap(host->mmio_base);
 	iounmap(host->data_base);
-	release_mem_region(host->mmio_mem->start, resource_size(host->mmio_mem));
-	release_mem_region(host->data_mem->start, resource_size(host->data_mem));
+
+	release_mem_region(host->mmio_mem->start,
+			   resource_size(host->mmio_mem));
+	release_mem_region(host->data_mem->start,
+			   resource_size(host->data_mem));
 
 	regulator_put(host->regulator);
 
@@ -930,9 +1008,12 @@
 	glamo_engine_reset(host->core, GLAMO_ENGINE_MMC);
 
 	glamo_reg_write(host, GLAMO_REG_MMC_WDATADS1,
-			(u16)(host->data_mem->start));
+			(u16)(host->data_mem->start +
+			      (resource_size(host->data_mem) / 2)));
+
 	glamo_reg_write(host, GLAMO_REG_MMC_WDATADS2,
-			(u16)(host->data_mem->start >> 16));
+			(u16)((host->data_mem->start +
+			      (resource_size(host->data_mem) / 2)) >> 16));
 
 	glamo_reg_write(host, GLAMO_REG_MMC_RDATADS1,
 			(u16)(host->data_mem->start));
@@ -957,8 +1038,7 @@
 #endif /* CONFIG_PM */
 
 
-static struct platform_driver glamo_mci_driver =
-{
+static struct platform_driver glamo_mci_driver = {
 	.probe  = glamo_mci_probe,
 	.remove = glamo_mci_remove,
 	.driver = {
