Re: [PATCH v2 14/16] mmc: block: Implement HPI invocation and handling logic.

2012-05-09 Thread Arnd Bergmann
On Wednesday 09 May 2012, kdorf...@codeaurora.org wrote:
  +static bool mmc_can_do_foreground_hpi(struct mmc_queue *mq,
  + struct request *req, unsigned int thpi)
  +{
  +
  + /*
  +  * If some time has elapsed since the issuing of previous write
  +  * command, or if the size of the request was too small, there's
  +  * no point in preempting it. Check if it's worthwhile to preempt
  +  */
  + int time_elapsed = jiffies_to_msecs(jiffies -
  + mq->mqrq_cur->mmc_active.mrq->cmd->started_time);
  +
  + if (time_elapsed >= thpi)
  + return true;
 Some host controllers (or DMA engines) have the ability to get the byte
 count of the current transaction. It may be implemented as a host API
 (similar to the abort ops). Then you would have a more accurate
 estimation of worthiness.

I'm rather sure that the byte count is not relevant here: it's not
the actual write that is taking so long, it's the garbage collection
that the device does internally before the write actually gets done.
The data transfer is much faster than the time we are waiting for here.
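
For a rough sense of scale (numbers purely illustrative, not from the
patch): a 16 KiB write moved at ~40 MB/s occupies the bus for only about

	16 KiB / 40 MB/s  ~=  0.4 ms

whereas a write stalled behind internal garbage collection can take tens
to hundreds of milliseconds, which is the regime the elapsed-time check
in mmc_can_do_foreground_hpi() is aimed at.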

Arnd


Re: [PATCH v2 14/16] mmc: block: Implement HPI invocation and handling logic.

2012-05-09 Thread S, Venkatraman
On Wed, May 9, 2012 at 2:05 PM, <kdorf...@codeaurora.org> wrote:

 +static bool mmc_can_do_foreground_hpi(struct mmc_queue *mq,
 +                     struct request *req, unsigned int thpi)
 +{
 +
 +     /*
 +      * If some time has elapsed since the issuing of previous write
 +      * command, or if the size of the request was too small, there's
 +      * no point in preempting it. Check if it's worthwhile to preempt
 +      */
 +     int time_elapsed = jiffies_to_msecs(jiffies -
  +                     mq->mqrq_cur->mmc_active.mrq->cmd->started_time);
  +
  +     if (time_elapsed >= thpi)
 +                     return true;
 Some host controllers (or DMA engines) have the ability to get the byte
 count of the current transaction. It may be implemented as a host API
 (similar to the abort ops). Then you would have a more accurate
 estimation of worthiness.


The byte count returned by the DMA or the host controller doesn't mean
that the data has actually been burnt into the device (due to internal
buffering). This is one of the reasons for defining the
CORRECTLY_PRG_SECTORS_NUM register in the standard, which can be queried
to find out how much was correctly written.
Unfortunately it can only be queried after the abort has been issued.
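
For illustration, the readout itself is just a 4-byte, LSB-first field in
EXT_CSD. A minimal sketch, assuming the caller has already re-read the
EXT_CSD image (SEND_EXT_CSD) after the HPI abort; the offset follows the
eMMC 4.5 layout (worth double-checking against the JEDEC spec revision in
use) and the names below are local to this sketch, not part of the patch:

#include <linux/types.h>

/* CORRECTLY_PRG_SECTORS_NUM: EXT_CSD bytes [245:242], LSB first. */
#define EXT_CSD_CORRECTLY_PRG_SECTORS_NUM	242

static u32 mmc_correctly_prg_sectors(const u8 *ext_csd)
{
	return (u32)ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM] |
	       (u32)ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 1] << 8 |
	       (u32)ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 2] << 16 |
	       (u32)ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 3] << 24;
}

The result could then feed the partial-completion path that the TODO in
mmc_handle_aborted_request() below leaves open, e.g. completing the
correctly programmed sectors << 9 bytes of the aborted request instead of
failing it outright.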

 +
 +     return false;
 +}

 Thanks, Kostya



[PATCH v2 14/16] mmc: block: Implement HPI invocation and handling logic.

2012-05-03 Thread Venkatraman S
Intercept commands which require high priority treatment.
If the ongoing command can be preempted according to the JEDEC HPI
definition and a sufficient window exists to complete the ongoing
request, invoke HPI to abort the current request and issue
the high priority request.
Otherwise, process the command normally.

Signed-off-by: Venkatraman S <svenk...@ti.com>
---
 drivers/mmc/card/block.c |  131 +++---
 drivers/mmc/card/queue.h |1 +
 2 files changed, 124 insertions(+), 8 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 11833e4..3dd662b 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1276,7 +1276,7 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
return ret;
 }
 
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+static int mmc_blk_execute_rw_rq(struct mmc_queue *mq, struct request *rqc)
 {
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
@@ -1285,22 +1285,31 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
struct request *req;
-   struct mmc_async_req *areq;
+   struct mmc_async_req *prev_req, *cur_req;
 
if (!rqc && !mq->mqrq_prev->req)
return 0;
 
+   mq->mqrq_interrupted = NULL;
+
do {
if (rqc) {
mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
-   areq = &mq->mqrq_cur->mmc_active;
-   } else
-   areq = NULL;
-   areq = mmc_start_req(card->host, areq, (int *) &status);
-   if (!areq)
+   cur_req = &mq->mqrq_cur->mmc_active;
+   } else {
+   cur_req = NULL;
+   }
+   prev_req = mmc_start_req(card->host, cur_req, (int *) &status);
+   if (!prev_req)
return 0;
 
-   mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+   if (cur_req &&
+   cur_req->mrq->cmd->cmd_attr & MMC_CMD_PREEMPTIBLE) {
+   mq->mqrq_interrupted = mq->mqrq_cur;
+   }
+
+   mq_rq = container_of(prev_req,
+   struct mmc_queue_req, mmc_active);
brq = &mq_rq->brq;
req = mq_rq->req;
type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
@@ -1406,6 +1415,112 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
return 0;
 }
 
+#define HPI_CHECK  (REQ_RW_SWAPIN | REQ_RW_DMPG)
+
+static bool mmc_can_do_foreground_hpi(struct mmc_queue *mq,
+   struct request *req, unsigned int thpi)
+{
+
+   /*
+* If some time has elapsed since the issuing of previous write
+* command, or if the size of the request was too small, there's
+* no point in preempting it. Check if it's worthwhile to preempt
+*/
+   int time_elapsed = jiffies_to_msecs(jiffies -
+   mq->mqrq_cur->mmc_active.mrq->cmd->started_time);
+
+   if (time_elapsed >= thpi)
+   return true;
+
+   return false;
+}
+
+/*
+ * When an HPI command has been issued for a foreground
+ * request, the host controller will finish the request,
+ * and its completion has to be handled differently.
+ */
+static struct mmc_async_req *mmc_handle_aborted_request(struct mmc_queue *mq,
+   int hpi_err)
+{
+   struct mmc_async_req *areq;
+   struct mmc_request *mrq;
+   struct mmc_queue_req *mq_rq;
+   struct mmc_blk_data *md = mq->data;
+   struct request *req;
+
+   BUG_ON(!mq->mqrq_interrupted);
+
+   areq = &mq->mqrq_interrupted->mmc_active;
+   mrq = areq->mrq;
+
+   /* Error checking is TBD */
+   mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+   req = mq_rq->req;
+   mmc_queue_bounce_post(mq_rq);
+
+   spin_lock_irq(&md->lock);
+   /*
+* TODO. Do the error translation as done in
+* blk_err_check here and propagate
+* the partial transfer status if applicable
+*/
+   __blk_end_request(req, -EIO, 0);
+   spin_unlock_irq(&md->lock);
+   return areq;
+}
+
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+   int ret;
+   struct mmc_blk_data *md = mq->data;
+   struct mmc_card *card = md->queue.card;
+   struct mmc_async_req *areq;
+
+   if (req && md->flags & MMC_HPI_SUPPORT) {
+   if (!((req->cmd_flags & HPI_CHECK) && mq->mqrq_interrupted))
+   goto no_preempt;
+   if (!mmc_can_do_foreground_hpi(mq, req,
+   card->preempt_time_threshold))
+   goto no_preempt;
+
+   pr_debug("Pre-empting ongoing request %pK\n",
+