RE: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-12-02 Thread Seungwon Jeon
Maya Erez wrote:
  Maya Erez wrote:
On Wed, Nov 2, 2011 at 1:33 PM, Seungwon Jeon tgih@samsung.com wrote:
 @@ -980,12 +988,67 @@ static int mmc_blk_err_check(struct mmc_card *card,
        if (!brq->data.bytes_xfered)
                return MMC_BLK_RETRY;

 +       if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
 +               if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
 +                       return MMC_BLK_PARTIAL;
 +               else
 +                       return MMC_BLK_SUCCESS;
 +       }
 +
        if (blk_rq_bytes(req) != brq->data.bytes_xfered)
                return MMC_BLK_PARTIAL;

        return MMC_BLK_SUCCESS;
  }

 +static int mmc_blk_packed_err_check(struct mmc_card *card,
 +                                   struct mmc_async_req *areq)
 +{
 +       struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
 +                                                   mmc_active);
 +       int err, check, status;
 +       u8 ext_csd[512];
 +
 +       check = mmc_blk_err_check(card, areq);
 +
 +       if (check == MMC_BLK_SUCCESS)
 +               return check;
  I think we need to check the status for all cases and not only in case
  of MMC_BLK_PARTIAL. For example, in cases where the header was
  transferred successfully but had logic errors (wrong number of sectors
  etc.) mmc_blk_err_check will return MMC_BLK_SUCCESS although the packed
  commands failed.
Similarly, Sahitya Tummala already mentioned this.
Other error cases will be checked in the next version.
Is the case you suggested about read or write?
The device may detect an error and stop transferring the data.
 Sahitya suggested to also check other error cases that mmc_blk_err_check
 returns (such as MMC_BLK_CMD_ERR, MMC_BLK_ECC_ERR and MMC_BLK_DATA_ERR).
 I suggest to also check the exception bit in the status even if
 mmc_blk_err_check returned success, since mmc_blk_err_check might not
 catch all the packed command failures. One example of such a failure is
 when the header of a read packed command has a logical error.
This part is modified in the next version.

Thanks,
Seungwon Jeon.
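For illustration, a minimal sketch of the unconditional exception check being
discussed, reusing the helpers from the quoted hunk above
(EXT_CSD_PACKED_FAILURE is an assumed bit name; the actual v2 patch may
differ):

static int mmc_blk_check_packed_event(struct mmc_card *card)
{
        u8 ext_csd[512];
        u32 status;
        int err;

        /* CMD13: read the card status, including the exception bit */
        err = get_card_status(card, &status, 0);
        if (err)
                return MMC_BLK_ABORT;

        /* No exception raised: nothing more to check */
        if (!(status & R1_EXP_EVENT))
                return MMC_BLK_SUCCESS;

        /* Read EXT_CSD to see which exception event fired */
        err = mmc_send_ext_csd(card, ext_csd);
        if (err)
                return MMC_BLK_ABORT;

        /* Assumed bit: packed failure reported in EXP_EVENTS_STATUS */
        if (ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & EXT_CSD_PACKED_FAILURE)
                return MMC_BLK_RETRY;

        return MMC_BLK_SUCCESS;
}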

 Thanks,
 Maya
 --
 Sent by a Consultant for Qualcomm Innovation Center, Inc.
 Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum
 
 
 




RE: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-28 Thread Seungwon Jeon
Maya Erez wrote:
   On Wed, Nov 2, 2011 at 1:33 PM, Seungwon Jeon tgih@samsung.com
  wrote:
@@ -943,7 +950,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
        * kind.  If it was a write, we may have transitioned to
        * program mode, which we have to wait for it to complete.
        */
-       if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+       if ((!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) ||
+                       (mq_mrq->packed_cmd == MMC_PACKED_WR_HDR)) {
 Since the header's direction is WRITE I don't think we also need to check
 if mq_mrq->packed_cmd == MMC_PACKED_WR_HDR since it will be covered by the
 original condition.
The header is written, but the original request is a read.
That means rq_data_dir(req) is READ, so the new condition is needed.
                u32 status;
                do {
                        int err = get_card_status(card, &status, 5);
 A general question, not related specifically to packed commands - Do you
 know why the status is not checked to see which error we got?
This status is for checking whether the card is ready for new data.
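For readers: decoding the R1 error bits from CMD13, as the question suggests,
could look roughly like this (a sketch using the standard R1 flags from
<linux/mmc/mmc.h>, not part of the posted patch):

        /* status was filled in by get_card_status() above */
        if (status & (R1_ADDRESS_ERROR | R1_BLOCK_LEN_ERROR))
                ;       /* classify as a command/argument error */
        if (status & R1_CARD_ECC_FAILED)
                ;       /* classify as an ECC error */
        if (status & R1_WP_VIOLATION)
                ;       /* classify as a data/protection error */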
@@ -980,12 +988,67 @@ static int mmc_blk_err_check(struct mmc_card *card,
       if (!brq->data.bytes_xfered)
               return MMC_BLK_RETRY;

+       if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
+               if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
+                       return MMC_BLK_PARTIAL;
+               else
+                       return MMC_BLK_SUCCESS;
+       }
+
       if (blk_rq_bytes(req) != brq->data.bytes_xfered)
               return MMC_BLK_PARTIAL;

       return MMC_BLK_SUCCESS;
 }

+static int mmc_blk_packed_err_check(struct mmc_card *card,
+                                   struct mmc_async_req *areq)
+{
+       struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+                                                   mmc_active);
+       int err, check, status;
+       u8 ext_csd[512];
+
+       check = mmc_blk_err_check(card, areq);
+
+       if (check == MMC_BLK_SUCCESS)
+               return check;
 I think we need to check the status for all cases and not only in case of
 MMC_BLK_PARTIAL. For example, in cases where the header was transferred
 successfully but had logic errors (wrong number of sectors etc.)
 mmc_blk_err_check will return MMC_BLK_SUCCESS although the packed commands
 failed.
Similarly, Sahitya Tummala already mentioned this.
Other error cases will be checked in the next version.
Is the case you suggested about read or write?
The device may detect an error and stop transferring the data.
+
+       if (check == MMC_BLK_PARTIAL) {
+               err = get_card_status(card, &status, 0);
+               if (err)
+                       return MMC_BLK_ABORT;
+
+               if (status & R1_EXP_EVENT) {
+                       err = mmc_send_ext_csd(card, ext_csd);
+                       if (err)
+                               return MMC_BLK_ABORT;
+
+                       if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS + 0]
 why do we need the + 0?
It is an explicit expression, no more, no less.
Actually, there is no need.
+                                       & EXT_CSD_PACKED_INDEXED_ERROR) {
+                                       /* Make be 0-based */
 The comment is not understood
PACKED_FAILURE_INDEX starts from '1'.
It is converted to 0-based for use.
+                                       mq_mrq->packed_fail_idx =
+
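The archive truncates the hunk here. Going by the explanation above (the
index starts from '1' and is converted to 0-based), the assignment plausibly
continues along these lines (a sketch; EXT_CSD_PACKED_FAILURE_INDEX is an
assumed field name):

                                        /* EXT_CSD index is 1-based */
                                        mq_mrq->packed_fail_idx =
                                                ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;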
 Thanks,
 Maya Erez
 --
 Sent by a Consultant for Qualcomm Innovation Center, Inc.
 Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum
 
 





RE: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-16 Thread Seungwon Jeon
Maya Erez wrote:
+               phys_segments += next->nr_phys_segments;
+               if (phys_segments > max_phys_segs) {
+                       blk_requeue_request(q, next);
+                       break;
+               }
I mentioned this before - if the next request is not packable and
requeued, blk_fetch_request will retrieve it again and this while loop
will never terminate.

If next request is not packable, it is requeued and 'break' terminates
this loop.
So it is not infinite.
Right !! But that doesn't help finding the commands that are packable.
Ideally, you'd need to pack all neighbouring requests into one packed
command.
The way CFQ works, it is not necessary that the fetch would return all
outstanding requests that are packable (unless you invoke a forced
dispatch). It would be good to see some numbers on the number of pack
hits / misses that you would encounter with this logic, on a typical
usecase.
Is it considered only for the CFQ scheduler? How about other I/O
schedulers?
If all requests are forcibly drained from the scheduler queue,
the number of candidates to be packed can be increased.
However, we may lose CFQ's unique strength, and the MMC driver may take
over CFQ's duty.
Basically, this patch accommodates the original order of requests from
the I/O scheduler.
   
  
   In order to better utilize the packed commands feature and achieve the
   best performance improvements I think that the command packing should
   be done in the block layer, according to the scheduler policy.
   That is, the scheduler should be aware of the capability of the device
   to receive a request list and its constraints (such as maximum number
   of requests, max number of sectors etc) and use this information as a
   factor to its algorithm.
   This way you keep the decision making in the hands of the scheduler
   while the MMC layer will only have to send this list as a packed
   command.
  
   Yes, it would be another interesting approach.
   Does the command packing you mentioned mean gathering requests of the
   same direction (read/write)?
   Currently the I/O scheduler may know the device constraints which the
   MMC driver informs it of, with the exception of order information for
   packed commands.
   But I think a dependency on the I/O scheduler may come up.
   How can the MMC layer handle packed commands with an I/O scheduler
   which doesn't support this?
 
  The very act of packing presumes some sorting and re-ordering at the
  I/O scheduler level.
  When no such sorting is done (ex. noop), MMC should resort to
  non-packed execution, respecting the system configuration choice.
 
  Looking deeper into this, I think a better approach would be to set
  the prep_rq_fn of the request_queue, with a custom mmc function that
  decides if the requests are packable or not, and return a
  BLKPREP_DEFER for those that can't be packed.
 
  The MMC layer anticipates favorable requests for packing from the I/O
  scheduler.
  Obviously the request order from the I/O scheduler might be poor for
  packing, and requests can't always be packed.
  But that doesn't mean the MMC layer needs to wait for a better packing
  case.
  BLKPREP_DEFER may give rise to I/O latency; the request at the top of
  the queue will just be deferred until next time.
  If a request can't be packed, it'd rather be sent at once than delayed
  by returning a BLKPREP_DEFER, for better responsiveness.
 
  Thanks.
  Seungwon Jeon.
 The decision whether a request is packable or not should not be made per
 request, therefore I don't see the need for using prep_rq_fn. The MMC
 layer can peek at each request and decide whether to pack it or not when
 preparing
 improve the probability of getting a list that can be packed.
 My suggestion for the scheduler change is:
 The block layer will be notified of the packing limits via new queue
 limits (in blk-settings). We can make it generic to allow all kinds of
 request lists. Examples for the new queue limits can be:
 Is_reqs_list_supported
 Max_num_of_read_reqs_in_list
 Max_num_of_write_reqs_in_list
 max_blk_cnt_in_list
 Is_list_interleaved (optional - to support read and write requests to be
 linked together, not the case for eMMC4.5)
 The scheduler, that will have all the required info in the queue limits,
 will be able to decide which requests can be in the same list and move all
 of them to the dispatch queue (in one elevator_dispatch_fn call).

If the probability of packing gets higher, it would be nice.
We need to think more.
 
 I see 2 options for preparing the list:
 
 Option 1. blk_fetch_request will prepare the list and return a list of
 requests (will require a change in struct request to include the list but
 can be more generic).
 
 Option 2. The MMC layer will prepare the list. After fetching one request
 the MMC layer can call blk_peek_request and check if the next request is
 packable or not. By keeping 

RE: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-15 Thread merez
 +       if (rqc)
 +               reqs = mmc_blk_chk_packable(mq, rqc);
 
  It would be best to keep all the calls to blk_fetch_request in the
 same
  location. Therefore, I suggest to move the call to
 mmc_blk_chk_packable
  to
  mmc/card/queue.c after the first request is fetched.
 
  I considered that way at first.
  I'll look into it more, if possible.
 I considered more.
 I think that mmc_blk_chk_packable would rather be called only for r/w type
 requests than for all request types (e.g. discard, flush).

mmc_blk_chk_packable can check the cmd_flags of the request to verify it's
not a flush/discard etc. In such cases it will not pack.
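A sketch of that cmd_flags screen, using the 3.x-era block-layer flags (the
helper name is made up for illustration):

static bool mmc_blk_rq_rw_only(struct request *req)
{
        /* Only plain filesystem reads/writes are candidates for packing */
        if (req->cmd_type != REQ_TYPE_FS)
                return false;
        /* Discard and flush requests must not be packed */
        if (req->cmd_flags & (REQ_DISCARD | REQ_FLUSH))
                return false;
        return true;
}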



RE: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-15 Thread merez
+               phys_segments += next->nr_phys_segments;
+               if (phys_segments > max_phys_segs) {
+                       blk_requeue_request(q, next);
+                       break;
+               }
I mentioned this before - if the next request is not packable and
requeued, blk_fetch_request will retrieve it again and this while loop
will never terminate.

If next request is not packable, it is requeued and 'break' terminates
this loop.
So it is not infinite.
Right !! But that doesn't help finding the commands that are packable.
Ideally, you'd need to pack all neighbouring requests into one packed
command.
The way CFQ works, it is not necessary that the fetch would return all
outstanding requests that are packable (unless you invoke a forced
dispatch). It would be good to see some numbers on the number of pack
hits / misses that you would encounter with this logic, on a typical
usecase.
Is it considered only for the CFQ scheduler? How about other I/O
schedulers?
If all requests are forcibly drained from the scheduler queue,
the number of candidates to be packed can be increased.
However, we may lose CFQ's unique strength, and the MMC driver may take
over CFQ's duty.
Basically, this patch accommodates the original order of requests from
the I/O scheduler.
  
 
  In order to better utilize the packed commands feature and achieve the
  best performance improvements I think that the command packing should
  be done in the block layer, according to the scheduler policy.
  That is, the scheduler should be aware of the capability of the device
  to receive a request list and its constraints (such as maximum number
  of requests, max number of sectors etc) and use this information as a
  factor to its algorithm.
  This way you keep the decision making in the hands of the scheduler
  while the MMC layer will only have to send this list as a packed
  command.
 
  Yes, it would be another interesting approach.
  Does the command packing you mentioned mean gathering requests of the
  same direction (read/write)?
  Currently the I/O scheduler may know the device constraints which the
  MMC driver informs it of, with the exception of order information for
  packed commands.
  But I think a dependency on the I/O scheduler may come up.
  How can the MMC layer handle packed commands with an I/O scheduler
  which doesn't support this?

 The very act of packing presumes some sorting and re-ordering at the
 I/O scheduler level.
 When no such sorting is done (ex. noop), MMC should resort to
 non-packed execution, respecting the system configuration choice.

 Looking deeper into this, I think a better approach would be to set
 the prep_rq_fn of the request_queue, with a custom mmc function that
 decides if the requests are packable or not, and return a
 BLKPREP_DEFER for those that can't be packed.

 The MMC layer anticipates favorable requests for packing from the I/O
 scheduler.
 Obviously the request order from the I/O scheduler might be poor for
 packing, and requests can't always be packed.
 But that doesn't mean the MMC layer needs to wait for a better packing
 case.
 BLKPREP_DEFER may give rise to I/O latency; the request at the top of
 the queue will just be deferred until next time.
 If a request can't be packed, it'd rather be sent at once than delayed
 by returning a BLKPREP_DEFER, for better responsiveness.

 Thanks.
 Seungwon Jeon.
The decision whether a request is packable or not should not be made per
request, therefore I don't see the need for using prep_rq_fn. The MMC
layer can peek at each request and decide whether to pack it or not when preparing
the packed list (as suggested in this patch). The scheduler changes will
improve the probability of getting a list that can be packed.
My suggestion for the scheduler change is:
The block layer will be notified of the packing limits via new queue
limits (in blk-settings). We can make it generic to allow all kinds of
request lists. Examples for the new queue limits can be:
Is_reqs_list_supported
Max_num_of_read_reqs_in_list
Max_num_of_write_reqs_in_list
max_blk_cnt_in_list
Is_list_interleaved (optional - to support read and write requests to be
linked together, not the case for eMMC4.5)
The scheduler, that will have all the required info in the queue limits,
will be able to decide which requests can be in the same list and move all
of them to the dispatch queue (in one elevator_dispatch_fn call).
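A hedged sketch of how such limits might look; none of these fields exist in
the kernel's struct queue_limits, the names simply mirror the list above:

/* Illustrative only: possible packing limits for blk-settings */
struct queue_packing_limits {
        unsigned char   reqs_list_supported;    /* Is_reqs_list_supported */
        unsigned short  max_read_reqs_in_list;  /* Max_num_of_read_reqs_in_list */
        unsigned short  max_write_reqs_in_list; /* Max_num_of_write_reqs_in_list */
        unsigned int    max_blk_cnt_in_list;    /* max_blk_cnt_in_list */
        unsigned char   list_interleaved;       /* Is_list_interleaved */
};

/* The MMC driver would then advertise its capability at init, e.g.: */
static void mmc_set_packing_limits(struct queue_packing_limits *l)
{
        l->reqs_list_supported = 1;
        l->max_read_reqs_in_list = 32;          /* example values only */
        l->max_write_reqs_in_list = 32;
        l->max_blk_cnt_in_list = 0xFFFF;
        l->list_interleaved = 0;                /* not the case for eMMC4.5 */
}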

I see 2 options for preparing the list:

Option 1. blk_fetch_request will prepare the list and return a list of
requests (will require a change in struct request to include the list but
can be more generic).

Option 2. The MMC layer will prepare the list. After fetching one request
the MMC layer can call blk_peek_request and check if the next request is
packable or not. By keeping all the calls to blk_peek_request under the
same queue lock we can guarantee to get the requests that the scheduler
pushed to the dispatch queue (this requires mmc_blk_chk_packable to move
to block.c). If the 
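The message is truncated by the archive here. For illustration, Option 2
could be sketched like this (blk_peek_request()/blk_start_request() are the
real 3.x block-layer calls; mmc_blk_rq_packable() is hypothetical, and the
caller is assumed to hold the queue lock as blk_peek_request() requires):

static unsigned int mmc_blk_build_packed_list(struct request_queue *q,
                                              struct request *first,
                                              struct list_head *packed)
{
        struct request *next;
        unsigned int nr = 1;

        list_add_tail(&first->queuelist, packed);

        /* Pull follow-on requests while they remain packable */
        while ((next = blk_peek_request(q)) != NULL) {
                if (!mmc_blk_rq_packable(first, next))
                        break;                  /* leave it on the queue */
                blk_start_request(next);        /* dequeue it */
                list_add_tail(&next->queuelist, packed);
                nr++;
        }
        return nr;
}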


RE: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-14 Thread Seungwon Jeon
Maya Erez wrote:
  On Fri, Nov 11, 2011 at 12:56 PM, Seungwon Jeon tgih@samsung.com
  wrote:
  Maya Erez wrote:
  On Thu, Nov 10, 2011 Maya Erez wrote:
   S, Venkatraman svenk...@ti.com wrote:
   On Thu, Nov 3, 2011 at 7:23 AM, Seungwon Jeon tgih@samsung.com
  wrote:
+static u8 mmc_blk_chk_packable(struct mmc_queue *mq, struct request *req)
 
  The function prepares the checkable list and not only checks if packing
  is
  possible, therefore I think its name should change to reflect its real
  action
  I labored at naming. Isn't it proper? :)
  Do you have any recommendation?
  group_pack_req?
 
 
+       if (!(md->flags & MMC_BLK_CMD23) &&
+                       !card->ext_csd.packed_event_en)
+               goto no_packed;
 
 Having the condition with a && can lead to cases where CMD23 is not
 supported and we send packed commands. Therefore the condition should be
 changed to || or be split into 2 separate checks.
 Also, according to the standard the generic error flag in
 PACKED_COMMAND_STATUS is set in case of any error and having
 PACKED_EVENT_EN is only optional. Therefore, I don't think that setting
 the packed_event_en should be a mandatory condition for using packed
 commands.
... cases where CMD23 is not supported and we send packed commands?
Packed commands must not be allowed in such a case.
They work only with predefined mode, which is an essential factor.
And the spec doesn't mention that PACKED_EVENT_EN must be set.
So a packed command can be sent regardless of PACKED_EVENT_EN,
but it's not complete without error reporting.
Then the host driver may suffer in error recovery.
Why would a packed command be used without error reporting?
 Let me better explain my comment:
 If the first condition !(md->flags & MMC_BLK_CMD23) is 1 (meaning the
 MMC_BLK_CMD23 flag is not set), then in case card->ext_csd.packed_event_en
 is 1 the second condition will be 0 and we won't goto no_packed;. In
 this case, CMD_23 is not supported but we don't exit the function.
 If you want both conditions to be mandatory you need to use an || here.
Thank you for the clarifying comment.
This condition will be fixed.
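For clarity, the agreed fix would make the bail-out require both features,
e.g. (a sketch of the change, not the posted v2):

-       if (!(md->flags & MMC_BLK_CMD23) &&
+       if (!(md->flags & MMC_BLK_CMD23) ||
                        !card->ext_csd.packed_event_en)
                goto no_packed;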

 
 
+       if (mmc_req_rel_wr(cur) &&
+                       (md->flags & MMC_BLK_REL_WR) &&
+                       !en_rel_wr) {
+               goto no_packed;
+       }
 
  Can you please explain this condition and its purpose?
 
  In the case where reliable write is requested but enhanced reliable write
  is not supported, write access must be partial according to the
  reliable write sector count. Because even a single request can be split,
  a packed command is not allowed in this case.
 
+               phys_segments += next->nr_phys_segments;
+               if (phys_segments > max_phys_segs) {
+                       blk_requeue_request(q, next);
+                       break;
+               }
I mentioned this before - if the next request is not packable and
requeued, blk_fetch_request will retrieve it again and this while loop
will never terminate.

If next request is not packable, it is requeued and 'break' terminates
this loop.
So it is not infinite.
Right !! But that doesn't help finding the commands that are packable.
Ideally, you'd need to pack all neighbouring requests into one packed
command.
The way CFQ works, it is not necessary that the fetch would return all
outstanding requests that are packable (unless you invoke a forced
dispatch). It would be good to see some numbers on the number of pack
hits / misses that you would encounter with this logic, on a typical
usecase.
Is it considered only for the CFQ scheduler? How about other I/O
schedulers?
If all requests are forcibly drained from the scheduler queue,
the number of candidates to be packed can be increased.
However, we may lose CFQ's unique strength, and the MMC driver may take
over CFQ's duty.
Basically, this patch accommodates the original order of requests from
the I/O scheduler.
  
 
  In order to better utilize the packed commands feature and achieve the
  best performance improvements I think that the command packing should
  be done in the block layer, according to the scheduler policy.
  That is, the scheduler should be aware of the capability of the device
  to receive a request list and its constraints (such as maximum number
  of requests, max number of sectors etc) and use this information as a
  factor to its algorithm.
  This way you keep the decision making in the hands of the scheduler
  while the MMC layer will only have to send this list as a packed
  command.
 
 Yes, it would be another interesting approach.
 Does the command packing you mentioned mean gathering requests of the
 same direction (read/write)?
 Currently the I/O scheduler may know the device constraints which the
 MMC driver informs it of, with the exception of order information for
 packed commands.
 But I think a dependency on the I/O scheduler may come up.
  How 

Re: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-13 Thread merez
 On Fri, Nov 11, 2011 at 12:56 PM, Seungwon Jeon tgih@samsung.com
 wrote:
 Maya Erez wrote:
 On Thu, Nov 10, 2011 Maya Erez wrote:
  S, Venkatraman svenk...@ti.com wrote:
  On Thu, Nov 3, 2011 at 7:23 AM, Seungwon Jeon
 tgih@samsung.com
 wrote:
+static u8 mmc_blk_chk_packable(struct mmc_queue *mq, struct request *req)

 The function prepares the checkable list and not only checks if
 packing
 is
 possible, therefore I think its name should change to reflect its real
 action
 I labored at naming. Isn't it proper? :)
 Do you have any recommendation?
 group_pack_req?

How about mmc_blk_prep_packed_list?

Thanks,
Maya Erez
Consultant for Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum




Re: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-11 Thread S, Venkatraman
On Fri, Nov 11, 2011 at 12:56 PM, Seungwon Jeon tgih@samsung.com wrote:
 Maya Erez wrote:
 On Thu, Nov 10, 2011 Maya Erez wrote:
  S, Venkatraman svenk...@ti.com wrote:
  On Thu, Nov 3, 2011 at 7:23 AM, Seungwon Jeon tgih@samsung.com
 wrote:
+static u8 mmc_blk_chk_packable(struct mmc_queue *mq, struct request *req)

 The function prepares the checkable list and not only checks if packing is
 possible, therefore I think its name should change to reflect its real
 action
 I labored at naming. Isn't it proper? :)
 Do you have any recommendation?
 group_pack_req?


+       if (!(md->flags & MMC_BLK_CMD23) &&
+                       !card->ext_csd.packed_event_en)
+               goto no_packed;

 Having the condition with a && can lead to cases where CMD23 is not
 supported and we send packed commands. Therefore the condition should be
 changed to || or be split into 2 separate checks.
 Also, according to the standard the generic error flag in
 PACKED_COMMAND_STATUS is set in case of any error and having
 PACKED_EVENT_EN is only optional. Therefore, I don't think that setting
 the packed_event_en should be a mandatory condition for using packed
 commands.
 ... cases where CMD23 is not supported and we send packed commands?
 Packed commands must not be allowed in such a case.
 They work only with predefined mode, which is an essential factor.
 And the spec doesn't mention that PACKED_EVENT_EN must be set.
 So a packed command can be sent regardless of PACKED_EVENT_EN,
 but it's not complete without error reporting.
 Then the host driver may suffer in error recovery.
 Why would a packed command be used without error reporting?


+       if (mmc_req_rel_wr(cur) &&
+                       (md->flags & MMC_BLK_REL_WR) &&
+                       !en_rel_wr) {
+               goto no_packed;
+       }

 Can you please explain this condition and its purpose?

 In the case where reliable write is requested but enhanced reliable write
 is not supported, write access must be partial according to the
 reliable write sector count. Because even a single request can be split,
 a packed command is not allowed in this case.

+               phys_segments += next->nr_phys_segments;
+               if (phys_segments > max_phys_segs) {
+                       blk_requeue_request(q, next);
+                       break;
+               }
I mentioned this before - if the next request is not packable and
requeued, blk_fetch_request will retrieve it again and this while loop
will never terminate.

If next request is not packable, it is requeued and 'break' terminates
this loop.
So it is not infinite.
Right !! But that doesn't help finding the commands that are packable.
Ideally, you'd need to pack all neighbouring requests into one packed
command.
The way CFQ works, it is not necessary that the fetch would return all
outstanding requests that are packable (unless you invoke a forced
dispatch). It would be good to see some numbers on the number of pack
hits / misses that you would encounter with this logic, on a typical
usecase.
Is it considered only for the CFQ scheduler? How about other I/O
schedulers?
If all requests are forcibly drained from the scheduler queue,
the number of candidates to be packed can be increased.
However, we may lose CFQ's unique strength, and the MMC driver may take
over CFQ's duty.
Basically, this patch accommodates the original order of requests from
the I/O scheduler.
 

 In order to better utilize the packed commands feature and achieve the
 best performance improvements I think that the command packing should be
 done in the block layer, according to the scheduler policy.
 That is, the scheduler should be aware of the capability of the device to
 receive a request list and its constraints (such as maximum number of
 requests, max number of sectors etc) and use this information as a factor
 to its algorithm.
 This way you keep the decision making in the hands of the scheduler while
 the MMC layer will only have to send this list as a packed command.

 Yes, it would be another interesting approach.
 Does the command packing you mentioned mean gathering requests of the
 same direction (read/write)?
 Currently the I/O scheduler may know the device constraints which the
 MMC driver informs it of, with the exception of order information for
 packed commands.
 But I think a dependency on the I/O scheduler may come up.
 How can the MMC layer handle packed commands with an I/O scheduler
 which doesn't support this?

The very act of packing presumes some sorting and re-ordering at the
I/O scheduler level.
When no such sorting is done (ex. noop), MMC should resort to
non-packed execution, respecting the system configuration choice.

Looking deeper into this, I think a better approach would be to set
the prep_rq_fn of the request_queue, with a custom mmc function that
decides if the requests are packable or not, and return a
BLKPREP_DEFER for those that can't be packed.
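A minimal sketch of this prep_rq_fn idea (blk_queue_prep_rq() and the
BLKPREP_* return codes are the real block-layer API; mmc_can_pack_now() is
hypothetical):

static int mmc_prep_packable(struct request_queue *q, struct request *req)
{
        /* Defer requests that cannot join the current packed command */
        if (mmc_can_pack_now(q, req))
                return BLKPREP_OK;      /* hand it to the driver now */
        return BLKPREP_DEFER;           /* keep it queued for later */
}

/* registered once at queue init, e.g.:
 *      blk_queue_prep_rq(mq->queue, mmc_prep_packable);
 */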


+       if (rqc)
+               

Re: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-11 Thread merez
 On Fri, Nov 11, 2011 at 12:56 PM, Seungwon Jeon tgih@samsung.com
 wrote:
 Maya Erez wrote:
 On Thu, Nov 10, 2011 Maya Erez wrote:
  S, Venkatraman svenk...@ti.com wrote:
  On Thu, Nov 3, 2011 at 7:23 AM, Seungwon Jeon tgih@samsung.com
 wrote:
+static u8 mmc_blk_chk_packable(struct mmc_queue *mq, struct request *req)

 The function prepares the checkable list and not only checks if packing
 is
 possible, therefore I think its name should change to reflect its real
 action
 I labored at naming. Isn't it proper? :)
 Do you have any recommendation?
 group_pack_req?


+       if (!(md->flags & MMC_BLK_CMD23) &&
+                       !card->ext_csd.packed_event_en)
+               goto no_packed;

 Having the condition with a && can lead to cases where CMD23 is not
 supported and we send packed commands. Therefore the condition should be
 changed to || or be split into 2 separate checks.
 Also, according to the standard the generic error flag in
 PACKED_COMMAND_STATUS is set in case of any error and having
 PACKED_EVENT_EN is only optional. Therefore, I don't think that setting
 the packed_event_en should be a mandatory condition for using packed
 commands.
 ... cases where CMD23 is not supported and we send packed commands?
 Packed commands must not be allowed in such a case.
 They work only with predefined mode, which is an essential factor.
 And the spec doesn't mention that PACKED_EVENT_EN must be set.
 So a packed command can be sent regardless of PACKED_EVENT_EN,
 but it's not complete without error reporting.
 Then the host driver may suffer in error recovery.
 Why would a packed command be used without error reporting?
Let me better explain my comment:
If the first condition !(md->flags & MMC_BLK_CMD23) is 1 (meaning the
MMC_BLK_CMD23 flag is not set), then in case card->ext_csd.packed_event_en
is 1 the second condition will be 0 and we won't goto no_packed;. In
this case, CMD_23 is not supported but we don't exit the function.
If you want both conditions to be mandatory you need to use an || here.


+       if (mmc_req_rel_wr(cur) &&
+                       (md->flags & MMC_BLK_REL_WR) &&
+                       !en_rel_wr) {
+               goto no_packed;
+       }

 Can you please explain this condition and its purpose?

 In the case where reliable write is requested but enhanced reliable write
 is not supported, write access must be partial according to the
 reliable write sector count. Because even a single request can be split,
 a packed command is not allowed in this case.

+               phys_segments += next->nr_phys_segments;
+               if (phys_segments > max_phys_segs) {
+                       blk_requeue_request(q, next);
+                       break;
+               }
I mentioned this before - if the next request is not packable and
requeued, blk_fetch_request will retrieve it again and this while loop
will never terminate.

If next request is not packable, it is requeued and 'break' terminates
this loop.
So it is not infinite.
Right !! But that doesn't help finding the commands that are packable.
Ideally, you'd need to pack all neighbouring requests into one packed
command.
The way CFQ works, it is not necessary that the fetch would return all
outstanding requests that are packable (unless you invoke a forced
dispatch). It would be good to see some numbers on the number of pack
hits / misses that you would encounter with this logic, on a typical
usecase.
Is it considered only for the CFQ scheduler? How about other I/O
schedulers?
If all requests are forcibly drained from the scheduler queue,
the number of candidates to be packed can be increased.
However, we may lose CFQ's unique strength, and the MMC driver may take
over CFQ's duty.
Basically, this patch accommodates the original order of requests from
the I/O scheduler.
 

 In order to better utilize the packed commands feature and achieve the
 best performance improvements I think that the command packing should
 be done in the block layer, according to the scheduler policy.
 That is, the scheduler should be aware of the capability of the device
 to receive a request list and its constraints (such as maximum number
 of requests, max number of sectors etc) and use this information as a
 factor to its algorithm.
 This way you keep the decision making in the hands of the scheduler
 while the MMC layer will only have to send this list as a packed
 command.

 Yes, it would be another interesting approach.
 Does the command packing you mentioned mean gathering requests of the
 same direction (read/write)?
 Currently the I/O scheduler may know the device constraints which the
 MMC driver informs it of, with the exception of order information for
 packed commands.
 But I think a dependency on the I/O scheduler may come up.
 How can the MMC layer handle packed commands with an I/O scheduler
 which doesn't support this?

 The very act of packing presumes some sorting and re-ordering at the
 I/O scheduler level.
 When no such sorting 

RE: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-10 Thread merez
On Thu, Nov 10, 2011 Maya Erez wrote:
 S, Venkatraman svenk...@ti.com wrote:
 On Thu, Nov 3, 2011 at 7:23 AM, Seungwon Jeon tgih@samsung.com
wrote:
   +static u8 mmc_blk_chk_packable(struct mmc_queue *mq, struct request *req)

The function prepares the checkable list and not only checks if packing is
possible, therefore I think its name should change to reflect its real
action.

   +       if (!(md->flags & MMC_BLK_CMD23) &&
   +                       !card->ext_csd.packed_event_en)
   +               goto no_packed;

Having the condition with a && can lead to cases where CMD23 is not
supported and we send packed commands. Therefore the condition should be
changed to || or be split into 2 separate checks.
Also, according to the standard the generic error flag in
PACKED_COMMAND_STATUS is set in case of any error and having
PACKED_EVENT_EN is only optional. Therefore, I don't think that setting
the packed_event_en should be a mandatory condition for using packed
commands.

   +       if (mmc_req_rel_wr(cur) &&
   +                       (md->flags & MMC_BLK_REL_WR) &&
   +                       !en_rel_wr) {
   +               goto no_packed;
   +       }

Can you please explain this condition and its purpose?

   +               phys_segments += next->nr_phys_segments;
   +               if (phys_segments > max_phys_segs) {
   +                       blk_requeue_request(q, next);
   +                       break;
   +               }
I mentioned this before - if the next request is not packable and
requeued, blk_fetch_request will retrieve it again and this while loop
will never terminate.

If next request is not packable, it is requeued and 'break' terminates
this loop.
So it is not infinite.
Right !! But that doesn't help finding the commands that are packable.
Ideally, you'd need to pack all neighbouring requests into one packed
command.
The way CFQ works, it is not necessary that the fetch would return all
outstanding requests that are packable (unless you invoke a forced
dispatch). It would be good to see some numbers on the number of pack
hits / misses that you would encounter with this logic, on a typical
usecase.
Is it considered only for the CFQ scheduler? How about other I/O
schedulers?
If all requests are forcibly drained from the scheduler queue,
the number of candidates to be packed can be increased.
However, we may lose CFQ's unique strength, and the MMC driver may take
over CFQ's duty.
Basically, this patch accommodates the original order of requests from
the I/O scheduler.


In order to better utilize the packed commands feature and achieve the
best performance improvements I think that the command packing should be
done in the block layer, according to the scheduler policy.
That is, the scheduler should be aware of the capability of the device to
receive a request list and its constraints (such as maximum number of
requests, max number of sectors etc) and use this information as a factor
to its algorithm.
This way you keep the decision making in the hands of the scheduler while
the MMC layer will only have to send this list as a packed command.

   +       if (rqc)
   +               reqs = mmc_blk_chk_packable(mq, rqc);

It would be best to keep all the calls to blk_fetch_request in the same
location. Therefore, I suggest to move the call to mmc_blk_chk_packable to
mmc/card/queue.c after the first request is fetched.

    cmd_abort:
   -       spin_lock_irq(&md->lock);
   -       while (ret)
   -               ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
   -       spin_unlock_irq(&md->lock);
   +       if (mq_rq->packed_cmd != MMC_PACKED_NONE) {

This should be the case for MMC_PACKED_NONE.

   +               spin_lock_irq(&md->lock);
   +               while (ret)
   +                       ret = __blk_end_request(req, -EIO,
   +                                               blk_rq_cur_bytes(req));

Do we need the while or should it be an if? In other cases where
__blk_end_request is called there is no such while.

Thanks,
Maya Erez
Consultant for Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum






RE: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-10 Thread Seungwon Jeon
Maya Erez wrote:
 On Thu, Nov 10, 2011 Maya Erez wrote:
  S, Venkatraman svenk...@ti.com wrote:
  On Thu, Nov 3, 2011 at 7:23 AM, Seungwon Jeon tgih@samsung.com
 wrote:
+static u8 mmc_blk_chk_packable(struct mmc_queue *mq, struct request *req)
 
 The function prepares the checkable list and not only checks if packing is
 possible, therefore I think its name should change to reflect its real
 action
I labored at naming. Isn't it proper? :)
Do you have any recommendation?
group_pack_req?

 
+       if (!(md->flags & MMC_BLK_CMD23) &&
+                       !card->ext_csd.packed_event_en)
+               goto no_packed;
 
 Having the condition with a && can lead to cases where CMD23 is not
 supported and we send packed commands. Therefore the condition should be
 changed to || or be split into 2 separate checks.
 Also, according to the standard the generic error flag in
 PACKED_COMMAND_STATUS is set in case of any error and having
 PACKED_EVENT_EN is only optional. Therefore, I don't think that setting
 the packed_event_en should be a mandatory condition for using packed
 commands.
... cases where CMD23 is not supported and we send packed commands?
Packed commands must not be allowed in such a case.
They work only with predefined mode, which is an essential factor.
And the spec doesn't mention that PACKED_EVENT_EN must be set.
So a packed command can be sent regardless of PACKED_EVENT_EN,
but it's not complete without error reporting.
Then the host driver may suffer in error recovery.
Why would a packed command be used without error reporting?

 
+       if (mmc_req_rel_wr(cur) &&
+                       (md->flags & MMC_BLK_REL_WR) &&
+                       !en_rel_wr) {
+               goto no_packed;
+       }
 
 Can you please explain this condition and its purpose?
 
In the case where reliable write is requested but enhanced reliable write
is not supported, write access must be partial according to the
reliable write sector count. Because even a single request can be split,
a packed command is not allowed in this case.

+               phys_segments += next->nr_phys_segments;
+               if (phys_segments > max_phys_segs) {
+                       blk_requeue_request(q, next);
+                       break;
+               }
   I mentioned this before - if the next request is not packable and
   requeued, blk_fetch_request will retrieve it again and this while loop
   will never terminate.

   If the next request is not packable, it is requeued and 'break'
   terminates this loop. So it is not infinite.
  Right!! But that doesn't help finding the commands that are packable.
  Ideally, you'd need to pack all neighbouring requests into one packed
  command. The way CFQ works, it is not necessary that the fetch would
  return all outstanding requests that are packable (unless you invoke a
  forced dispatch). It would be good to see some numbers on the pack hits /
  misses that you would encounter with this logic, on a typical usecase.
 Is it considered only for the CFQ scheduler? How about other I/O
 schedulers? If all requests are forcibly drained from the scheduler queue,
 the number of candidates to be packed can be increased. However, we may
 lose CFQ's unique strength, and the MMC driver may end up taking over
 CFQ's duty. Basically, this patch accommodates requests in the original
 order they come from the I/O scheduler. The loop shape under discussion is
 sketched below.
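For reference, the loop shape being discussed, simplified from the patch
(details approximate):

	/*
	 * A request that would exceed the host's segment limit is handed
	 * back with blk_requeue_request() and 'break' stops packing, so
	 * the loop always terminates. The requeued request is simply
	 * fetched again on the next round, outside this loop.
	 */
	while (reqs < max_packed_rw - 1) {
		spin_lock_irq(q->queue_lock);
		next = blk_fetch_request(q);
		spin_unlock_irq(q->queue_lock);
		if (!next)
			break;

		phys_segments += next->nr_phys_segments;
		if (phys_segments > max_phys_segs) {
			spin_lock_irq(q->queue_lock);
			blk_requeue_request(q, next);	/* hand it back */
			spin_unlock_irq(q->queue_lock);
			break;				/* stop packing here */
		}

		list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
		reqs++;
	}

The cost is that one non-packable fetch ends the pack early, which is what
the pack hit/miss numbers above would quantify.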
 
 
 In order to better utilize the packed commands feature and achieve the
 best performance improvements, I think that the command packing should be
 done in the block layer, according to the scheduler policy.
 That is, the scheduler should be aware of the capability of the device to
 receive a request list and its constraints (such as maximum number of
 requests, max number of sectors etc.) and use this information as a factor
 in its algorithm.
 This way you keep the decision making in the hands of the scheduler while
 the MMC layer will only have to send this list as a packed command.

Yes, it would be another interesting approach.
Does the command packing you mention mean gathering requests of the same
direction (read/write)?
Currently the I/O scheduler may know the device constraints which the MMC
driver informs it of, with the exception of the ordering information for
packed commands. But I think a dependency on the I/O scheduler could come
up. How would the MMC layer handle packed commands with an I/O scheduler
which doesn't support this? A purely illustrative sketch of such an
interface follows.
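Purely as an illustration of that idea (no such block-layer API exists; all
names here are invented):

	/*
	 * Hypothetical: packing constraints a driver could advertise to
	 * the block layer so the I/O scheduler can build a dispatch list
	 * itself, instead of the MMC layer re-fetching and requeueing.
	 */
	struct blk_pack_limits {
		unsigned int max_entries;	/* max requests per packed command */
		unsigned int max_sectors;	/* total sectors per packed transfer */
		unsigned int max_segments;	/* host DMA segment limit */
	};

A scheduler honoring these limits could hand the driver a ready-made request
list; an unaware scheduler would simply never build one, leaving the driver
on the existing single-request path.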

+       if (rqc)
+               reqs = mmc_blk_chk_packable(mq, rqc);
 
 It would be best to keep all the calls to blk_fetch_request in the same
 location. Therefore, I suggest moving the call to mmc_blk_chk_packable to
 mmc/card/queue.c after the first request is fetched.

At the first time, I considered that way.
I'll do more, if possible.
 
 cmd_abort:
-       spin_lock_irq(&md->lock);
-       while (ret)
-               ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
-       spin_unlock_irq(&md->lock);
+       if (mq_rq->packed_cmd != 

RE: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-06 Thread Seungwon Jeon
S, Venkatraman svenk...@ti.com wrote: 
 On Thu, Nov 3, 2011 at 7:23 AM, Seungwon Jeon tgih@samsung.com wrote:
  S, Venkatraman svenk...@ti.com wrote:
  On Wed, Nov 2, 2011 at 1:33 PM, Seungwon Jeon tgih@samsung.com wrote:
   This patch supports packed command of eMMC4.5 device.
   Several reads(or writes) can be grouped in packed command
   and all data of the individual commands can be sent in a
   single transfer on the bus.
  
   Signed-off-by: Seungwon Jeon tgih@samsung.com
   ---
    drivers/mmc/card/block.c |  355 
   --
    drivers/mmc/card/queue.c |   48 ++-
    drivers/mmc/card/queue.h |   12 ++
    include/linux/mmc/core.h |    3 +
    4 files changed, 404 insertions(+), 14 deletions(-)
  
   diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
   index a1cb21f..6c49656 100644
   --- a/drivers/mmc/card/block.c
   +++ b/drivers/mmc/card/block.c
   @@ -59,6 +59,13 @@ MODULE_ALIAS(mmc:block);
    #define INAND_CMD38_ARG_SECTRIM1 0x81
    #define INAND_CMD38_ARG_SECTRIM2 0x88
  
   +#define mmc_req_rel_wr(req)    (((req->cmd_flags & REQ_FUA) || \
   +                       (req->cmd_flags & REQ_META)) && \
   +                       (rq_data_dir(req) == WRITE))
   +#define PACKED_CMD_VER         0x01
   +#define PACKED_CMD_RD          0x01
   +#define PACKED_CMD_WR          0x02
   +
    static DEFINE_MUTEX(block_mutex);
  
    /*
   @@ -943,7 +950,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
           * kind.  If it was a write, we may have transitioned to
           * program mode, which we have to wait for it to complete.
           */
   -       if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
   +       if ((!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) ||
   +                       (mq_mrq->packed_cmd == MMC_PACKED_WR_HDR)) {
                  u32 status;
                  do {
                          int err = get_card_status(card, &status, 5);
   @@ -980,12 +988,67 @@ static int mmc_blk_err_check(struct mmc_card *card,
          if (!brq->data.bytes_xfered)
                  return MMC_BLK_RETRY;
  
   +       if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
   +               if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
   +                       return MMC_BLK_PARTIAL;
   +               else
   +                       return MMC_BLK_SUCCESS;
   +       }
   +
          if (blk_rq_bytes(req) != brq->data.bytes_xfered)
                  return MMC_BLK_PARTIAL;
  
          return MMC_BLK_SUCCESS;
    }
  
   +static int mmc_blk_packed_err_check(struct mmc_card *card,
   +                            struct mmc_async_req *areq)
   +{
   +       struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
   +                                                   mmc_active);
   +       int err, check, status;
   +       u8 ext_csd[512];
   +
   +       check = mmc_blk_err_check(card, areq);
   +
   +       if (check == MMC_BLK_SUCCESS)
   +               return check;
   +
   +       if (check == MMC_BLK_PARTIAL) {
   +               err = get_card_status(card, &status, 0);
   +               if (err)
   +                       return MMC_BLK_ABORT;
   +
   +               if (status & R1_EXP_EVENT) {
   +                       err = mmc_send_ext_csd(card, ext_csd);
   +                       if (err)
   +                               return MMC_BLK_ABORT;
   +
   +                       if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS + 0] &
   +                                               EXT_CSD_PACKED_FAILURE) &&
   +                                       (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
   +                                        EXT_CSD_PACKED_GENERIC_ERROR)) {
   +                               if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
   +                                               EXT_CSD_PACKED_INDEXED_ERROR) {
   +                                       /* Make it 0-based */
   +                                       mq_mrq->packed_fail_idx =
   +                                               ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
   +                                       return MMC_BLK_PARTIAL;
   +                               } else {
   +                                       return MMC_BLK_RETRY;
   +                               }
   +                       }
   +               } else {
   +                       return MMC_BLK_RETRY;
   +               }
   +       }
   +
   +       if (check != MMC_BLK_ABORT)
   +               return MMC_BLK_RETRY;
   +       else
   +               return MMC_BLK_ABORT;
   +}
   +
    static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
                                 struct mmc_card *card,
                                 int disable_multi,
   @@ -1129,6 +1192,211 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
          mmc_queue_bounce_pre(mqrq);
    }
  
   +static u8 

Re: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-04 Thread S, Venkatraman
On Thu, Nov 3, 2011 at 7:23 AM, Seungwon Jeon tgih@samsung.com wrote:
 S, Venkatraman svenk...@ti.com wrote:
 On Wed, Nov 2, 2011 at 1:33 PM, Seungwon Jeon tgih@samsung.com wrote:
  This patch supports packed command of eMMC4.5 device.
  Several reads(or writes) can be grouped in packed command
  and all data of the individual commands can be sent in a
  single transfer on the bus.
 
  Signed-off-by: Seungwon Jeon tgih@samsung.com
  ---
   drivers/mmc/card/block.c |  355 
  --
   drivers/mmc/card/queue.c |   48 ++-
   drivers/mmc/card/queue.h |   12 ++
   include/linux/mmc/core.h |    3 +
   4 files changed, 404 insertions(+), 14 deletions(-)
 
  diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
  index a1cb21f..6c49656 100644
  --- a/drivers/mmc/card/block.c
  +++ b/drivers/mmc/card/block.c
  @@ -59,6 +59,13 @@ MODULE_ALIAS(mmc:block);
   #define INAND_CMD38_ARG_SECTRIM1 0x81
   #define INAND_CMD38_ARG_SECTRIM2 0x88
 
  +#define mmc_req_rel_wr(req)    (((req->cmd_flags & REQ_FUA) || \
  +                       (req->cmd_flags & REQ_META)) && \
  +                       (rq_data_dir(req) == WRITE))
  +#define PACKED_CMD_VER         0x01
  +#define PACKED_CMD_RD          0x01
  +#define PACKED_CMD_WR          0x02
  +
   static DEFINE_MUTEX(block_mutex);
 
   /*
  @@ -943,7 +950,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
          * kind.  If it was a write, we may have transitioned to
          * program mode, which we have to wait for it to complete.
          */
  -       if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
  +       if ((!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) ||
  +                       (mq_mrq->packed_cmd == MMC_PACKED_WR_HDR)) {
                 u32 status;
                 do {
                         int err = get_card_status(card, &status, 5);
  @@ -980,12 +988,67 @@ static int mmc_blk_err_check(struct mmc_card *card,
         if (!brq->data.bytes_xfered)
                 return MMC_BLK_RETRY;
 
  +       if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
  +               if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
  +                       return MMC_BLK_PARTIAL;
  +               else
  +                       return MMC_BLK_SUCCESS;
  +       }
  +
         if (blk_rq_bytes(req) != brq->data.bytes_xfered)
                 return MMC_BLK_PARTIAL;
 
         return MMC_BLK_SUCCESS;
   }
 
  +static int mmc_blk_packed_err_check(struct mmc_card *card,
  +                            struct mmc_async_req *areq)
  +{
  +       struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
  +                                                   mmc_active);
  +       int err, check, status;
  +       u8 ext_csd[512];
  +
  +       check = mmc_blk_err_check(card, areq);
  +
  +       if (check == MMC_BLK_SUCCESS)
  +               return check;
  +
  +       if (check == MMC_BLK_PARTIAL) {
  +               err = get_card_status(card, &status, 0);
  +               if (err)
  +                       return MMC_BLK_ABORT;
  +
  +               if (status & R1_EXP_EVENT) {
  +                       err = mmc_send_ext_csd(card, ext_csd);
  +                       if (err)
  +                               return MMC_BLK_ABORT;
  +
  +                       if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS + 0] &
  +                                               EXT_CSD_PACKED_FAILURE) &&
  +                                       (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
  +                                        EXT_CSD_PACKED_GENERIC_ERROR)) {
  +                               if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
  +                                               EXT_CSD_PACKED_INDEXED_ERROR) {
  +                                       /* Make it 0-based */
  +                                       mq_mrq->packed_fail_idx =
  +                                               ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
  +                                       return MMC_BLK_PARTIAL;
  +                               } else {
  +                                       return MMC_BLK_RETRY;
  +                               }
  +                       }
  +               } else {
  +                       return MMC_BLK_RETRY;
  +               }
  +       }
  +
  +       if (check != MMC_BLK_ABORT)
  +               return MMC_BLK_RETRY;
  +       else
  +               return MMC_BLK_ABORT;
  +}
  +
   static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
                                struct mmc_card *card,
                                int disable_multi,
  @@ -1129,6 +1192,211 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
         mmc_queue_bounce_pre(mqrq);
   }
 
  +static u8 mmc_blk_chk_packable(struct mmc_queue *mq, struct request *req)
  +{
  +       struct request_queue *q = mq->queue;
  +       struct mmc_card *card = mq->card;
  +       struct 

Re: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-02 Thread Girish K S
On 2 November 2011 04:03, Seungwon Jeon tgih@samsung.com wrote:
 This patch supports packed command of eMMC4.5 device.
 Several reads(or writes) can be grouped in packed command
 and all data of the individual commands can be sent in a
 single transfer on the bus.

 Signed-off-by: Seungwon Jeon tgih@samsung.com
 ---
  drivers/mmc/card/block.c |  355 
 --
  drivers/mmc/card/queue.c |   48 ++-
  drivers/mmc/card/queue.h |   12 ++
  include/linux/mmc/core.h |    3 +
  4 files changed, 404 insertions(+), 14 deletions(-)

 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
 index a1cb21f..6c49656 100644
 --- a/drivers/mmc/card/block.c
 +++ b/drivers/mmc/card/block.c
 @@ -59,6 +59,13 @@ MODULE_ALIAS(mmc:block);
  #define INAND_CMD38_ARG_SECTRIM1 0x81
  #define INAND_CMD38_ARG_SECTRIM2 0x88

 +#define mmc_req_rel_wr(req)    (((req->cmd_flags & REQ_FUA) || \
 +                       (req->cmd_flags & REQ_META)) && \
 +                       (rq_data_dir(req) == WRITE))
 +#define PACKED_CMD_VER         0x01
 +#define PACKED_CMD_RD          0x01
 +#define PACKED_CMD_WR          0x02
 +
  static DEFINE_MUTEX(block_mutex);

  /*
 @@ -943,7 +950,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
         * kind.  If it was a write, we may have transitioned to
         * program mode, which we have to wait for it to complete.
         */
 -       if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
 +       if ((!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) ||
 +                       (mq_mrq->packed_cmd == MMC_PACKED_WR_HDR)) {
                u32 status;
                do {
                        int err = get_card_status(card, &status, 5);
 @@ -980,12 +988,67 @@ static int mmc_blk_err_check(struct mmc_card *card,
        if (!brq->data.bytes_xfered)
                return MMC_BLK_RETRY;

 +       if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
 +               if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
 +                       return MMC_BLK_PARTIAL;
 +               else
 +                       return MMC_BLK_SUCCESS;
 +       }
 +
        if (blk_rq_bytes(req) != brq->data.bytes_xfered)
                return MMC_BLK_PARTIAL;

        return MMC_BLK_SUCCESS;
  }

 +static int mmc_blk_packed_err_check(struct mmc_card *card,
 +                            struct mmc_async_req *areq)
 +{
 +       struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
 +                                                   mmc_active);
 +       int err, check, status;
 +       u8 ext_csd[512];
 +
 +       check = mmc_blk_err_check(card, areq);
 +
 +       if (check == MMC_BLK_SUCCESS)
 +               return check;
 +
 +       if (check == MMC_BLK_PARTIAL) {
 +               err = get_card_status(card, &status, 0);
 +               if (err)
 +                       return MMC_BLK_ABORT;
 +
 +               if (status & R1_EXP_EVENT) {
 +                       err = mmc_send_ext_csd(card, ext_csd);
 +                       if (err)
 +                               return MMC_BLK_ABORT;
 +
 +                       if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS + 0] &
 +                                               EXT_CSD_PACKED_FAILURE) &&
 +                                       (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
 +                                        EXT_CSD_PACKED_GENERIC_ERROR)) {
 +                               if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
 +                                               EXT_CSD_PACKED_INDEXED_ERROR) {
 +                                       /* Make it 0-based */
 +                                       mq_mrq->packed_fail_idx =
 +                                               ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
 +                                       return MMC_BLK_PARTIAL;
 +                               } else {
 +                                       return MMC_BLK_RETRY;
 +                               }
 +                       }
 +               } else {
 +                       return MMC_BLK_RETRY;
 +               }
 +       }
 +
 +       if (check != MMC_BLK_ABORT)
 +               return MMC_BLK_RETRY;
 +       else
 +               return MMC_BLK_ABORT;
 +}
 +
  static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
                               struct mmc_card *card,
                               int disable_multi,
 @@ -1129,6 +1192,211 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
        mmc_queue_bounce_pre(mqrq);
  }

 +static u8 mmc_blk_chk_packable(struct mmc_queue *mq, struct request *req)
 +{
 +       struct request_queue *q = mq->queue;
 +       struct mmc_card *card = mq->card;
 +       struct request *cur = req, *next = NULL;
 +       struct mmc_blk_data *md = mq->data;
 +       bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
 +       unsigned int req_sectors = 0, phys_segments = 0;
 +       unsigned int max_blk_count, 

Re: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-02 Thread S, Venkatraman
On Wed, Nov 2, 2011 at 1:33 PM, Seungwon Jeon tgih@samsung.com wrote:
 This patch supports packed command of eMMC4.5 device.
 Several reads(or writes) can be grouped in packed command
 and all data of the individual commands can be sent in a
 single transfer on the bus.

 Signed-off-by: Seungwon Jeon tgih@samsung.com
 ---
  drivers/mmc/card/block.c |  355 
 --
  drivers/mmc/card/queue.c |   48 ++-
  drivers/mmc/card/queue.h |   12 ++
  include/linux/mmc/core.h |    3 +
  4 files changed, 404 insertions(+), 14 deletions(-)

 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
 index a1cb21f..6c49656 100644
 --- a/drivers/mmc/card/block.c
 +++ b/drivers/mmc/card/block.c
 @@ -59,6 +59,13 @@ MODULE_ALIAS(mmc:block);
  #define INAND_CMD38_ARG_SECTRIM1 0x81
  #define INAND_CMD38_ARG_SECTRIM2 0x88

 +#define mmc_req_rel_wr(req)    (((req->cmd_flags & REQ_FUA) || \
 +                       (req->cmd_flags & REQ_META)) && \
 +                       (rq_data_dir(req) == WRITE))
 +#define PACKED_CMD_VER         0x01
 +#define PACKED_CMD_RD          0x01
 +#define PACKED_CMD_WR          0x02
 +
  static DEFINE_MUTEX(block_mutex);

  /*
 @@ -943,7 +950,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
         * kind.  If it was a write, we may have transitioned to
         * program mode, which we have to wait for it to complete.
         */
 -       if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
 +       if ((!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) ||
 +                       (mq_mrq->packed_cmd == MMC_PACKED_WR_HDR)) {
                u32 status;
                do {
                        int err = get_card_status(card, &status, 5);
 @@ -980,12 +988,67 @@ static int mmc_blk_err_check(struct mmc_card *card,
        if (!brq->data.bytes_xfered)
                return MMC_BLK_RETRY;

 +       if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
 +               if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
 +                       return MMC_BLK_PARTIAL;
 +               else
 +                       return MMC_BLK_SUCCESS;
 +       }
 +
        if (blk_rq_bytes(req) != brq->data.bytes_xfered)
                return MMC_BLK_PARTIAL;

        return MMC_BLK_SUCCESS;
  }

 +static int mmc_blk_packed_err_check(struct mmc_card *card,
 +                            struct mmc_async_req *areq)
 +{
 +       struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
 +                                                   mmc_active);
 +       int err, check, status;
 +       u8 ext_csd[512];
 +
 +       check = mmc_blk_err_check(card, areq);
 +
 +       if (check == MMC_BLK_SUCCESS)
 +               return check;
 +
 +       if (check == MMC_BLK_PARTIAL) {
 +               err = get_card_status(card, &status, 0);
 +               if (err)
 +                       return MMC_BLK_ABORT;
 +
 +               if (status & R1_EXP_EVENT) {
 +                       err = mmc_send_ext_csd(card, ext_csd);
 +                       if (err)
 +                               return MMC_BLK_ABORT;
 +
 +                       if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS + 0] &
 +                                               EXT_CSD_PACKED_FAILURE) &&
 +                                       (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
 +                                        EXT_CSD_PACKED_GENERIC_ERROR)) {
 +                               if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
 +                                               EXT_CSD_PACKED_INDEXED_ERROR) {
 +                                       /* Make it 0-based */
 +                                       mq_mrq->packed_fail_idx =
 +                                               ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
 +                                       return MMC_BLK_PARTIAL;
 +                               } else {
 +                                       return MMC_BLK_RETRY;
 +                               }
 +                       }
 +               } else {
 +                       return MMC_BLK_RETRY;
 +               }
 +       }
 +
 +       if (check != MMC_BLK_ABORT)
 +               return MMC_BLK_RETRY;
 +       else
 +               return MMC_BLK_ABORT;
 +}
 +
  static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
                               struct mmc_card *card,
                               int disable_multi,
 @@ -1129,6 +1192,211 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
        mmc_queue_bounce_pre(mqrq);
  }

 +static u8 mmc_blk_chk_packable(struct mmc_queue *mq, struct request *req)
 +{
 +       struct request_queue *q = mq->queue;
 +       struct mmc_card *card = mq->card;
 +       struct request *cur = req, *next = NULL;
 +       struct mmc_blk_data *md = mq->data;
 +       bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
 +       unsigned int req_sectors = 0, phys_segments = 0;
 +       unsigned int 

RE: [PATCH 2/2] mmc: core: Support packed command for eMMC4.5 device

2011-11-02 Thread Seungwon Jeon
S, Venkatraman svenk...@ti.com wrote:
 On Wed, Nov 2, 2011 at 1:33 PM, Seungwon Jeon tgih@samsung.com wrote:
  This patch supports packed command of eMMC4.5 device.
  Several reads(or writes) can be grouped in packed command
  and all data of the individual commands can be sent in a
  single transfer on the bus.
 
  Signed-off-by: Seungwon Jeon tgih@samsung.com
  ---
   drivers/mmc/card/block.c |  355 
  --
   drivers/mmc/card/queue.c |   48 ++-
   drivers/mmc/card/queue.h |   12 ++
   include/linux/mmc/core.h |    3 +
   4 files changed, 404 insertions(+), 14 deletions(-)
 
  diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
  index a1cb21f..6c49656 100644
  --- a/drivers/mmc/card/block.c
  +++ b/drivers/mmc/card/block.c
  @@ -59,6 +59,13 @@ MODULE_ALIAS(mmc:block);
   #define INAND_CMD38_ARG_SECTRIM1 0x81
   #define INAND_CMD38_ARG_SECTRIM2 0x88
 
  +#define mmc_req_rel_wr(req)    (((req->cmd_flags & REQ_FUA) || \
  +                       (req->cmd_flags & REQ_META)) && \
  +                       (rq_data_dir(req) == WRITE))
  +#define PACKED_CMD_VER         0x01
  +#define PACKED_CMD_RD          0x01
  +#define PACKED_CMD_WR          0x02
  +
   static DEFINE_MUTEX(block_mutex);
 
   /*
  @@ -943,7 +950,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
          * kind.  If it was a write, we may have transitioned to
          * program mode, which we have to wait for it to complete.
          */
  -       if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
  +       if ((!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) ||
  +                       (mq_mrq->packed_cmd == MMC_PACKED_WR_HDR)) {
                 u32 status;
                 do {
                         int err = get_card_status(card, &status, 5);
  @@ -980,12 +988,67 @@ static int mmc_blk_err_check(struct mmc_card *card,
         if (!brq->data.bytes_xfered)
                 return MMC_BLK_RETRY;
 
  +       if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
  +               if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
  +                       return MMC_BLK_PARTIAL;
  +               else
  +                       return MMC_BLK_SUCCESS;
  +       }
  +
         if (blk_rq_bytes(req) != brq->data.bytes_xfered)
                 return MMC_BLK_PARTIAL;
 
         return MMC_BLK_SUCCESS;
   }
 
  +static int mmc_blk_packed_err_check(struct mmc_card *card,
  +                            struct mmc_async_req *areq)
  +{
  +       struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
  +                                                   mmc_active);
  +       int err, check, status;
  +       u8 ext_csd[512];
  +
  +       check = mmc_blk_err_check(card, areq);
  +
  +       if (check == MMC_BLK_SUCCESS)
  +               return check;
  +
  +       if (check == MMC_BLK_PARTIAL) {
  +               err = get_card_status(card, &status, 0);
  +               if (err)
  +                       return MMC_BLK_ABORT;
  +
  +               if (status & R1_EXP_EVENT) {
  +                       err = mmc_send_ext_csd(card, ext_csd);
  +                       if (err)
  +                               return MMC_BLK_ABORT;
  +
  +                       if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS + 0] &
  +                                               EXT_CSD_PACKED_FAILURE) &&
  +                                       (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
  +                                        EXT_CSD_PACKED_GENERIC_ERROR)) {
  +                               if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
  +                                               EXT_CSD_PACKED_INDEXED_ERROR) {
  +                                       /* Make it 0-based */
  +                                       mq_mrq->packed_fail_idx =
  +                                               ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
  +                                       return MMC_BLK_PARTIAL;
  +                               } else {
  +                                       return MMC_BLK_RETRY;
  +                               }
  +                       }
  +               } else {
  +                       return MMC_BLK_RETRY;
  +               }
  +       }
  +
  +       if (check != MMC_BLK_ABORT)
  +               return MMC_BLK_RETRY;
  +       else
  +               return MMC_BLK_ABORT;
  +}
  +
   static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
                                struct mmc_card *card,
                                int disable_multi,
  @@ -1129,6 +1192,211 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
         mmc_queue_bounce_pre(mqrq);
   }
 
  +static u8 mmc_blk_chk_packable(struct mmc_queue *mq, struct request *req)
  +{
  +       struct request_queue *q = mq->queue;
  +       struct mmc_card *card = mq->card;
  +       struct request *cur = req, *next = NULL;
  +       struct mmc_blk_data *md =