Author: sparky                       Date: Mon Jun 19 11:52:36 2006 GMT
Module: SOURCES                       Tag: HEAD
---- Log message:
- http://www.zen24593.zen.co.uk/hdaps/hdaps_protect.20060430.patch
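  The patch (against 2.6.16) adds an issue_protect/issue_unprotect hook pair to the
  block layer, a sysfs 'protect' attribute on each registered disk queue, and IDE and
  libata/SCSI handlers that park the drive heads. Writing a number of seconds to the
  attribute parks the heads and freezes the queue for that long; writing 0 thaws it;
  reading it reports whether the queue is currently frozen. A minimal userspace sketch
  of driving that interface, assuming the attribute shows up as
  /sys/block/<disk>/queue/protect (the path and the set_protect() helper below are
  illustrative only, not part of the patch):

    #include <stdio.h>

    /* Illustrative helper: write a freeze duration (in seconds) to the
     * queue's 'protect' attribute; writing 0 thaws the queue again. */
    static int set_protect(const char *attr, int seconds)
    {
            FILE *f = fopen(attr, "w");

            if (!f)
                    return -1;
            fprintf(f, "%d\n", seconds);
            return fclose(f);
    }

    int main(void)
    {
            /* hypothetical device; pick the disk to protect */
            const char *attr = "/sys/block/hda/queue/protect";

            if (set_protect(attr, 8) != 0)  /* park heads, freeze queue for 8s */
                    perror("freeze");
            /* ... react to accelerometer events here ... */
            if (set_protect(attr, 0) != 0)  /* thaw immediately */
                    perror("thaw");
            return 0;
    }

  Repeated writes while the queue is already frozen simply extend the thaw timer
  (see blk_freeze_queue() in the diff below).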

---- Files affected:
SOURCES:
   kernel-desktop-hdaps_protect.patch (NONE -> 1.1)  (NEW)

---- Diffs:

================================================================
Index: SOURCES/kernel-desktop-hdaps_protect.patch
diff -u /dev/null SOURCES/kernel-desktop-hdaps_protect.patch:1.1
--- /dev/null   Mon Jun 19 13:52:36 2006
+++ SOURCES/kernel-desktop-hdaps_protect.patch  Mon Jun 19 13:52:31 2006
@@ -0,0 +1,694 @@
+diff -urN linux-2.6.16.original/block/ll_rw_blk.c linux-2.6.16.hdaps/block/ll_rw_blk.c
+--- linux-2.6.16.original/block/ll_rw_blk.c    2006-03-20 05:53:29.000000000 +0000
++++ linux-2.6.16.hdaps/block/ll_rw_blk.c       2006-03-28 20:39:03.000000000 +0100
+@@ -39,6 +39,8 @@
+ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+ static void init_request_from_bio(struct request *req, struct bio *bio);
+ static int __make_request(request_queue_t *q, struct bio *bio);
++static int blk_protect_register(request_queue_t *q);
++static void blk_protect_unregister(request_queue_t *q);
+ 
+ /*
+  * For the allocated request tables
+@@ -359,6 +361,18 @@
+ 
+ EXPORT_SYMBOL(blk_queue_issue_flush_fn);
+ 
++void blk_queue_issue_protect_fn(request_queue_t *q, issue_protect_fn *ipf)
++{
++      q->issue_protect_fn = ipf;
++}
++EXPORT_SYMBOL(blk_queue_issue_protect_fn);
++
++void blk_queue_issue_unprotect_fn(request_queue_t *q, issue_unprotect_fn *iuf)
++{
++      q->issue_unprotect_fn = iuf;
++}
++EXPORT_SYMBOL(blk_queue_issue_unprotect_fn);
++
+ /*
+  * Cache flushing for ordered writes handling
+  */
+@@ -3817,6 +3831,7 @@
+               return ret;
+       }
+ 
++      blk_protect_register(q);
+       return 0;
+ }
+ 
+@@ -3825,9 +3840,119 @@
+       request_queue_t *q = disk->queue;
+ 
+       if (q && q->request_fn) {
++              blk_protect_unregister(q);
+               elv_unregister_queue(q);
+ 
+               kobject_unregister(&q->kobj);
+               kobject_put(&disk->kobj);
+       }
+ }
++
++/*
++ * Restore the unplugging timer that we re-used
++ * to implement the queue freeze timeout...
++ */
++static void blk_unfreeze_work(void *data)
++{
++      request_queue_t *q = (request_queue_t *) data;
++
++      INIT_WORK(&q->unplug_work, blk_unplug_work, q);
++      q->unplug_timer.function = blk_unplug_timeout;
++
++      q->issue_unprotect_fn(q);
++}
++
++/*
++ * Called when the queue freeze timeout expires...
++ */
++static void blk_unfreeze_timeout(unsigned long data)
++{
++      request_queue_t *q = (request_queue_t *) data;
++      kblockd_schedule_work(&q->unplug_work);
++}
++
++/* 
++ * The lower level driver parks and freezes the queue, and this block layer
++ *  function sets up the freeze timeout timer on return. If the queue is
++ *  already frozen then this is called to extend the timer...
++ */
++void blk_freeze_queue(request_queue_t *q, int seconds)
++{
++      /* set/reset the timer */
++      mod_timer(&q->unplug_timer, msecs_to_jiffies(seconds*1000) + jiffies);
++
++      /* we do this every iteration - is this sane? */
++      INIT_WORK(&q->unplug_work, blk_unfreeze_work, q);
++      q->unplug_timer.function = blk_unfreeze_timeout;
++}
++
++/* 
++ * When reading the 'protect' attribute, we return boolean frozen or active
++ * todo:
++ * - maybe we should return seconds remaining instead?
++ */
++static ssize_t queue_protect_show(struct request_queue *q, char *page)
++{
++      return queue_var_show(blk_queue_stopped(q), (page));
++}
++
++/* 
++ * When writing the 'protect' attribute, input is the number of seconds
++ * to freeze the queue for. We call a lower level helper function to 
++ * park the heads and freeze/block the queue, then we make a block layer
++ * call to setup the thaw timeout. If input is 0, then we thaw the queue.
++ */
++static ssize_t queue_protect_store(struct request_queue *q, const char *page, size_t count)
++{
++      unsigned long freeze = 0;
++      queue_var_store(&freeze, page, count);
++
++      if(freeze>0) {
++              /* Park and freeze */
++              if (!blk_queue_stopped(q))
++                      q->issue_protect_fn(q);
++              /* set / reset the thaw timer */
++              blk_freeze_queue(q, freeze);
++      }
++      else
++              blk_unfreeze_timeout((unsigned long) q);
++
++      return count;
++}
++
++static struct queue_sysfs_entry queue_protect_entry = {
++      .attr = {.name = "protect", .mode = S_IRUGO | S_IWUSR },
++      .show = queue_protect_show,
++      .store = queue_protect_store,
++};
++
++static int blk_protect_register(request_queue_t *q)
++{
++      int error = 0;
++
++      /* check that the lower level driver has a protect handler */   
++      if (!q->issue_protect_fn)
++              return 1;
++      
++      /* create the attribute */
++      error = sysfs_create_file(&q->kobj, &queue_protect_entry.attr);
++      if(error){
++              printk(KERN_ERR 
++                      "blk_protect_register(): failed to create protect queue attribute!\n");
++              return error;
++      }
++      
++      kobject_get(&q->kobj);
++      return 0;               
++}
++
++static void blk_protect_unregister(request_queue_t *q)
++{
++      /* check that the lower level driver has a protect handler */   
++      if (!q->issue_protect_fn)
++              return;
++
++      /* remove the attribute */
++      sysfs_remove_file(&q->kobj,&queue_protect_entry.attr);
++      kobject_put(&q->kobj);
++}
+diff -urN linux-2.6.16.original/drivers/ide/ide-disk.c linux-2.6.16.hdaps/drivers/ide/ide-disk.c
+--- linux-2.6.16.original/drivers/ide/ide-disk.c       2006-03-20 05:53:29.000000000 +0000
++++ linux-2.6.16.hdaps/drivers/ide/ide-disk.c  2006-04-30 13:24:35.000000000 +0100
+@@ -71,6 +71,10 @@
+ #include <asm/io.h>
+ #include <asm/div64.h>
+ 
++int idedisk_protect_method = 0;
++module_param_named(protect_method, idedisk_protect_method, int, 0444);
++MODULE_PARM_DESC(protect_method, "hdaps disk protection method (0=autodetect, 1=unload, 2=standby)");
++
+ struct ide_disk_obj {
+       ide_drive_t     *drive;
+       ide_driver_t    *driver;
+@@ -727,6 +731,154 @@
+ }
+ 
+ /*
++ * todo:
++ *  - we freeze the queue regardless of success and rely on the 
++ *    ide_protect_queue function to thaw immediately if the command
++ *    failed (to be consistent with the libata handler)... should 
++ *    we also inspect here?
++ */
++void ide_end_protect_rq(struct request *rq, int error)
++{
++      struct completion *waiting = rq->waiting;
++
++      /* spin lock already acquired */
++      if (!blk_queue_stopped(rq->q))
++              blk_stop_queue(rq->q);
++
++      complete(waiting);
++}
++
++int ide_unprotect_queue(request_queue_t *q)
++{
++      struct request  rq;
++      unsigned long flags;
++      int             pending = 0, rc = 0;
++      ide_drive_t     *drive = q->queuedata;
++      u8              args[7], *argbuf = args;
++
++      if (!blk_queue_stopped(q))
++              return -EIO;
++
++      /* Are there any pending jobs on the queue? */
++      pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
++      
++      spin_lock_irqsave(q->queue_lock, flags);
++      blk_start_queue(q);
++      spin_unlock_irqrestore(q->queue_lock, flags);
++
++      /* The unload feature of the IDLE_IMMEDIATE command
++         temporarily disables HD power management from spinning down
++         the disk. Any other command will reenable HD pm, so, if
++         there are no pending jobs on the queue, another
++         CHECK_POWER_MODE1 command without the unload feature should do
++         just fine. */
++      if (!pending) {
++              printk(KERN_DEBUG "ide_unprotect_queue(): No pending I/O, re-enabling power management..\n");
++              memset(args, 0, sizeof(args));
++              argbuf[0] = 0xe5; /* CHECK_POWER_MODE1 */
++              ide_init_drive_cmd(&rq);
++              rq.flags = REQ_DRIVE_TASK;
++              rq.buffer = argbuf;
++              rc = ide_do_drive_cmd(drive, &rq, ide_head_wait);
++      }
++
++      return rc;
++}
++
++int ide_protect_queue(request_queue_t *q, int unload)
++{
++      ide_drive_t     *drive = q->queuedata;
++      struct request  rq;
++      u8              args[7], *argbuf = args;
++      int             ret = 0;
++      DECLARE_COMPLETION(wait);
++
++      memset(&rq, 0, sizeof(rq));
++      memset(args, 0, sizeof(args));
++
++      if (blk_queue_stopped(q))
++              return -EIO;
++
++      if (unload) {
++              argbuf[0] = 0xe1;
++              argbuf[1] = 0x44;
++              argbuf[3] = 0x4c;
++              argbuf[4] = 0x4e;
++              argbuf[5] = 0x55;
++      } else
++              argbuf[0] = 0xe0;
++
++      /* Issue the park command & freeze */
++      ide_init_drive_cmd(&rq);
++
++      rq.flags = REQ_DRIVE_TASK;
++      rq.buffer = argbuf;
++      rq.waiting = &wait;
++      rq.end_io = ide_end_protect_rq;
++
++      ret = ide_do_drive_cmd(drive, &rq, ide_next);
++      wait_for_completion(&wait);
++      rq.waiting = NULL;
++
++      if (ret)
++      {
++              printk(KERN_DEBUG "ide_protect_queue(): Warning: head NOT parked!..\n");
++              ide_unprotect_queue(q);
++              return ret;
++      }
++
++      if (unload) {
++              if (args[3] == 0xc4)
++                      printk(KERN_DEBUG "ide_protect_queue(): head parked..\n");
++              else {
++                      /* error parking the head */
++                      printk(KERN_DEBUG "ide_protect_queue(): head NOT parked!..\n");
++                      ret = -EIO;
++                      ide_unprotect_queue(q);
++              }
++      } else
++              printk(KERN_DEBUG "ide_protect_queue(): head park not requested, used standby!..\n");
++
++      return ret;
++}     
++
++int idedisk_issue_protect_fn(request_queue_t *q)
++{
++      ide_drive_t             *drive = q->queuedata;
++      int unload;
++
++      /*
++       * Check capability of the device -
++       *  - if "idle immediate with unload" is supported we use that, else
++       *    we use "standby immediate" and live with spinning down the drive..
++       *    (Word 84, bit 13 of IDENTIFY DEVICE data)
++       */
++      if (idedisk_protect_method == 1) {
++              unload = 1;     
++              printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload method requested, overriding drive capability check..\n");
++      }
++      else if (idedisk_protect_method == 2) {
++              unload = 0;     
++              printk(KERN_DEBUG "idedisk_issue_protect_fn(): standby method requested, overriding drive capability check..\n");
++      }
++      else if (drive->id->cfsse & (1 << 13)) {
++              unload = 1;
++              printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload support reported by drive..\n");
++      }
++      else {
++              unload = 0;
++              printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload support NOT reported by drive!..\n");
++      }
++
++      return ide_protect_queue(q, unload);
++}
++
++int idedisk_issue_unprotect_fn(request_queue_t *q)
++{
++      return ide_unprotect_queue(q);
++}
++
++/*
+  * This is tightly woven into the driver->do_special can not touch.
+  * DON'T do it again until a total personality rewrite is committed.
+  */
+@@ -984,6 +1136,9 @@
+               drive->wcache = 1;
+ 
+       write_cache(drive, 1);
++
++      blk_queue_issue_protect_fn(drive->queue, idedisk_issue_protect_fn);     
++      blk_queue_issue_unprotect_fn(drive->queue, idedisk_issue_unprotect_fn); 
+ }
+ 
+ static void ide_cacheflush_p(ide_drive_t *drive)
+diff -urN linux-2.6.16.original/drivers/ide/ide-io.c linux-2.6.16.hdaps/drivers/ide/ide-io.c
+--- linux-2.6.16.original/drivers/ide/ide-io.c 2006-03-20 05:53:29.000000000 +0000
++++ linux-2.6.16.hdaps/drivers/ide/ide-io.c    2006-03-26 15:10:44.000000000 +0100
+@@ -1180,6 +1180,17 @@
+               }
+ 
+               /*
++               * Don't accept a request when the queue is stopped (unless we
++               * are resuming from suspend). Prevents existing queue entries 
++               * being processed after queue is stopped by the hard disk 
++               * protection mechanism...
++               */
++              if (test_bit(QUEUE_FLAG_STOPPED, &drive->queue->queue_flags) && !blk_pm_resume_request(rq)) {
++                      hwgroup->busy = 0;
++                      break;
++              }
++
++              /*
+                * Sanity: don't accept a request that isn't a PM request
+                * if we are currently power managed. This is very important as
+                * blk_stop_queue() doesn't prevent the elv_next_request()
+@@ -1660,6 +1671,9 @@
+               where = ELEVATOR_INSERT_FRONT;
+               rq->flags |= REQ_PREEMPT;
+       }
++      if (action == ide_next)
++              where = ELEVATOR_INSERT_FRONT;
++
+       __elv_add_request(drive->queue, rq, where, 0);
+       ide_do_request(hwgroup, IDE_NO_IRQ);
+       spin_unlock_irqrestore(&ide_lock, flags);
+diff -urN linux-2.6.16.original/drivers/scsi/libata-core.c linux-2.6.16.hdaps/drivers/scsi/libata-core.c
+--- linux-2.6.16.original/drivers/scsi/libata-core.c   2006-03-20 05:53:29.000000000 +0000
++++ linux-2.6.16.hdaps/drivers/scsi/libata-core.c      2006-04-30 13:25:24.000000000 +0100
+@@ -78,6 +78,10 @@
+ static unsigned int ata_unique_id = 1;
+ static struct workqueue_struct *ata_wq;
+ 
++int libata_protect_method = 0;
++module_param_named(protect_method, libata_protect_method, int, 0444);
++MODULE_PARM_DESC(protect_method, "hdaps disk protection method (0=autodetect, 1=unload, 2=standby)");
++
+ int atapi_enabled = 0;
+ module_param(atapi_enabled, int, 0444);
+ MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
+diff -urN linux-2.6.16.original/drivers/scsi/libata.h linux-2.6.16.hdaps/drivers/scsi/libata.h
+--- linux-2.6.16.original/drivers/scsi/libata.h        2006-03-20 05:53:29.000000000 +0000
++++ linux-2.6.16.hdaps/drivers/scsi/libata.h   2006-04-30 13:25:33.000000000 +0100
+@@ -40,6 +40,7 @@
+ };
+ 
+ /* libata-core.c */
++extern int libata_protect_method;
+ extern int atapi_enabled;
+ extern int libata_fua;
+ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
+diff -urN linux-2.6.16.original/drivers/scsi/libata-scsi.c linux-2.6.16.hdaps/drivers/scsi/libata-scsi.c
+--- linux-2.6.16.original/drivers/scsi/libata-scsi.c   2006-03-20 05:53:29.000000000 +0000
++++ linux-2.6.16.hdaps/drivers/scsi/libata-scsi.c      2006-04-30 13:24:56.000000000 +0100
+@@ -662,6 +662,42 @@
+       }
+ }
+ 
++extern int scsi_protect_queue(request_queue_t *q, int unload);
++extern int scsi_unprotect_queue(request_queue_t *q);
++
++static int ata_scsi_issue_protect_fn(request_queue_t *q)
++{
++      struct scsi_device *sdev = q->queuedata;
++      struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
++      struct ata_device *dev = &ap->device[sdev->id];
++      int unload;
++
++      if (libata_protect_method == 1) {
++              unload = 1;     
++              printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload method requested, overriding drive capability check..\n");
++      }
++      else if (libata_protect_method == 2) {
++              unload = 0;     
++              printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): standby method requested, overriding drive capability check..\n");
++      }
++      else if (ata_id_has_unload(dev->id)) {
++              unload = 1;
++              printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload support reported by drive..\n");
++      }
++      else {
++              unload = 0;
++              printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload support NOT reported by drive!..\n");
++      }
++
++      /* call scsi_protect_queue, requesting either unload or standby */
++      return scsi_protect_queue(q, unload);
++}
++
++static int ata_scsi_issue_unprotect_fn(request_queue_t *q)
++{
++      return scsi_unprotect_queue(q);
++}
++
+ /**
+  *    ata_scsi_slave_config - Set SCSI device attributes
+  *    @sdev: SCSI device to examine
+@@ -712,6 +748,8 @@
+                       blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
+               }
+       }
++      blk_queue_issue_protect_fn(sdev->request_queue, ata_scsi_issue_protect_fn);
++      blk_queue_issue_unprotect_fn(sdev->request_queue, ata_scsi_issue_unprotect_fn);
+ 
+       return 0;       /* scsi layer doesn't check return value, sigh */
+ }
+diff -urN linux-2.6.16.original/drivers/scsi/scsi_lib.c linux-2.6.16.hdaps/drivers/scsi/scsi_lib.c
+--- linux-2.6.16.original/drivers/scsi/scsi_lib.c      2006-03-20 05:53:29.000000000 +0000
++++ linux-2.6.16.hdaps/drivers/scsi/scsi_lib.c 2006-03-26 15:10:44.000000000 +0100
+@@ -2307,3 +2307,189 @@
+       return 1;
+ }
+ EXPORT_SYMBOL_GPL(scsi_execute_in_process_context);
++
++/*
++ * As per scsi_wait_req_end_io(), which was removed in 2.6.15
++ */
++static void scsi_protect_wait_req_end_io(struct request *req, int error)
++{
++      BUG_ON(!req->waiting);
++
++      complete(req->waiting);
++}
++
++/*
++ * As per scsi_wait_done(), except calls scsi_device_block
++ * to block the queue at command completion. Only called by
++ * scsi_protect_wait().
++ * todo:
++ *  - we block the queue regardless of success and rely on the 
++ *    scsi_protect_queue function to unblock if the command
++ *    failed... should we also inspect here?
++ */
++static void scsi_protect_wait_done(struct scsi_cmnd *cmd)
++{
++      struct request *req = cmd->request;
++      struct request_queue *q = cmd->device->request_queue;
++      struct scsi_device *sdev = cmd->device;
++      unsigned long flags;
++
++      req->rq_status = RQ_SCSI_DONE;  /* Busy, but indicate request done */
++
++      spin_lock_irqsave(q->queue_lock, flags);
++      if (blk_rq_tagged(req))
++              blk_queue_end_tag(q, req);
++      spin_unlock_irqrestore(q->queue_lock, flags);
++
++      scsi_internal_device_block(sdev);
++
++      if (req->waiting)
++              complete(req->waiting);
++}
++
++/*
++ * As per scsi_wait_req(), except sets the completion function
++ * as scsi_protect_wait_done().
++ */
++void scsi_protect_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
++                 unsigned bufflen, int timeout, int retries)
++{
++      DECLARE_COMPLETION(wait);
++      
++      sreq->sr_request->waiting = &wait;
++      sreq->sr_request->rq_status = RQ_SCSI_BUSY;
++      sreq->sr_request->end_io = scsi_protect_wait_req_end_io;
++      scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_protect_wait_done,
++                      timeout, retries);
++      wait_for_completion(&wait);
++      sreq->sr_request->waiting = NULL;
++      if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
++              sreq->sr_result |= (DRIVER_ERROR << 24);
++
++      __scsi_release_request(sreq);
++}
++
++/*
++ * scsi_unprotect_queue()
++ *  - release the queue that was previously blocked
++ */
++int scsi_unprotect_queue(request_queue_t *q){
++
++      struct scsi_device *sdev = q->queuedata;
++      int rc = 0, pending = 0;
++      u8 scsi_cmd[MAX_COMMAND_SIZE];
++      struct scsi_sense_hdr sshdr;
++
++      if (sdev->sdev_state != SDEV_BLOCK)
++              return -ENXIO;
++
++      /* Are there any pending jobs on the queue? */
++      pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
++
++      rc = scsi_internal_device_unblock(sdev);
++      if (rc)
++              return rc;
++
++      if (!pending) {
++              printk(KERN_DEBUG "scsi_unprotect_queue(): No pending I/O, re-enabling power management..\n");
++
++              memset(scsi_cmd, 0, sizeof(scsi_cmd));
++              scsi_cmd[0]  = ATA_16;
++              scsi_cmd[1]  = (3 << 1); /* Non-data */
++              /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
++              scsi_cmd[14] = 0xe5; /* CHECK_POWER_MODE1 */
++
++              /* Good values for timeout and retries?  Values below
++                 from scsi_ioctl_send_command() for default case... */        
++              if (scsi_execute_req(sdev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
++                           (10*HZ), 5))
++                      rc = -EIO;
++      }
++      return rc;
++}
++EXPORT_SYMBOL_GPL(scsi_unprotect_queue);
++
++/*
++ * scsi_protect_queue()
++ *  - build and issue the park/standby command.. 
++ *  - queue is blocked during command completion handler
++ */
++int scsi_protect_queue(request_queue_t *q, int unload)
++{
++      struct scsi_device *sdev = q->queuedata;
++      int rc = 0;
++      u8 scsi_cmd[MAX_COMMAND_SIZE];
++      u8 args[7];
++      struct scsi_request *sreq;
++      unsigned char *sb, *desc;
++
++      if (sdev->sdev_state != SDEV_RUNNING)
++              return -ENXIO;
++
++      memset(args, 0, sizeof(args));
++
++      if (unload) {
++              args[0] = 0xe1;
++              args[1] = 0x44;
++              args[3] = 0x4c;
++              args[4] = 0x4e;
++              args[5] = 0x55;
++      } else
++              args[0] = 0xe0;
++
++      memset(scsi_cmd, 0, sizeof(scsi_cmd));
++      scsi_cmd[0]  = ATA_16;
++      scsi_cmd[1]  = (3 << 1); /* Non-data */
++      scsi_cmd[2]  = 0x20;     /* no off.line, or data xfer, request cc */
++      scsi_cmd[4]  = args[1];
<<Diff was trimmed, longer than 597 lines>>
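
For reference, the registration pattern the patch uses is the same in ide-disk.c and
libata-scsi.c: the low-level driver supplies two callbacks that take the request queue
and return int, then hands them to the new block-layer hooks when it configures the
queue. A stripped-down sketch under those assumptions (the my_* names are hypothetical
and the actual park/standby command handling is elided):

    #include <linux/blkdev.h>

    /* Hypothetical low-level driver callbacks. The block layer only creates
     * the sysfs 'protect' attribute if issue_protect_fn is non-NULL. */
    static int my_issue_protect(request_queue_t *q)
    {
            /* issue the head-park (or standby) command and stop the queue;
             * return non-zero and restart the queue if parking failed */
            return 0;
    }

    static int my_issue_unprotect(request_queue_t *q)
    {
            /* restart the queue and re-enable drive power management */
            return 0;
    }

    static void my_setup_queue(request_queue_t *q)
    {
            blk_queue_issue_protect_fn(q, my_issue_protect);
            blk_queue_issue_unprotect_fn(q, my_issue_unprotect);
    }

The freeze timeout itself lives in the block layer (blk_freeze_queue() re-uses the
queue's unplug timer), so the driver callbacks never need to arm their own timers.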
_______________________________________________
pld-cvs-commit mailing list
[email protected]
http://lists.pld-linux.org/mailman/listinfo/pld-cvs-commit
