Index: linux/Documentation/Configure.help
diff -u linux/Documentation/Configure.help:1.1.1.3 linux/Documentation/Configure.help:1.2
--- linux/Documentation/Configure.help:1.1.1.3	Fri Jan  7 22:33:08 2000
+++ linux/Documentation/Configure.help	Mon Jan 10 21:45:08 2000
@@ -4103,6 +4103,17 @@
   so most people can say N here and should in fact do so, because it
   is safer.
 
+Enable host blocking to help with buggy DMA chipsets
+CONFIG_SCSI_HOST_BLOCK
+  Some ISA DMA chipsets are buggy in the sense that if there is more
+  than one ISA DMA busmaster active at the same time, the system
+  becomes unstable.  In order to ensure stability, the host block
+  feature was added which ensures that only one such card has active
+  commands at one time.  You only need this if you have more than
+  one ISA busmaster on your system - this does not apply to PCI
+  hosts, and it certainly doesn't apply if you only have one
+  SCSI host adapter.
+
 Verbose SCSI error reporting (kernel size +=12K)
 CONFIG_SCSI_CONSTANTS
   The error messages regarding your SCSI hardware will be easier to
Index: linux/drivers/scsi/Config.in
diff -u linux/drivers/scsi/Config.in:1.1.1.3 linux/drivers/scsi/Config.in:1.2
--- linux/drivers/scsi/Config.in:1.1.1.3	Sat Dec 25 12:58:42 1999
+++ linux/drivers/scsi/Config.in	Mon Jan 10 21:44:27 2000
@@ -18,6 +18,7 @@
   
 bool '  Verbose SCSI error reporting (kernel size +=12K)' CONFIG_SCSI_CONSTANTS
 bool '  SCSI logging facility' CONFIG_SCSI_LOGGING
+bool '  Enable host blocking to help with buggy DMA chipsets' CONFIG_SCSI_HOST_BLOCK
 
 mainmenu_option next_comment
 comment 'SCSI low-level drivers'
Index: linux/drivers/scsi/Makefile
diff -u linux/drivers/scsi/Makefile:1.1.1.4 linux/drivers/scsi/Makefile:1.2
--- linux/drivers/scsi/Makefile:1.1.1.4	Thu Jan  6 01:53:07 2000
+++ linux/drivers/scsi/Makefile	Mon Jan 10 21:44:27 2000
@@ -41,7 +41,7 @@
   endif
   L_OBJS += scsi_n_syms.o hosts.o scsi_ioctl.o constants.o scsicam.o
   L_OBJS += scsi_error.o scsi_obsolete.o scsi_queue.o scsi_lib.o 
-  L_OBJS += scsi_merge.o scsi_proc.o
+  L_OBJS += scsi_merge.o scsi_proc.o scsi_dma.o
 else
   ifeq ($(CONFIG_SCSI),m)
     MIX_OBJS += scsi_syms.o
@@ -722,10 +722,10 @@
 
 scsi_mod.o: $(MIX_OBJS) hosts.o scsi.o scsi_ioctl.o constants.o \
 		scsicam.o scsi_proc.o scsi_error.o scsi_obsolete.o \
-		scsi_queue.o scsi_lib.o scsi_merge.o
+		scsi_queue.o scsi_lib.o scsi_merge.o scsi_dma.o
 	$(LD) $(LD_RFLAG) -r -o $@ $(MIX_OBJS) hosts.o scsi.o scsi_ioctl.o \
 		constants.o scsicam.o scsi_proc.o scsi_merge.o     \
-		scsi_error.o scsi_obsolete.o scsi_queue.o scsi_lib.o
+		scsi_error.o scsi_obsolete.o scsi_queue.o scsi_lib.o scsi_dma.o
 
 sr_mod.o: sr.o sr_ioctl.o sr_vendor.o
 	$(LD) $(LD_RFLAG) -r -o $@ sr.o sr_ioctl.o sr_vendor.o
Index: linux/drivers/scsi/gdth.c
diff -u linux/drivers/scsi/gdth.c:1.1.1.1 linux/drivers/scsi/gdth.c:1.2
--- linux/drivers/scsi/gdth.c:1.1.1.1	Mon Jan  3 14:27:56 2000
+++ linux/drivers/scsi/gdth.c	Mon Jan  3 18:31:13 2000
@@ -3157,7 +3157,7 @@
             NUMDATA(shp)->busnum= 0;
 
             ha->pccb = CMDDATA(shp);
-            ha->pscratch = scsi_init_malloc(GDTH_SCRATCH, GFP_ATOMIC | GFP_DMA);
+            ha->pscratch = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, GDTH_SCRATCH_ORD);
             ha->scratch_busy = FALSE;
             ha->req_first = NULL;
             ha->tid_cnt = MAX_HDRIVES;
@@ -3172,7 +3172,7 @@
                 --gdth_ctr_count;
                 --gdth_ctr_vcount;
                 if (ha->pscratch != NULL)
-                    scsi_init_free((void *)ha->pscratch, GDTH_SCRATCH);
+                    free_pages((unsigned long)ha->pscratch, GDTH_SCRATCH_ORD);
                 free_irq(ha->irq,NULL);
                 scsi_unregister(shp);
                 continue;
@@ -3223,7 +3223,7 @@
                     NUMDATA(shp)->hanum));
 
             ha->pccb = CMDDATA(shp);
-            ha->pscratch = scsi_init_malloc(GDTH_SCRATCH, GFP_ATOMIC | GFP_DMA);
+            ha->pscratch = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, GDTH_SCRATCH_ORD);
             ha->scratch_busy = FALSE;
             ha->req_first = NULL;
             ha->tid_cnt = MAX_HDRIVES;
@@ -3238,7 +3238,7 @@
                 --gdth_ctr_count;
                 --gdth_ctr_vcount;
                 if (ha->pscratch != NULL)
-                    scsi_init_free((void *)ha->pscratch, GDTH_SCRATCH);
+                    free_pages((unsigned long)ha->pscratch, GDTH_SCRATCH_ORD);
                 free_irq(ha->irq,NULL);
                 scsi_unregister(shp);
                 continue;
@@ -3293,7 +3293,7 @@
             NUMDATA(shp)->busnum= 0;
 
             ha->pccb = CMDDATA(shp);
-            ha->pscratch = scsi_init_malloc(GDTH_SCRATCH, GFP_ATOMIC | GFP_DMA);
+            ha->pscratch = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, GDTH_SCRATCH_ORD);
             ha->scratch_busy = FALSE;
             ha->req_first = NULL;
             ha->tid_cnt = pcistr[ctr].device_id >= 0x200 ? MAXID : MAX_HDRIVES;
@@ -3308,7 +3308,7 @@
                 --gdth_ctr_count;
                 --gdth_ctr_vcount;
                 if (ha->pscratch != NULL)
-                    scsi_init_free((void *)ha->pscratch, GDTH_SCRATCH);
+                    free_pages((unsigned long)ha->pscratch, GDTH_SCRATCH_ORD);
                 free_irq(ha->irq,NULL);
                 scsi_unregister(shp);
                 continue;
@@ -3359,7 +3359,7 @@
         if (shp->dma_channel != 0xff) {
             free_dma(shp->dma_channel);
         }
-        scsi_init_free((void *)ha->pscratch, GDTH_SCRATCH);
+        free_pages((unsigned long)ha->pscratch, GDTH_SCRATCH_ORD);
         gdth_ctr_released++;
         TRACE2(("gdth_release(): HA %d of %d\n", 
                 gdth_ctr_released, gdth_ctr_count));
@@ -3561,22 +3561,19 @@
 {
     int             i;
     gdth_ha_str     *ha;
-    Scsi_Cmnd       scp;
-    Scsi_Device     sdev;
+    Scsi_Cmnd     * scp;
+    Scsi_Device   * sdev;
     gdth_cmd_str    gdtcmd;
 
     TRACE2(("gdth_flush() hanum %d\n",hanum));
     ha = HADATA(gdth_ctr_tab[hanum]);
-    memset(&sdev,0,sizeof(Scsi_Device));
-    memset(&scp, 0,sizeof(Scsi_Cmnd));
-    sdev.host = gdth_ctr_tab[hanum];
-    sdev.id = sdev.host->this_id;
-    scp.cmd_len = 12;
-    scp.host = gdth_ctr_tab[hanum];
-    scp.target = sdev.host->this_id;
-    scp.device = &sdev;
-    scp.use_sg = 0;
 
+    sdev = scsi_get_host_dev(gdth_ctr_tab[hanum]);
+    scp  = scsi_allocate_device(sdev, 1, FALSE);
+
+    scp->cmd_len = 12;
+    scp->use_sg = 0;
+
     for (i = 0; i < MAX_HDRIVES; ++i) {
         if (ha->hdr[i].present) {
             gdtcmd.BoardNode = LOCALBOARD;
@@ -3586,9 +3583,11 @@
             gdtcmd.u.cache.BlockNo = 1;
             gdtcmd.u.cache.sg_canz = 0;
             TRACE2(("gdth_flush(): flush ha %d drive %d\n", hanum, i));
-            gdth_do_cmd(&scp, &gdtcmd, 30);
+            gdth_do_cmd(scp, &gdtcmd, 30);
         }
     }
+    scsi_release_command(scp);
+    scsi_free_host_dev(sdev);
 }
 
 /* shutdown routine */
@@ -3596,8 +3595,8 @@
 {
     int             hanum;
 #ifndef __alpha__
-    Scsi_Cmnd       scp;
-    Scsi_Device     sdev;
+    Scsi_Cmnd     * scp;
+    Scsi_Device   * sdev;
     gdth_cmd_str    gdtcmd;
 #endif
 
@@ -3610,23 +3609,21 @@
 
 #ifndef __alpha__
         /* controller reset */
-        memset(&sdev,0,sizeof(Scsi_Device));
-        memset(&scp, 0,sizeof(Scsi_Cmnd));
-        sdev.host = gdth_ctr_tab[hanum];
-        sdev.id = sdev.host->this_id;
-        scp.cmd_len = 12;
-        scp.host = gdth_ctr_tab[hanum];
-        scp.target = sdev.host->this_id;
-        scp.device = &sdev;
-        scp.use_sg = 0;
+        sdev = scsi_get_host_dev(gdth_ctr_tab[hanum]);
+        scp  = scsi_allocate_device(sdev, 1, FALSE);
+        scp->cmd_len = 12;
+        scp->use_sg = 0;
 
         gdtcmd.BoardNode = LOCALBOARD;
         gdtcmd.Service = CACHESERVICE;
         gdtcmd.OpCode = GDT_RESET;
         TRACE2(("gdth_halt(): reset controller %d\n", hanum));
-        gdth_do_cmd(&scp, &gdtcmd, 10);
+        gdth_do_cmd(scp, &gdtcmd, 10);
+        scsi_release_command(scp);
+        scsi_free_host_dev(sdev);
 #endif
     }
+
     printk("Done.\n");
 
 #ifdef GDTH_STATISTICS
Index: linux/drivers/scsi/gdth.h
diff -u linux/drivers/scsi/gdth.h:1.1.1.1 linux/drivers/scsi/gdth.h:1.2
--- linux/drivers/scsi/gdth.h:1.1.1.1	Mon Jan  3 14:27:56 2000
+++ linux/drivers/scsi/gdth.h	Mon Jan  3 18:31:14 2000
@@ -126,7 +126,8 @@
 #endif
 
 /* limits */
-#define GDTH_SCRATCH    4096                    /* 4KB scratch buffer */
+#define GDTH_SCRATCH    PAGE_SIZE               /* one-page scratch buffer (page size varies by arch) */
+#define GDTH_SCRATCH_ORD 0                      /* order 0 means 1 page */
 #define GDTH_MAXCMDS    124
 #define GDTH_MAXC_P_L   16                      /* max. cmds per lun */
 #define GDTH_MAX_RAW    2                       /* max. cmds per raw device */
Index: linux/drivers/scsi/gdth_proc.c
diff -u linux/drivers/scsi/gdth_proc.c:1.1.1.1 linux/drivers/scsi/gdth_proc.c:1.3
--- linux/drivers/scsi/gdth_proc.c:1.1.1.1	Mon Jan  3 14:45:15 2000
+++ linux/drivers/scsi/gdth_proc.c	Mon Jan 10 21:44:27 2000
@@ -31,22 +31,17 @@
 static int gdth_set_info(char *buffer,int length,int vh,int hanum,int busnum)
 {
     int             ret_val;
-    Scsi_Cmnd       scp;
-    Scsi_Device     sdev;
+    Scsi_Cmnd     * scp;
+    Scsi_Device   * sdev;
     gdth_iowr_str   *piowr;
 
     TRACE2(("gdth_set_info() ha %d bus %d\n",hanum,busnum));
     piowr = (gdth_iowr_str *)buffer;
 
-    memset(&sdev,0,sizeof(Scsi_Device));
-    memset(&scp, 0,sizeof(Scsi_Cmnd));
-    sdev.host = gdth_ctr_vtab[vh];
-    sdev.id = sdev.host->this_id;
-    scp.cmd_len = 12;
-    scp.host = gdth_ctr_vtab[vh];
-    scp.target = sdev.host->this_id;
-    scp.device = &sdev;
-    scp.use_sg = 0;
+    sdev = scsi_get_host_dev(gdth_ctr_vtab[vh]);
+    scp  = scsi_allocate_device(sdev, 1, FALSE);
+    scp->cmd_len = 12;
+    scp->use_sg = 0;
 
     if (length >= 4) {
         if (strncmp(buffer,"gdth",4) == 0) {
@@ -62,10 +57,14 @@
     } else {
         ret_val = -EINVAL;
     }
+
+    scsi_release_command(scp);
+    scsi_free_host_dev(sdev);
+
     return ret_val;
 }
          
-static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Cmnd scp)
+static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Cmnd * scp)
 {
     int             orig_length, drive, wb_mode;
     int             i, found;
@@ -105,7 +104,7 @@
                 gdtcmd.u.cache.DeviceNo = i;
                 gdtcmd.u.cache.BlockNo = 1;
                 gdtcmd.u.cache.sg_canz = 0;
-                gdth_do_cmd(&scp, &gdtcmd, 30);
+                gdth_do_cmd(scp, &gdtcmd, 30);
             }
         }
         if (!found)
@@ -158,7 +157,7 @@
         gdtcmd.u.ioctl.subfunc = CACHE_CONFIG;
         gdtcmd.u.ioctl.channel = INVALID_CHANNEL;
         pcpar->write_back = wb_mode==1 ? 0:1;
-        gdth_do_cmd(&scp, &gdtcmd, 30);
+        gdth_do_cmd(scp, &gdtcmd, 30);
         gdth_ioctl_free(hanum);
         printk("Done.\n");
         return(orig_length);
@@ -168,7 +167,7 @@
     return(-EINVAL);
 }
 
-static int gdth_set_bin_info(char *buffer,int length,int hanum,Scsi_Cmnd scp)
+static int gdth_set_bin_info(char *buffer,int length,int hanum,Scsi_Cmnd * scp)
 {
     unchar          i, j;
     gdth_ha_str     *ha;
@@ -241,8 +240,8 @@
             *ppadd2 = virt_to_bus(piord->iu.general.data+add_size);
         }
         /* do IOCTL */
-        gdth_do_cmd(&scp, pcmd, piowr->timeout);
-        piord->status = (ulong32)scp.SCp.Message;
+        gdth_do_cmd(scp, pcmd, piowr->timeout);
+        piord->status = (ulong32)scp->SCp.Message;
         break;
 
       case GDTIOCTL_DRVERS:
@@ -401,8 +400,8 @@
 
     gdth_cmd_str gdtcmd;
     gdth_evt_str estr;
-    Scsi_Cmnd scp;
-    Scsi_Device sdev;
+    Scsi_Cmnd  * scp;
+    Scsi_Device *sdev;
     char hrec[161];
     struct timeval tv;
 
@@ -417,15 +416,10 @@
     ha = HADATA(gdth_ctr_tab[hanum]);
     id = length;
 
-    memset(&sdev,0,sizeof(Scsi_Device));
-    memset(&scp, 0,sizeof(Scsi_Cmnd));
-    sdev.host = gdth_ctr_vtab[vh];
-    sdev.id = sdev.host->this_id;
-    scp.cmd_len = 12;
-    scp.host = gdth_ctr_vtab[vh];
-    scp.target = sdev.host->this_id;
-    scp.device = &sdev;
-    scp.use_sg = 0;
+    sdev = scsi_get_host_dev(gdth_ctr_vtab[vh]);
+    scp  = scsi_allocate_device(sdev, 1, FALSE);
+    scp->cmd_len = 12;
+    scp->use_sg = 0;
 
     /* look for buffer ID in length */
     if (id > 1) {
@@ -531,11 +525,11 @@
                     sizeof(pds->list[0]);
                 if (pds->entries > cnt)
                     pds->entries = cnt;
-                gdth_do_cmd(&scp, &gdtcmd, 30);
-                if (scp.SCp.Message != S_OK) 
+                gdth_do_cmd(scp, &gdtcmd, 30);
+                if (scp->SCp.Message != S_OK) 
                     pds->count = 0;
                 TRACE2(("pdr_statistics() entries %d status %d\n",
-                        pds->count, scp.SCp.Message));
+                        pds->count, scp->SCp.Message));
 
                 /* other IOCTLs must fit into area GDTH_SCRATCH/4 */
                 for (j = 0; j < ha->raw[i].pdev_cnt; ++j) {
@@ -551,8 +545,8 @@
                     gdtcmd.u.ioctl.subfunc = SCSI_DR_INFO | L_CTRL_PATTERN;
                     gdtcmd.u.ioctl.channel = 
                         ha->raw[i].address | ha->raw[i].id_list[j];
-                    gdth_do_cmd(&scp, &gdtcmd, 30);
-                    if (scp.SCp.Message == S_OK) {
+                    gdth_do_cmd(scp, &gdtcmd, 30);
+                    if (scp->SCp.Message == S_OK) {
                         strncpy(hrec,pdi->vendor,8);
                         strncpy(hrec+8,pdi->product,16);
                         strncpy(hrec+24,pdi->revision,4);
@@ -602,8 +596,8 @@
                         gdtcmd.u.ioctl.channel = 
                             ha->raw[i].address | ha->raw[i].id_list[j];
                         pdef->sddc_type = 0x08;
-                        gdth_do_cmd(&scp, &gdtcmd, 30);
-                        if (scp.SCp.Message == S_OK) {
+                        gdth_do_cmd(scp, &gdtcmd, 30);
+                        if (scp->SCp.Message == S_OK) {
                             size = sprintf(buffer+len,
                                            " Grown Defects:\t%d\n",
                                            pdef->sddc_cnt);
@@ -649,8 +643,8 @@
                     gdtcmd.u.ioctl.param_size = sizeof(gdth_cdrinfo_str);
                     gdtcmd.u.ioctl.subfunc = CACHE_DRV_INFO;
                     gdtcmd.u.ioctl.channel = drv_no;
-                    gdth_do_cmd(&scp, &gdtcmd, 30);
-                    if (scp.SCp.Message != S_OK)
+                    gdth_do_cmd(scp, &gdtcmd, 30);
+                    if (scp->SCp.Message != S_OK)
                         break;
                     pcdi->ld_dtype >>= 16;
                     j++;
@@ -746,8 +740,8 @@
                 gdtcmd.u.ioctl.param_size = sizeof(gdth_arrayinf_str);
                 gdtcmd.u.ioctl.subfunc = ARRAY_INFO | LA_CTRL_PATTERN;
                 gdtcmd.u.ioctl.channel = i;
-                gdth_do_cmd(&scp, &gdtcmd, 30);
-                if (scp.SCp.Message == S_OK) {
+                gdth_do_cmd(scp, &gdtcmd, 30);
+                if (scp->SCp.Message == S_OK) {
                     if (pai->ai_state == 0)
                         strcpy(hrec, "idle");
                     else if (pai->ai_state == 2)
@@ -821,8 +815,8 @@
                 gdtcmd.u.ioctl.channel = i;
                 phg->entries = MAX_HDRIVES;
                 phg->offset = GDTOFFSOF(gdth_hget_str, entry[0]); 
-                gdth_do_cmd(&scp, &gdtcmd, 30);
-                if (scp.SCp.Message != S_OK) {
+                gdth_do_cmd(scp, &gdtcmd, 30);
+                if (scp->SCp.Message != S_OK) {
                     ha->hdr[i].ldr_no = i;
                     ha->hdr[i].rw_attribs = 0;
                     ha->hdr[i].start_sec = 0;
@@ -837,7 +831,7 @@
                     }
                 }
                 TRACE2(("host_get entries %d status %d\n",
-                        phg->entries, scp.SCp.Message));
+                        phg->entries, scp->SCp.Message));
             }
             gdth_ioctl_free(hanum);
 
@@ -915,6 +909,10 @@
     }
 
 stop_output:
+
+    scsi_release_command(scp);
+    scsi_free_host_dev(sdev);
+
     *start = buffer +(offset-begin);
     len -= (offset-begin);
     if (len > length)
Index: linux/drivers/scsi/gdth_proc.h
diff -u linux/drivers/scsi/gdth_proc.h:1.1.1.1 linux/drivers/scsi/gdth_proc.h:1.2
--- linux/drivers/scsi/gdth_proc.h:1.1.1.1	Mon Jan  3 15:22:51 2000
+++ linux/drivers/scsi/gdth_proc.h	Mon Jan  3 18:31:14 2000
@@ -6,8 +6,8 @@
  */
 
 static int gdth_set_info(char *buffer,int length,int vh,int hanum,int busnum);
-static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Cmnd scp);
-static int gdth_set_bin_info(char *buffer,int length,int hanum,Scsi_Cmnd scp);
+static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Cmnd * scp);
+static int gdth_set_bin_info(char *buffer,int length,int hanum,Scsi_Cmnd * scp);
 static int gdth_get_info(char *buffer,char **start,off_t offset,
                          int length,int vh,int hanum,int busnum);
 
Index: linux/drivers/scsi/hosts.c
diff -u linux/drivers/scsi/hosts.c:1.1.1.2 linux/drivers/scsi/hosts.c:1.2
--- linux/drivers/scsi/hosts.c:1.1.1.2	Sat Dec 18 18:33:45 1999
+++ linux/drivers/scsi/hosts.c	Mon Jan 10 21:44:27 2000
@@ -869,7 +869,8 @@
     printk ("scsi : %d host%s.\n", next_scsi_host,
 	    (next_scsi_host == 1) ? "" : "s");
     
-    
+    scsi_make_blocked_list();
+
     /* Now attach the high level drivers */
 #ifdef CONFIG_BLK_DEV_SD
     scsi_register_device(&sd_template);
Index: linux/drivers/scsi/hosts.h
diff -u linux/drivers/scsi/hosts.h:1.1.1.4 linux/drivers/scsi/hosts.h:1.4
--- linux/drivers/scsi/hosts.h:1.1.1.4	Fri Jan  7 22:33:08 2000
+++ linux/drivers/scsi/hosts.h	Mon Jan 10 21:44:27 2000
@@ -334,6 +334,13 @@
     unsigned int max_lun;
     unsigned int max_channel;
 
+    /*
+     * Pointer to a circularly linked list - this indicates the hosts
+     * that should be locked out of performing I/O while we have an active
+     * command on this host.
+     */
+    struct Scsi_Host * block;
+    unsigned wish_block:1;
 
     /* These parameters should be set by the detect routine */
     unsigned long base;
Index: linux/drivers/scsi/scsi.c
diff -u linux/drivers/scsi/scsi.c:1.1.1.7 linux/drivers/scsi/scsi.c:1.12
--- linux/drivers/scsi/scsi.c:1.1.1.7	Fri Jan  7 22:33:08 2000
+++ linux/drivers/scsi/scsi.c	Mon Jan 10 22:17:27 2000
@@ -86,21 +86,6 @@
  * Definitions and constants.
  */
 
-/*
- * PAGE_SIZE must be a multiple of the sector size (512).  True
- * for all reasonably recent architectures (even the VAX...).
- */
-#define SECTOR_SIZE		512
-#define SECTORS_PER_PAGE	(PAGE_SIZE/SECTOR_SIZE)
-
-#if SECTORS_PER_PAGE <= 8
-typedef unsigned char FreeSectorBitmap;
-#elif SECTORS_PER_PAGE <= 32
-typedef unsigned int FreeSectorBitmap;
-#else
-#error You lose.
-#endif
-
 #define MIN_RESET_DELAY (2*HZ)
 
 /* Do not call reset on error if we just did a reset within 15 sec. */
@@ -139,12 +124,6 @@
 static unsigned long serial_number = 0;
 static Scsi_Cmnd *scsi_bh_queue_head = NULL;
 static Scsi_Cmnd *scsi_bh_queue_tail = NULL;
-static FreeSectorBitmap *dma_malloc_freelist = NULL;
-static int need_isa_bounce_buffers;
-static unsigned int dma_sectors = 0;
-unsigned int scsi_dma_free_sectors = 0;
-unsigned int scsi_need_isa_buffer = 0;
-static unsigned char **dma_malloc_pages = NULL;
 
 /*
  * Note - the initial logging level can be set here to log events at boot time.
@@ -173,7 +152,6 @@
 /* 
  * Function prototypes.
  */
-static void resize_dma_pool(void);
 static void print_inquiry(unsigned char *data);
 extern void scsi_times_out(Scsi_Cmnd * SCpnt);
 static int scan_scsis_single(int channel, int dev, int lun, int *max_scsi_dev,
@@ -290,6 +268,47 @@
 	{NULL, NULL, NULL}
 };
 
+
+/*
+ * Function:    scsi_get_request_handler()
+ *
+ * Purpose:     Selects queue handler function for a device.
+ *
+ * Arguments:   SDpnt   - device for which we need a handler function.
+ *
+ * Returns:     Nothing
+ *
+ * Lock status: No locking assumed or required.
+ *
+ * Notes:       Most devices will end up using scsi_request_fn for the
+ *              handler function (at least as things are done now).
+ *              The "block" feature basically ensures that only one of
+ *              the blocked hosts is active at one time, mainly to work around
+ *              buggy DMA chipsets where the memory gets starved.
+ *              For this case, we have a special handler function, which
+ *              does some checks and ultimately calls scsi_request_fn.
+ *
+ *              As a future enhancement, it might be worthwhile to add support
+ *              for stacked handlers - there might get to be too many permutations
+ *              otherwise.  Then again, we might just have one handler that does
+ *              all of the special cases (a little bit slower), and those devices
+ *              that don't need the special case code would directly call 
+ *              scsi_request_fn.
+ *
+ *              As it stands, I can think of a number of special cases that
+ *              we might need to handle.  This would not only include the blocked
+ *              case, but single_lun (for changers), and any special handling
+ *              we might need for a spun-down disk to spin it back up again.
+ */
+static request_fn_proc * scsi_get_request_handler(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt) {
+#ifdef CONFIG_SCSI_HOST_BLOCK
+        if( SHpnt->wish_block ) {
+                return scsi_blocked_request_fn;
+        }
+#endif
+        return scsi_request_fn;
+}
+
 static int get_device_flags(unsigned char *response_data)
 {
 	int i = 0;
@@ -314,7 +333,6 @@
 	return 0;
 }
 
-
 static void scan_scsis_done(Scsi_Cmnd * SCpnt)
 {
 
@@ -437,7 +455,7 @@
 			 * the queue actually represents.   We could look it up, but it
 			 * is pointless work.
 			 */
-			blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
+			blk_init_queue(&SDpnt->request_queue, scsi_get_request_handler(SDpnt, shpnt));
 			blk_queue_headactive(&SDpnt->request_queue, 0);
 			SDpnt->request_queue.queuedata = (void *) SDpnt;
 			/* Make sure we have something that is valid for DMA purposes */
@@ -520,7 +538,7 @@
 					}
 				}
 			}
-			resize_dma_pool();
+			scsi_resize_dma_pool();
 
 			for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
 				if (sdtpnt->finish && sdtpnt->nr_dev) {
@@ -882,7 +900,7 @@
 	 * the queue actually represents.   We could look it up, but it
 	 * is pointless work.
 	 */
-	blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
+	blk_init_queue(&SDpnt->request_queue, scsi_get_request_handler(SDpnt, shpnt));
 	blk_queue_headactive(&SDpnt->request_queue, 0);
 	SDpnt->request_queue.queuedata = (void *) SDpnt;
 	SDpnt->host = shpnt;
@@ -990,11 +1008,6 @@
 static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED;
 
 /*
- * Used for access to internal allocator used for DMA safe buffers.
- */
-static spinlock_t allocator_request_lock = SPIN_LOCK_UNLOCKED;
-
-/*
  * Used to protect insertion into and removal from the queue of
  * commands to be processed by the bottom half handler.
  */
@@ -1810,127 +1823,6 @@
 static void scsi_unregister_host(Scsi_Host_Template *);
 #endif
 
-/*
- * Function:    scsi_malloc
- *
- * Purpose:     Allocate memory from the DMA-safe pool.
- *
- * Arguments:   len       - amount of memory we need.
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- *
- * Returns:     Pointer to memory block.
- *
- * Notes:       Prior to the new queue code, this function was not SMP-safe.
- *              This function can only allocate in units of sectors
- *              (i.e. 512 bytes).
- *
- *              We cannot use the normal system allocator becuase we need
- *              to be able to guarantee that we can process a complete disk
- *              I/O request without touching the system allocator.  Think
- *              about it - if the system were heavily swapping, and tried to
- *              write out a block of memory to disk, and the SCSI code needed
- *              to allocate more memory in order to be able to write the
- *              data to disk, you would wedge the system.
- */
-void *scsi_malloc(unsigned int len)
-{
-	unsigned int nbits, mask;
-	unsigned long flags;
-
-	int i, j;
-	if (len % SECTOR_SIZE != 0 || len > PAGE_SIZE)
-		return NULL;
-
-	nbits = len >> 9;
-	mask = (1 << nbits) - 1;
-
-	spin_lock_irqsave(&allocator_request_lock, flags);
-
-	for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
-		for (j = 0; j <= SECTORS_PER_PAGE - nbits; j++) {
-			if ((dma_malloc_freelist[i] & (mask << j)) == 0) {
-				dma_malloc_freelist[i] |= (mask << j);
-				scsi_dma_free_sectors -= nbits;
-#ifdef DEBUG
-				SCSI_LOG_MLQUEUE(3, printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9)));
-				printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9));
-#endif
-				spin_unlock_irqrestore(&allocator_request_lock, flags);
-				return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9));
-			}
-		}
-	spin_unlock_irqrestore(&allocator_request_lock, flags);
-	return NULL;		/* Nope.  No more */
-}
-
-/*
- * Function:    scsi_free
- *
- * Purpose:     Free memory into the DMA-safe pool.
- *
- * Arguments:   ptr       - data block we are freeing.
- *              len       - size of block we are freeing.
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- *
- * Returns:     Nothing
- *
- * Notes:       This function *must* only be used to free memory
- *              allocated from scsi_malloc().
- *
- *              Prior to the new queue code, this function was not SMP-safe.
- *              This function can only allocate in units of sectors
- *              (i.e. 512 bytes).
- */
-int scsi_free(void *obj, unsigned int len)
-{
-	unsigned int page, sector, nbits, mask;
-	unsigned long flags;
-
-#ifdef DEBUG
-	unsigned long ret = 0;
-
-#ifdef __mips__
-	__asm__ __volatile__("move\t%0,$31":"=r"(ret));
-#else
-	ret = __builtin_return_address(0);
-#endif
-	printk("scsi_free %p %d\n", obj, len);
-	SCSI_LOG_MLQUEUE(3, printk("SFree: %p %d\n", obj, len));
-#endif
-
-	spin_lock_irqsave(&allocator_request_lock, flags);
-
-	for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) {
-		unsigned long page_addr = (unsigned long) dma_malloc_pages[page];
-		if ((unsigned long) obj >= page_addr &&
-		    (unsigned long) obj < page_addr + PAGE_SIZE) {
-			sector = (((unsigned long) obj) - page_addr) >> 9;
-
-			nbits = len >> 9;
-			mask = (1 << nbits) - 1;
-
-			if ((mask << sector) >= (1 << SECTORS_PER_PAGE))
-				panic("scsi_free:Bad memory alignment");
-
-			if ((dma_malloc_freelist[page] &
-			     (mask << sector)) != (mask << sector)) {
-#ifdef DEBUG
-				printk("scsi_free(obj=%p, len=%d) called from %08lx\n",
-				       obj, len, ret);
-#endif
-				panic("scsi_free:Trying to free unused memory");
-			}
-			scsi_dma_free_sectors += nbits;
-			dma_malloc_freelist[page] &= ~(mask << sector);
-			spin_unlock_irqrestore(&allocator_request_lock, flags);
-			return 0;
-		}
-	}
-	panic("scsi_free:Bad offset");
-}
-
 
 int scsi_loadable_module_flag;	/* Set after we scan builtin drivers */
 
@@ -2114,7 +2006,7 @@
 	/*
 	 * This should build the DMA pool.
 	 */
-	resize_dma_pool();
+	scsi_resize_dma_pool();
 
 	/*
 	 * OK, now we finish the initialization by doing spin-up, read
@@ -2465,217 +2357,6 @@
 }
 #endif
 
-/*
- * Function:    resize_dma_pool
- *
- * Purpose:     Ensure that the DMA pool is sufficiently large to be
- *              able to guarantee that we can always process I/O requests
- *              without calling the system allocator.
- *
- * Arguments:   None.
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- *
- * Returns:     Nothing
- *
- * Notes:       Prior to the new queue code, this function was not SMP-safe.
- *              Go through the device list and recompute the most appropriate
- *              size for the dma pool.  Then grab more memory (as required).
- */
-static void resize_dma_pool(void)
-{
-	int i, k;
-	unsigned long size;
-	unsigned long flags;
-	struct Scsi_Host *shpnt;
-	struct Scsi_Host *host = NULL;
-	Scsi_Device *SDpnt;
-	FreeSectorBitmap *new_dma_malloc_freelist = NULL;
-	unsigned int new_dma_sectors = 0;
-	unsigned int new_need_isa_buffer = 0;
-	unsigned char **new_dma_malloc_pages = NULL;
-	int out_of_space = 0;
-
-	spin_lock_irqsave(&allocator_request_lock, flags);
-
-	if (!scsi_hostlist) {
-		/*
-		 * Free up the DMA pool.
-		 */
-		if (scsi_dma_free_sectors != dma_sectors)
-			panic("SCSI DMA pool memory leak %d %d\n", scsi_dma_free_sectors, dma_sectors);
-
-		for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
-			scsi_init_free(dma_malloc_pages[i], PAGE_SIZE);
-		if (dma_malloc_pages)
-			scsi_init_free((char *) dma_malloc_pages,
-				       (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages));
-		dma_malloc_pages = NULL;
-		if (dma_malloc_freelist)
-			scsi_init_free((char *) dma_malloc_freelist,
-				       (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_freelist));
-		dma_malloc_freelist = NULL;
-		dma_sectors = 0;
-		scsi_dma_free_sectors = 0;
-		spin_unlock_irqrestore(&allocator_request_lock, flags);
-		return;
-	}
-	/* Next, check to see if we need to extend the DMA buffer pool */
-
-	new_dma_sectors = 2 * SECTORS_PER_PAGE;		/* Base value we use */
-
-	if (__pa(high_memory) - 1 > ISA_DMA_THRESHOLD)
-		need_isa_bounce_buffers = 1;
-	else
-		need_isa_bounce_buffers = 0;
-
-	if (scsi_devicelist)
-		for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
-			new_dma_sectors += SECTORS_PER_PAGE;	/* Increment for each host */
-
-	for (host = scsi_hostlist; host; host = host->next) {
-		for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) {
-			/*
-			 * sd and sr drivers allocate scatterlists.
-			 * sr drivers may allocate for each command 1x2048 or 2x1024 extra
-			 * buffers for 2k sector size and 1k fs.
-			 * sg driver allocates buffers < 4k.
-			 * st driver does not need buffers from the dma pool.
-			 * estimate 4k buffer/command for devices of unknown type (should panic).
-			 */
-			if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM ||
-			    SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) {
-				new_dma_sectors += ((host->sg_tablesize *
-				sizeof(struct scatterlist) + 511) >> 9) *
-				 SDpnt->queue_depth;
-				if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM)
-					new_dma_sectors += (2048 >> 9) * SDpnt->queue_depth;
-			} else if (SDpnt->type == TYPE_SCANNER ||
-				   SDpnt->type == TYPE_PROCESSOR ||
-				   SDpnt->type == TYPE_MEDIUM_CHANGER ||
-				   SDpnt->type == TYPE_ENCLOSURE) {
-				new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
-			} else {
-				if (SDpnt->type != TYPE_TAPE) {
-					printk("resize_dma_pool: unknown device type %d\n", SDpnt->type);
-					new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
-				}
-			}
-
-			if (host->unchecked_isa_dma &&
-			    need_isa_bounce_buffers &&
-			    SDpnt->type != TYPE_TAPE) {
-				new_dma_sectors += (PAGE_SIZE >> 9) * host->sg_tablesize *
-				    SDpnt->queue_depth;
-				new_need_isa_buffer++;
-			}
-		}
-	}
-
-#ifdef DEBUG_INIT
-	printk("resize_dma_pool: needed dma sectors = %d\n", new_dma_sectors);
-#endif
-
-	/* limit DMA memory to 32MB: */
-	new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
-
-	/*
-	 * We never shrink the buffers - this leads to
-	 * race conditions that I would rather not even think
-	 * about right now.
-	 */
-#if 0				/* Why do this? No gain and risks out_of_space */
-	if (new_dma_sectors < dma_sectors)
-		new_dma_sectors = dma_sectors;
-#endif
-	if (new_dma_sectors <= dma_sectors) {
-		spin_unlock_irqrestore(&allocator_request_lock, flags);
-		return;		/* best to quit while we are in front */
-        }
-
-	for (k = 0; k < 20; ++k) {	/* just in case */
-		out_of_space = 0;
-		size = (new_dma_sectors / SECTORS_PER_PAGE) *
-		    sizeof(FreeSectorBitmap);
-		new_dma_malloc_freelist = (FreeSectorBitmap *)
-		    scsi_init_malloc(size, GFP_ATOMIC);
-		if (new_dma_malloc_freelist) {
-			size = (new_dma_sectors / SECTORS_PER_PAGE) *
-			    sizeof(*new_dma_malloc_pages);
-			new_dma_malloc_pages = (unsigned char **)
-			    scsi_init_malloc(size, GFP_ATOMIC);
-			if (!new_dma_malloc_pages) {
-				size = (new_dma_sectors / SECTORS_PER_PAGE) *
-				    sizeof(FreeSectorBitmap);
-				scsi_init_free((char *) new_dma_malloc_freelist, size);
-				out_of_space = 1;
-			}
-		} else
-			out_of_space = 1;
-
-		if ((!out_of_space) && (new_dma_sectors > dma_sectors)) {
-			for (i = dma_sectors / SECTORS_PER_PAGE;
-			   i < new_dma_sectors / SECTORS_PER_PAGE; i++) {
-				new_dma_malloc_pages[i] = (unsigned char *)
-				    scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA);
-				if (!new_dma_malloc_pages[i])
-					break;
-			}
-			if (i != new_dma_sectors / SECTORS_PER_PAGE) {	/* clean up */
-				int k = i;
-
-				out_of_space = 1;
-				for (i = 0; i < k; ++i)
-					scsi_init_free(new_dma_malloc_pages[i], PAGE_SIZE);
-			}
-		}
-		if (out_of_space) {	/* try scaling down new_dma_sectors request */
-			printk("scsi::resize_dma_pool: WARNING, dma_sectors=%u, "
-			       "wanted=%u, scaling\n", dma_sectors, new_dma_sectors);
-			if (new_dma_sectors < (8 * SECTORS_PER_PAGE))
-				break;	/* pretty well hopeless ... */
-			new_dma_sectors = (new_dma_sectors * 3) / 4;
-			new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
-			if (new_dma_sectors <= dma_sectors)
-				break;	/* stick with what we have got */
-		} else
-			break;	/* found space ... */
-	}			/* end of for loop */
-	if (out_of_space) {
-		spin_unlock_irqrestore(&allocator_request_lock, flags);
-		scsi_need_isa_buffer = new_need_isa_buffer;	/* some useful info */
-		printk("      WARNING, not enough memory, pool not expanded\n");
-		return;
-	}
-	/* When we dick with the actual DMA list, we need to
-	 * protect things
-	 */
-	if (dma_malloc_freelist) {
-		size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
-		memcpy(new_dma_malloc_freelist, dma_malloc_freelist, size);
-		scsi_init_free((char *) dma_malloc_freelist, size);
-	}
-	dma_malloc_freelist = new_dma_malloc_freelist;
-
-	if (dma_malloc_pages) {
-		size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages);
-		memcpy(new_dma_malloc_pages, dma_malloc_pages, size);
-		scsi_init_free((char *) dma_malloc_pages, size);
-	}
-	scsi_dma_free_sectors += new_dma_sectors - dma_sectors;
-	dma_malloc_pages = new_dma_malloc_pages;
-	dma_sectors = new_dma_sectors;
-	scsi_need_isa_buffer = new_need_isa_buffer;
-
-	spin_unlock_irqrestore(&allocator_request_lock, flags);
-
-#ifdef DEBUG_INIT
-	printk("resize_dma_pool: dma free sectors   = %d\n", scsi_dma_free_sectors);
-	printk("resize_dma_pool: dma sectors        = %d\n", dma_sectors);
-	printk("resize_dma_pool: need isa buffers   = %d\n", scsi_need_isa_buffer);
-#endif
-}
-
 #ifdef CONFIG_MODULES		/* a big #ifdef block... */
 
 /*
@@ -2771,6 +2452,8 @@
 		printk("scsi : %d host%s.\n", next_scsi_host,
 		       (next_scsi_host == 1) ? "" : "s");
 
+		scsi_make_blocked_list();
+
 		/* The next step is to call scan_scsis here.  This generates the
 		 * Scsi_Devices entries
 		 */
@@ -2809,7 +2492,7 @@
 		 * Now that we have all of the devices, resize the DMA pool,
 		 * as required.  */
 		if (!out_of_space)
-			resize_dma_pool();
+			scsi_resize_dma_pool();
 
 
 		/* This does any final handling that is required. */
@@ -3027,7 +2710,7 @@
 	 * do the right thing and free everything.
 	 */
 	if (!scsi_hosts)
-		resize_dma_pool();
+		scsi_resize_dma_pool();
 
 	printk("scsi : %d host%s.\n", next_scsi_host,
 	       (next_scsi_host == 1) ? "" : "s");
@@ -3039,6 +2722,7 @@
 	       (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
 #endif
 
+	scsi_make_blocked_list();
 
 	/* There were some hosts that were loaded at boot time, so we cannot
 	   do any more than this */
@@ -3122,7 +2806,7 @@
 	if (tpnt->finish && tpnt->nr_dev)
 		(*tpnt->finish) ();
 	if (!out_of_space)
-		resize_dma_pool();
+		scsi_resize_dma_pool();
 	MOD_INC_USE_COUNT;
 
 	if (out_of_space) {
@@ -3372,39 +3056,11 @@
 
 	scsi_loadable_module_flag = 1;
 
-	dma_sectors = PAGE_SIZE / SECTOR_SIZE;
-	scsi_dma_free_sectors = dma_sectors;
-	/*
-	 * Set up a minimal DMA buffer list - this will be used during scan_scsis
-	 * in some cases.
-	 */
+        if( scsi_init_minimal_dma_pool() == 0 )
+        {
+                return 1;
+        }
 
-	/* One bit per sector to indicate free/busy */
-	size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
-	dma_malloc_freelist = (FreeSectorBitmap *)
-	    scsi_init_malloc(size, GFP_ATOMIC);
-	if (dma_malloc_freelist) {
-		/* One pointer per page for the page list */
-		dma_malloc_pages = (unsigned char **) scsi_init_malloc(
-									      (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages),
-							     GFP_ATOMIC);
-		if (dma_malloc_pages) {
-			dma_malloc_pages[0] = (unsigned char *)
-			    scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA);
-			if (dma_malloc_pages[0])
-				has_space = 1;
-		}
-	}
-	if (!has_space) {
-		if (dma_malloc_freelist) {
-			scsi_init_free((char *) dma_malloc_freelist, size);
-			if (dma_malloc_pages)
-				scsi_init_free((char *) dma_malloc_pages,
-					       (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages));
-		}
-		printk("scsi::init_module: failed, out of memory\n");
-		return 1;
-	}
 	/*
 	 * This is where the processing takes place for most everything
 	 * when commands are completed.
@@ -3427,7 +3083,7 @@
 	/*
 	 * Free up the DMA pool.
 	 */
-	resize_dma_pool();
+	scsi_resize_dma_pool();
 
 }
 
@@ -3481,7 +3137,7 @@
 
         SDpnt->device_queue = SCpnt;
 
-        blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
+        blk_init_queue(&SDpnt->request_queue, scsi_get_request_handler(SDpnt, SDpnt->host));
         blk_queue_headactive(&SDpnt->request_queue, 0);
         SDpnt->request_queue.queuedata = (void *) SDpnt;
 
@@ -3509,7 +3165,7 @@
  */
 void scsi_free_host_dev(Scsi_Device * SDpnt)
 {
-        if( SDpnt->id != SDpnt->host->this_id )
+        if( (unsigned char) SDpnt->id != (unsigned char) SDpnt->host->this_id )
         {
                 panic("Attempt to delete wrong device\n");
         }
Index: linux/drivers/scsi/scsi.h
diff -u linux/drivers/scsi/scsi.h:1.1.1.5 linux/drivers/scsi/scsi.h:1.6
--- linux/drivers/scsi/scsi.h:1.1.1.5	Fri Jan  7 22:33:08 2000
+++ linux/drivers/scsi/scsi.h	Mon Jan 10 21:44:27 2000
@@ -365,90 +365,121 @@
  *  Initializes all SCSI devices.  This scans all scsi busses.
  */
 
-extern int scsi_dev_init(void);
-
-
-
-void *scsi_malloc(unsigned int);
-int scsi_free(void *, unsigned int);
 extern unsigned int scsi_logging_level;		/* What do we log? */
 extern unsigned int scsi_dma_free_sectors;	/* How much room do we have left */
 extern unsigned int scsi_need_isa_buffer;	/* True if some devices need indirection
 						   * buffers */
-extern void scsi_make_blocked_list(void);
 extern volatile int in_scan_scsis;
 extern const unsigned char scsi_command_size[8];
 
+
 /*
  * These are the error handling functions defined in scsi_error.c
  */
+extern void scsi_times_out(Scsi_Cmnd * SCpnt);
 extern void scsi_add_timer(Scsi_Cmnd * SCset, int timeout,
 			   void (*complete) (Scsi_Cmnd *));
-extern void scsi_done(Scsi_Cmnd * SCpnt);
 extern int scsi_delete_timer(Scsi_Cmnd * SCset);
 extern void scsi_error_handler(void *host);
-extern int scsi_retry_command(Scsi_Cmnd *);
-extern void scsi_finish_command(Scsi_Cmnd *);
 extern int scsi_sense_valid(Scsi_Cmnd *);
 extern int scsi_decide_disposition(Scsi_Cmnd * SCpnt);
 extern int scsi_block_when_processing_errors(Scsi_Device *);
 extern void scsi_sleep(int);
+
+/*
+ * Prototypes for functions in scsicam.c
+ */
 extern int  scsi_partsize(struct buffer_head *bh, unsigned long capacity,
                     unsigned int *cyls, unsigned int *hds,
                     unsigned int *secs);
 
 /*
+ * Prototypes for functions in scsi_dma.c
+ */
+void scsi_resize_dma_pool(void);
+int scsi_init_minimal_dma_pool(void);
+void *scsi_malloc(unsigned int);
+int scsi_free(void *, unsigned int);
+
+/*
  * Prototypes for functions in scsi_merge.c
  */
 extern void recount_segments(Scsi_Cmnd * SCpnt);
+extern void initialize_merge_fn(Scsi_Device * SDpnt);
 
 /*
+ * Prototypes for functions in scsi_queue.c
+ */
+extern int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason);
+
+/*
  * Prototypes for functions in scsi_lib.c
  */
-extern void initialize_merge_fn(Scsi_Device * SDpnt);
-extern void scsi_request_fn(request_queue_t * q);
+extern void scsi_blocked_request_fn(request_queue_t * q);
+extern Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate,
+				   int sectors);
+extern struct Scsi_Device_Template *scsi_get_request_dev(struct request *);
+extern int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt);
+extern int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int);
+extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
+			       int block_sectors);
+extern void scsi_make_blocked_list(void);
 extern void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt);
+extern void scsi_request_fn(request_queue_t * q);
 
-extern int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int);
-extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt);
 
 /*
  * Prototypes for functions in scsi.c
  */
-
-/*
- *  scsi_abort aborts the current command that is executing on host host.
- *  The error code, if non zero is returned in the host byte, otherwise 
- *  DID_ABORT is returned in the hostbyte.
- */
-
+extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt);
+extern void scsi_bottom_half_handler(void);
+extern void scsi_build_commandblocks(Scsi_Device * SDpnt);
+extern void scsi_done(Scsi_Cmnd * SCpnt);
+extern void scsi_finish_command(Scsi_Cmnd *);
+extern int scsi_retry_command(Scsi_Cmnd *);
+extern Scsi_Cmnd *scsi_allocate_device(Scsi_Device *, int, int);
+extern void scsi_release_command(Scsi_Cmnd *);
 extern void scsi_do_cmd(Scsi_Cmnd *, const void *cmnd,
 			void *buffer, unsigned bufflen,
 			void (*done) (struct scsi_cmnd *),
 			int timeout, int retries);
-
 extern void scsi_wait_cmd(Scsi_Cmnd *, const void *cmnd,
 			  void *buffer, unsigned bufflen,
 			  void (*done) (struct scsi_cmnd *),
 			  int timeout, int retries);
+extern int scsi_dev_init(void);
 
-extern Scsi_Cmnd *scsi_allocate_device(Scsi_Device *, int, int);
-
-extern void scsi_release_command(Scsi_Cmnd *);
 
+/*
+ * Prototypes for functions/data in hosts.c
+ */
 extern int max_scsi_hosts;
 
+/*
+ * Prototypes for functions in scsi_proc.c
+ */
 extern void proc_print_scsidevice(Scsi_Device *, char *, int *, int);
 extern struct proc_dir_entry *proc_scsi;
 
+/*
+ * Prototypes for functions in constants.c
+ */
 extern void print_command(unsigned char *);
 extern void print_sense(const char *, Scsi_Cmnd *);
 extern void print_driverbyte(int scsiresult);
 extern void print_hostbyte(int scsiresult);
+extern void print_status (int status);
 
 /*
  *  The scsi_device struct contains what we know about each given scsi
  *  device.
+ *
+ * FIXME(eric) - one of the great regrets that I have is that I failed to define
+ * these structure elements as something like sdev_foo instead of foo.  This would
+ * make it so much easier to grep through sources and so forth.  I propose that
+ * all new elements that get added to these structures follow this convention.
+ * As time goes on and as people have the stomach for it, it should be possible to 
+ * go back and retrofit at least some of the elements here with the prefix.
  */
 
 struct scsi_device {
@@ -538,6 +569,14 @@
 } Scsi_Pointer;
 
 
+/*
+ * FIXME(eric) - one of the great regrets that I have is that I failed to define
+ * these structure elements as something like sc_foo instead of foo.  This would
+ * make it so much easier to grep through sources and so forth.  I propose that
+ * all new elements that get added to these structures follow this convention.
+ * As time goes on and as people have the stomach for it, it should be possible to 
+ * go back and retrofit at least some of the elements here with the prefix.
+ */
 struct scsi_cmnd {
 /* private: */
 	/*
@@ -680,16 +719,6 @@
  */
 #define SCSI_MLQUEUE_HOST_BUSY   0x1055
 #define SCSI_MLQUEUE_DEVICE_BUSY 0x1056
-
-extern int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason);
-
-extern Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate,
-				   int sectors);
-
-extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
-			       int block_sectors);
-
-extern struct Scsi_Device_Template *scsi_get_request_dev(struct request *);
 
 #define SCSI_SLEEP(QUEUE, CONDITION) {		    \
     if (CONDITION) {			            \
Index: linux/drivers/scsi/scsi_dma.c
diff -u /dev/null linux/drivers/scsi/scsi_dma.c:1.3
--- /dev/null	Mon Jan 10 22:17:54 2000
+++ linux/drivers/scsi/scsi_dma.c	Sat Jan  8 21:19:18 2000
@@ -0,0 +1,442 @@
+/*
+ *  scsi_dma.c Copyright (C) 2000 Eric Youngdale
+ *
+ *  mid-level SCSI DMA bounce buffer allocator
+ *
+ */
+
+#define __NO_VERSION__
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/blk.h>
+
+
+#include "scsi.h"
+#include "hosts.h"
+#include "constants.h"
+
+#ifdef CONFIG_KMOD
+#include <linux/kmod.h>
+#endif
+
+/*
+ * PAGE_SIZE must be a multiple of the sector size (512).  True
+ * for all reasonably recent architectures (even the VAX...).
+ */
+#define SECTOR_SIZE		512
+#define SECTORS_PER_PAGE	(PAGE_SIZE/SECTOR_SIZE)
+
+#if SECTORS_PER_PAGE <= 8
+typedef unsigned char FreeSectorBitmap;
+#elif SECTORS_PER_PAGE <= 32
+typedef unsigned int FreeSectorBitmap;
+#else
+#error You lose.
+#endif
+
+/*
+ * Used for access to internal allocator used for DMA safe buffers.
+ */
+static spinlock_t allocator_request_lock = SPIN_LOCK_UNLOCKED;
+
+static FreeSectorBitmap *dma_malloc_freelist = NULL;
+static int need_isa_bounce_buffers;
+static unsigned int dma_sectors = 0;
+unsigned int scsi_dma_free_sectors = 0;
+unsigned int scsi_need_isa_buffer = 0;
+static unsigned char **dma_malloc_pages = NULL;
+
+/*
+ * Function:    scsi_malloc
+ *
+ * Purpose:     Allocate memory from the DMA-safe pool.
+ *
+ * Arguments:   len       - amount of memory we need.
+ *
+ * Lock status: No locks assumed to be held.  This function is SMP-safe.
+ *
+ * Returns:     Pointer to memory block.
+ *
+ * Notes:       Prior to the new queue code, this function was not SMP-safe.
+ *              This function can only allocate in units of sectors
+ *              (i.e. 512 bytes).
+ *
+ *              We cannot use the normal system allocator because we need
+ *              to be able to guarantee that we can process a complete disk
+ *              I/O request without touching the system allocator.  Think
+ *              about it - if the system were heavily swapping, and tried to
+ *              write out a block of memory to disk, and the SCSI code needed
+ *              to allocate more memory in order to be able to write the
+ *              data to disk, you would wedge the system.
+ */
+void *scsi_malloc(unsigned int len)
+{
+	unsigned int nbits, mask;
+	unsigned long flags;
+
+	int i, j;
+	if (len % SECTOR_SIZE != 0 || len > PAGE_SIZE)
+		return NULL;
+
+	nbits = len >> 9;
+	mask = (1 << nbits) - 1;
+
+	spin_lock_irqsave(&allocator_request_lock, flags);
+
+	for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
+		for (j = 0; j <= SECTORS_PER_PAGE - nbits; j++) {
+			if ((dma_malloc_freelist[i] & (mask << j)) == 0) {
+				dma_malloc_freelist[i] |= (mask << j);
+				scsi_dma_free_sectors -= nbits;
+#ifdef DEBUG
+				SCSI_LOG_MLQUEUE(3, printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9)));
+				printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9));
+#endif
+				spin_unlock_irqrestore(&allocator_request_lock, flags);
+				return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9));
+			}
+		}
+	spin_unlock_irqrestore(&allocator_request_lock, flags);
+	return NULL;		/* Nope.  No more */
+}
+
+/*
+ * Function:    scsi_free
+ *
+ * Purpose:     Free memory into the DMA-safe pool.
+ *
+ * Arguments:   ptr       - data block we are freeing.
+ *              len       - size of block we are freeing.
+ *
+ * Lock status: No locks assumed to be held.  This function is SMP-safe.
+ *
+ * Returns:     0 on success; panics on an invalid pointer or length.
+ *
+ * Notes:       This function *must* only be used to free memory
+ *              allocated from scsi_malloc().
+ *
+ *              Prior to the new queue code, this function was not SMP-safe.
+ *              This function can only free in units of sectors
+ *              (i.e. 512 bytes).
+ */
+int scsi_free(void *obj, unsigned int len)
+{
+	unsigned int page, sector, nbits, mask;
+	unsigned long flags;
+
+#ifdef DEBUG
+	unsigned long ret = 0;
+
+#ifdef __mips__
+	__asm__ __volatile__("move\t%0,$31":"=r"(ret));
+#else
+	ret = __builtin_return_address(0);
+#endif
+	printk("scsi_free %p %d\n", obj, len);
+	SCSI_LOG_MLQUEUE(3, printk("SFree: %p %d\n", obj, len));
+#endif
+
+	spin_lock_irqsave(&allocator_request_lock, flags);
+
+	for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) {
+		unsigned long page_addr = (unsigned long) dma_malloc_pages[page];
+		if ((unsigned long) obj >= page_addr &&
+		    (unsigned long) obj < page_addr + PAGE_SIZE) {
+			sector = (((unsigned long) obj) - page_addr) >> 9;
+
+			nbits = len >> 9;
+			mask = (1 << nbits) - 1;
+
+			if ((mask << sector) >= (1 << SECTORS_PER_PAGE))
+				panic("scsi_free:Bad memory alignment");
+
+			if ((dma_malloc_freelist[page] &
+			     (mask << sector)) != (mask << sector)) {
+#ifdef DEBUG
+				printk("scsi_free(obj=%p, len=%d) called from %08lx\n",
+				       obj, len, ret);
+#endif
+				panic("scsi_free:Trying to free unused memory");
+			}
+			scsi_dma_free_sectors += nbits;
+			dma_malloc_freelist[page] &= ~(mask << sector);
+			spin_unlock_irqrestore(&allocator_request_lock, flags);
+			return 0;
+		}
+	}
+	panic("scsi_free:Bad offset");
+}
+
+
+/*
+ * Function:    scsi_resize_dma_pool
+ *
+ * Purpose:     Ensure that the DMA pool is sufficiently large to be
+ *              able to guarantee that we can always process I/O requests
+ *              without calling the system allocator.
+ *
+ * Arguments:   None.
+ *
+ * Lock status: No locks assumed to be held.  This function is SMP-safe.
+ *
+ * Returns:     Nothing
+ *
+ * Notes:       Prior to the new queue code, this function was not SMP-safe.
+ *              Go through the device list and recompute the most appropriate
+ *              size for the dma pool.  Then grab more memory (as required).
+ */
+void scsi_resize_dma_pool(void)
+{
+	int i, k;
+	unsigned long size;
+	unsigned long flags;
+	struct Scsi_Host *shpnt;
+	struct Scsi_Host *host = NULL;
+	Scsi_Device *SDpnt;
+	FreeSectorBitmap *new_dma_malloc_freelist = NULL;
+	unsigned int new_dma_sectors = 0;
+	unsigned int new_need_isa_buffer = 0;
+	unsigned char **new_dma_malloc_pages = NULL;
+	int out_of_space = 0;
+
+	spin_lock_irqsave(&allocator_request_lock, flags);
+
+	if (!scsi_hostlist) {
+		/*
+		 * Free up the DMA pool.
+		 */
+		if (scsi_dma_free_sectors != dma_sectors)
+			panic("SCSI DMA pool memory leak %d %d\n", scsi_dma_free_sectors, dma_sectors);
+
+		for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
+			free_pages((unsigned long) dma_malloc_pages[i], 0);
+		if (dma_malloc_pages)
+			kfree((char *) dma_malloc_pages);
+		dma_malloc_pages = NULL;
+		if (dma_malloc_freelist)
+			kfree((char *) dma_malloc_freelist);
+		dma_malloc_freelist = NULL;
+		dma_sectors = 0;
+		scsi_dma_free_sectors = 0;
+		spin_unlock_irqrestore(&allocator_request_lock, flags);
+		return;
+	}
+	/* Next, check to see if we need to extend the DMA buffer pool */
+
+	new_dma_sectors = 2 * SECTORS_PER_PAGE;		/* Base value we use */
+
+	if (__pa(high_memory) - 1 > ISA_DMA_THRESHOLD)
+		need_isa_bounce_buffers = 1;
+	else
+		need_isa_bounce_buffers = 0;
+
+	if (scsi_devicelist)
+		for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
+			new_dma_sectors += SECTORS_PER_PAGE;	/* Increment for each host */
+
+	for (host = scsi_hostlist; host; host = host->next) {
+		for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) {
+			/*
+			 * sd and sr drivers allocate scatterlists.
+			 * sr drivers may allocate for each command 1x2048 or 2x1024 extra
+			 * buffers for 2k sector size and 1k fs.
+			 * sg driver allocates buffers < 4k.
+			 * st driver does not need buffers from the dma pool.
+			 * estimate 4k buffer/command for devices of unknown type (should panic).
+			 */
+			if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM ||
+			    SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) {
+				new_dma_sectors += ((host->sg_tablesize *
+				sizeof(struct scatterlist) + 511) >> 9) *
+				 SDpnt->queue_depth;
+				if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM)
+					new_dma_sectors += (2048 >> 9) * SDpnt->queue_depth;
+			} else if (SDpnt->type == TYPE_SCANNER ||
+				   SDpnt->type == TYPE_PROCESSOR ||
+				   SDpnt->type == TYPE_MEDIUM_CHANGER ||
+				   SDpnt->type == TYPE_ENCLOSURE) {
+				new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
+			} else {
+				if (SDpnt->type != TYPE_TAPE) {
+					printk("resize_dma_pool: unknown device type %d\n", SDpnt->type);
+					new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
+				}
+			}
+
+			if (host->unchecked_isa_dma &&
+			    need_isa_bounce_buffers &&
+			    SDpnt->type != TYPE_TAPE) {
+				new_dma_sectors += (PAGE_SIZE >> 9) * host->sg_tablesize *
+				    SDpnt->queue_depth;
+				new_need_isa_buffer++;
+			}
+		}
+	}
+
+#ifdef DEBUG_INIT
+	printk("resize_dma_pool: needed dma sectors = %d\n", new_dma_sectors);
+#endif
+
+	/* limit DMA memory to 32MB: */
+	new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
+
+	/*
+	 * We never shrink the buffers - this leads to
+	 * race conditions that I would rather not even think
+	 * about right now.
+	 */
+#if 0				/* Why do this? No gain and risks out_of_space */
+	if (new_dma_sectors < dma_sectors)
+		new_dma_sectors = dma_sectors;
+#endif
+	if (new_dma_sectors <= dma_sectors) {
+		spin_unlock_irqrestore(&allocator_request_lock, flags);
+		return;		/* best to quit while we are in front */
+        }
+
+	for (k = 0; k < 20; ++k) {	/* just in case */
+		out_of_space = 0;
+		size = (new_dma_sectors / SECTORS_PER_PAGE) *
+		    sizeof(FreeSectorBitmap);
+		new_dma_malloc_freelist = (FreeSectorBitmap *)
+		    kmalloc(size, GFP_ATOMIC);
+		if (new_dma_malloc_freelist) {
+                        memset(new_dma_malloc_freelist, 0, size);
+			size = (new_dma_sectors / SECTORS_PER_PAGE) *
+			    sizeof(*new_dma_malloc_pages);
+			new_dma_malloc_pages = (unsigned char **)
+			    kmalloc(size, GFP_ATOMIC);
+			if (!new_dma_malloc_pages) {
+				size = (new_dma_sectors / SECTORS_PER_PAGE) *
+				    sizeof(FreeSectorBitmap);
+				kfree((char *) new_dma_malloc_freelist);
+				out_of_space = 1;
+			} else {
+                                memset(new_dma_malloc_pages, 0, size);
+                        }
+		} else
+			out_of_space = 1;
+
+		if ((!out_of_space) && (new_dma_sectors > dma_sectors)) {
+			for (i = dma_sectors / SECTORS_PER_PAGE;
+			   i < new_dma_sectors / SECTORS_PER_PAGE; i++) {
+				new_dma_malloc_pages[i] = (unsigned char *)
+				    __get_free_pages(GFP_ATOMIC | GFP_DMA, 0);
+				if (!new_dma_malloc_pages[i])
+					break;
+			}
+			if (i != new_dma_sectors / SECTORS_PER_PAGE) {	/* clean up */
+				int k = i;
+
+				out_of_space = 1;
+				for (i = 0; i < k; ++i)
+					free_pages((unsigned long) new_dma_malloc_pages[i], 0);
+			}
+		}
+		if (out_of_space) {	/* try scaling down new_dma_sectors request */
+			printk("scsi::resize_dma_pool: WARNING, dma_sectors=%u, "
+			       "wanted=%u, scaling\n", dma_sectors, new_dma_sectors);
+			if (new_dma_sectors < (8 * SECTORS_PER_PAGE))
+				break;	/* pretty well hopeless ... */
+			new_dma_sectors = (new_dma_sectors * 3) / 4;
+			new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
+			if (new_dma_sectors <= dma_sectors)
+				break;	/* stick with what we have got */
+		} else
+			break;	/* found space ... */
+	}			/* end of for loop */
+	if (out_of_space) {
+		spin_unlock_irqrestore(&allocator_request_lock, flags);
+		scsi_need_isa_buffer = new_need_isa_buffer;	/* some useful info */
+		printk("      WARNING, not enough memory, pool not expanded\n");
+		return;
+	}
+	/* When we dick with the actual DMA list, we need to
+	 * protect things
+	 */
+	if (dma_malloc_freelist) {
+		size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
+		memcpy(new_dma_malloc_freelist, dma_malloc_freelist, size);
+		kfree((char *) dma_malloc_freelist);
+	}
+	dma_malloc_freelist = new_dma_malloc_freelist;
+
+	if (dma_malloc_pages) {
+		size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages);
+		memcpy(new_dma_malloc_pages, dma_malloc_pages, size);
+		kfree((char *) dma_malloc_pages);
+	}
+	scsi_dma_free_sectors += new_dma_sectors - dma_sectors;
+	dma_malloc_pages = new_dma_malloc_pages;
+	dma_sectors = new_dma_sectors;
+	scsi_need_isa_buffer = new_need_isa_buffer;
+
+	spin_unlock_irqrestore(&allocator_request_lock, flags);
+
+#ifdef DEBUG_INIT
+	printk("resize_dma_pool: dma free sectors   = %d\n", scsi_dma_free_sectors);
+	printk("resize_dma_pool: dma sectors        = %d\n", dma_sectors);
+	printk("resize_dma_pool: need isa buffers   = %d\n", scsi_need_isa_buffer);
+#endif
+}
+
+/*
+ * Function:    scsi_init_minimal_dma_pool
+ *
+ * Purpose:     Allocate a minimal (1-page) DMA pool.
+ *
+ * Arguments:   None.
+ *
+ * Lock status: No locks assumed to be held.  This function is SMP-safe.
+ *
+ * Returns:     0 on success, 1 if the minimal pool could not be allocated.
+ *
+ * Notes:       
+ */
+int scsi_init_minimal_dma_pool(void)
+{
+	unsigned long size;
+	unsigned long flags;
+	int has_space = 0;
+
+	spin_lock_irqsave(&allocator_request_lock, flags);
+
+	dma_sectors = PAGE_SIZE / SECTOR_SIZE;
+	scsi_dma_free_sectors = dma_sectors;
+	/*
+	 * Set up a minimal DMA buffer list - this will be used during scan_scsis
+	 * in some cases.
+	 */
+
+	/* One bit per sector to indicate free/busy */
+	size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
+	dma_malloc_freelist = (FreeSectorBitmap *)
+	    kmalloc(size, GFP_ATOMIC);
+	if (dma_malloc_freelist) {
+                memset(dma_malloc_freelist, 0, size);
+		/* One pointer per page for the page list */
+		dma_malloc_pages = (unsigned char **) kmalloc(
+                        (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages),
+							     GFP_ATOMIC);
+		if (dma_malloc_pages) {
+                        memset(dma_malloc_pages, 0, size);
+			dma_malloc_pages[0] = (unsigned char *)
+			    __get_free_pages(GFP_ATOMIC | GFP_DMA, 0);
+			if (dma_malloc_pages[0])
+				has_space = 1;
+		}
+	}
+	if (!has_space) {
+		if (dma_malloc_freelist) {
+			kfree((char *) dma_malloc_freelist);
+			if (dma_malloc_pages)
+				kfree((char *) dma_malloc_pages);
+		}
+		spin_unlock_irqrestore(&allocator_request_lock, flags);
+		printk("scsi::init_module: failed, out of memory\n");
+		return 1;
+	}
+
+	spin_unlock_irqrestore(&allocator_request_lock, flags);
+	return 0;
+}
Index: linux/drivers/scsi/scsi_lib.c
diff -u linux/drivers/scsi/scsi_lib.c:1.1.1.5 linux/drivers/scsi/scsi_lib.c:1.8
--- linux/drivers/scsi/scsi_lib.c:1.1.1.5	Fri Jan  7 22:33:08 2000
+++ linux/drivers/scsi/scsi_lib.c	Mon Jan 10 21:44:27 2000
@@ -51,6 +51,13 @@
  */
 
 /*
+ * For hosts that request single-file access to the ISA bus, this is a pointer to
+ * the currently active host.
+ */
+volatile struct Scsi_Host *host_active = NULL;
+
+
+/*
  * Function:    scsi_insert_special_cmd()
  *
  * Purpose:     Insert pre-formed command into request queue.
@@ -202,6 +209,23 @@
  *              If SCpnt is NULL, it means that the previous command
  *              was completely finished, and we should simply start
  *              a new command, if possible.
+ *
+ *		This is where a lot of special case code has begun to
+ *		accumulate.  It doesn't really affect readability or
+ *		anything, but it might be considered architecturally
+ *		inelegant.  If more of these special cases start to
+ *		accumulate, I am thinking along the lines of implementing
+ *		an atexit() like technology that gets run when commands
+ *		complete.  I am not convinced that it is worth the
+ *		added overhead, however.  Right now as things stand,
+ *		there are simple conditional checks, and most hosts
+ *		would skip past.
+ *
+ *		Another possible solution would be to tailor different
+ *		handler functions, sort of like what we did in scsi_merge.c.
+ *		This is probably a better solution, but the number of different
+ *		permutations grows as 2**N, and if too many more special cases
+ *		get added, we start to get screwed.
  */
 void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
 {
@@ -287,6 +311,48 @@
 			SHpnt->some_device_starved = 0;
 		}
 	}
+
+	/*
+	 * This is the code to deal with blocked hosts.  The idea is that if the current host is blocked,
+	 * yet the current host is inactive (as we have completed all requests outstanding, and the
+	 * current host was the "owner", then we walk through the list and search for a new owner,
+	 * and queue up some commands for those devices.
+	 */
+#ifdef CONFIG_SCSI_HOST_BLOCK
+	if(    SDpnt->host->block != NULL
+	    && host_active != NULL
+	    && SDpnt->host == host_active
+	    && host_active->host_busy == 0 ) {
+		/*
+		 * No host currently active.  Look for someone who is idle and who has requests.
+		 */
+		host_active = NULL;
+		
+		for(SHpnt = SDpnt->host->block; SHpnt != SDpnt->host; SHpnt = SHpnt->block) {
+			for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
+				request_queue_t *q;
+				if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue))
+				    || (SHpnt->host_blocked)) {
+					break;
+				}
+				if (SDpnt->device_blocked || !SDpnt->starved) {
+					continue;
+				}
+				q = &SDpnt->request_queue;
+				q->request_fn(q);
+				all_clear = 0;
+			}
+			if (SDpnt == NULL && all_clear) {
+				SHpnt->some_device_starved = 0;
+			}
+			if( host_active != NULL )
+			{
+				break;
+			}
+		}
+	}
+#endif
+
 	spin_unlock_irqrestore(&io_request_lock, flags);
 }
 
@@ -732,6 +798,56 @@
 }
 
 /*
+ * Function:    scsi_blocked_request_fn()
+ *
+ * Purpose:     A request function wrapper for SCSI hosts that have blocking enabled.
+ *
+ * Arguments:   q       - Pointer to actual queue.
+ *
+ * Returns:     Nothing
+ *
+ * Lock status: IO request lock assumed to be held when called.
+ *
+ * Notes:
+ */
+void scsi_blocked_request_fn(request_queue_t * q)
+{
+	Scsi_Device *SDpnt;
+	struct Scsi_Host *SHpnt;
+
+	ASSERT_LOCK(&io_request_lock, 1);
+
+	SDpnt = (Scsi_Device *) q->queuedata;
+	if (!SDpnt) {
+		panic("Missing device");
+	}
+	SHpnt = SDpnt->host;
+
+	/*
+	 * If this host is currently blocked, then ignore the request for now.
+	 */
+	if( host_active != NULL && host_active != SHpnt ) {
+		return;
+	}
+
+	if( host_active == NULL ) {
+		host_active = SHpnt;
+	}
+
+	/*
+	 * At this point, call the normal request function.
+	 */
+	scsi_request_fn(q);
+
+	/*
+	 * If the host isn't active, take it back again.
+	 */
+	if( host_active == SHpnt && SHpnt->host_busy == 0 ) {
+		host_active = NULL;
+	}
+}
+
+/*
  * Function:    scsi_request_fn()
  *
  * Purpose:     Generic version of request function for SCSI hosts.
@@ -985,3 +1101,91 @@
 		spin_lock_irq(&io_request_lock);
 	}
 }
+
+/*
+ * Function:    scsi_make_blocked_list
+ *
+ * Purpose:     Build linked list of hosts that require blocking.
+ *
+ * Arguments:   None.
+ *
+ * Returns:     Nothing
+ *
+ * Notes:       Blocking is sort of a hack that is used to prevent more than one
+ *              host adapter from being active at one time.  This is used in cases
+ *              where the ISA bus becomes unreliable if you have more than one
+ *              host adapter really pumping data through.
+ *
+ *              We spent a lot of time examining the problem, and I *believe* that
+ *              the problem is bus related as opposed to being a driver bug.
+ *
+ *              The blocked list is used as part of the synchronization object
+ *              that we use to ensure that only one host is active at one time.
+ *              I (ERY) would like to make this go away someday, but this would
+ *              require that we have a recursive mutex object.
+ *
+ *		Note2: Now I wish I remember what I meant by that, because we now have
+ *		reader-writer locks...
+ */
+
+void scsi_make_blocked_list(void)
+{
+#ifdef CONFIG_SCSI_HOST_BLOCK
+	int block_count = 0, index;
+	struct Scsi_Host *sh[128], *shpnt;
+
+	/*
+	 * Create a circular linked list from the scsi hosts which have
+	 * the "wish_block" field in the Scsi_Host structure set.
+	 * The blocked list should include all the scsi hosts using ISA DMA.
+	 * In some systems, using two dma channels simultaneously causes
+	 * unpredictable results.
+	 * Among the scsi hosts in the blocked list, only one host at a time
+	 * is allowed to have active commands queued. The transition from
+	 * one active host to the next one is allowed only when host_busy == 0
+	 * for the active host (which implies host_busy == 0 for all the hosts
+	 * in the list). Moreover for block devices the transition to a new
+	 * active host is allowed only when a request is completed, since a
+	 * block device request can be divided into multiple scsi commands
+	 * (when there are few sg lists or clustering is disabled).
+	 *
+	 * (DB, 4 Feb 1995)
+	 */
+
+
+	host_active = NULL;
+
+	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+
+#if 0
+		/*
+		 * Is this is a candidate for the blocked list?
+		 * Useful to put into the blocked list all the hosts whose driver
+		 * does not know about the host->block feature.
+		 */
+		if (shpnt->unchecked_isa_dma)
+			shpnt->wish_block = 1;
+#endif
+
+		if (shpnt->wish_block)
+			sh[block_count++] = shpnt;
+	}
+
+	if (block_count == 1)
+		sh[0]->block = NULL;
+
+	else if (block_count > 1) {
+
+		for (index = 0; index < block_count - 1; index++) {
+			sh[index]->block = sh[index + 1];
+			printk("scsi%d : added to blocked host list.\n",
+			       sh[index]->host_no);
+		}
+
+		sh[block_count - 1]->block = sh[0];
+		printk("scsi%d : added to blocked host list.\n",
+		       sh[index]->host_no);
+	}
+#endif
+}
+
Index: linux/include/linux/isapnp.h
diff -u linux/include/linux/isapnp.h:1.1.1.1 linux/include/linux/isapnp.h:1.2
--- linux/include/linux/isapnp.h:1.1.1.1	Sat Jan  8 12:51:32 2000
+++ linux/include/linux/isapnp.h	Sat Jan  8 13:07:09 2000
@@ -173,21 +173,21 @@
 extern inline void isapnp_write_byte(unsigned char idx, unsigned char val) { ; }
 extern inline void isapnp_write_word(unsigned char idx, unsigned short val) { ; }
 extern inline void isapnp_write_dword(unsigned char idx, unsigned int val) { ; }
-extern void isapnp_wake(unsigned char csn) { ; }
-extern void isapnp_device(unsigned char device) { ; }
-extern void isapnp_activate(unsigned char device) { ; }
-extern void isapnp_deactivate(unsigned char device) { ; }
+extern inline void isapnp_wake(unsigned char csn) { ; }
+extern inline void isapnp_device(unsigned char device) { ; }
+extern inline void isapnp_activate(unsigned char device) { ; }
+extern inline void isapnp_deactivate(unsigned char device) { ; }
 /* manager */
-extern struct pci_bus *isapnp_find_card(unsigned short vendor,
-				        unsigned short device,
-				        struct pci_bus *from) { return NULL; }
-extern struct pci_dev *isapnp_find_dev(struct pci_bus *card,
-				       unsigned short vendor,
-				       unsigned short function,
+extern inline struct pci_bus *isapnp_find_card(unsigned short vendor,
+					       unsigned short device,
+					       struct pci_bus *from) { return NULL; }
+extern inline struct pci_dev *isapnp_find_dev(struct pci_bus *card,
+					      unsigned short vendor,
+					      unsigned short function,
 				       struct pci_dev *from) { return NULL; }
-extern void isapnp_resource_change(struct resource *resource,
-				   unsigned long start,
-				   unsigned long size) { ; }
+extern inline void isapnp_resource_change(struct resource *resource,
+					  unsigned long start,
+					  unsigned long size) { ; }
 
 #endif /* CONFIG_ISAPNP */
 
Index: linux/include/linux/proc_fs.h
diff -u linux/include/linux/proc_fs.h:1.1.1.1 linux/include/linux/proc_fs.h:1.2
--- linux/include/linux/proc_fs.h:1.1.1.1	Sat Jan  8 12:51:31 2000
+++ linux/include/linux/proc_fs.h	Sat Jan  8 13:07:09 2000
@@ -181,11 +181,11 @@
 	mode_t mode, struct proc_dir_entry *parent) { return NULL; }
 
 extern inline void remove_proc_entry(const char *name, struct proc_dir_entry *parent) {};
-extern inline proc_dir_entry *proc_symlink(const char *name,
+extern inline struct proc_dir_entry *proc_symlink(const char *name,
 		struct proc_dir_entry *parent,char *dest) {return NULL;}
-extern inline proc_dir_entry *proc_mknod(const char *name,mode_t mode,
+extern inline struct proc_dir_entry *proc_mknod(const char *name,mode_t mode,
 		struct proc_dir_entry *parent,kdev_t rdev) {return NULL;}
-extern struct proc_dir_entry *proc_mkdir(const char *name,
+extern inline struct proc_dir_entry *proc_mkdir(const char *name,
 	struct proc_dir_entry *parent) {return NULL;}
 
 extern inline struct proc_dir_entry *create_proc_read_entry(const char *name,
