[PATCH] missing null termination in power supply uevent

2007-09-24 Thread Linux Kernel Mailing List
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=ed2ba977d43a6031f78f9e49d739ef5094f512e4
Commit: ed2ba977d43a6031f78f9e49d739ef5094f512e4
Parent: 335fb8fc71692830aca0a5a5fe7f60016ee0d0aa
Author: Stephen Hemminger [EMAIL PROTECTED]
AuthorDate: Thu Sep 20 12:06:10 2007 -0700
Committer:  Anton Vorontsov [EMAIL PROTECTED]
CommitDate: Fri Sep 21 01:22:23 2007 +0400

[PATCH] missing null termination in power supply uevent

Need to null terminate the environment. Found by inspection
while looking for similar problems to the platform uevent bug.

Signed-off-by: Stephen Hemminger [EMAIL PROTECTED]
---
 drivers/power/power_supply_sysfs.c |1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index c7c4574..de3155b 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -289,6 +289,7 @@ int power_supply_uevent(struct device *dev, char **envp, int num_envp,
if (ret)
goto out;
}
+   envp[i] = NULL;
 
 out:
free_page((unsigned long)prop_buf);
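
For context, uevent consumers walk envp as a NULL-terminated array, so a
missing terminator sends them reading past the filled slots. A minimal
userspace-style sketch of that contract (invented names; not the kernel
code above):

#include <stdio.h>

/* walks the array until the NULL sentinel, like an envp consumer */
static void print_env(char **envp)
{
	int i;

	for (i = 0; envp[i] != NULL; i++)
		printf("%s\n", envp[i]);
}

int main(void)
{
	char *envp[4];
	int i = 0;

	envp[i++] = "POWER_SUPPLY_NAME=BAT0";
	envp[i++] = "POWER_SUPPLY_STATUS=Charging";
	envp[i] = NULL;		/* the terminator this patch adds */

	print_env(envp);
	return 0;
}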


IB/mlx4: Fix data corruption triggered by wrong headroom marking order

2007-09-24 Thread Linux Kernel Mailing List
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=6e694ea33e7a7fad908d188c46f441f04fb633d4
Commit: 6e694ea33e7a7fad908d188c46f441f04fb633d4
Parent: 40ffbfad6bb79a99cc7627bdaca0ee22dec526f6
Author: Jack Morgenstein [EMAIL PROTECTED]
AuthorDate: Wed Sep 19 09:52:25 2007 -0700
Committer:  Roland Dreier [EMAIL PROTECTED]
CommitDate: Sun Sep 23 13:03:22 2007 -0700

IB/mlx4: Fix data corruption triggered by wrong headroom marking order

This is an addendum to commit 0e6e7416 (IB/mlx4: Handle new FW
requirement for send request prefetching).  We also need to handle
prefetch marking properly for S/G segments, or else the HCA may end up
processing S/G segments that are not fully written and end up sending
the wrong data.  This can actually cause data corruption in practice,
especially on systems with relatively slow CPUs (where the HCA is more
likely to prefetch while the CPU is in the middle of writing a work
request into memory).

We write S/G segments in reverse order into the WQE, in order to
guarantee that the first dword of all cachelines containing S/G
segments is written last (overwriting the headroom invalidation
pattern).  The entire cacheline will thus contain valid data when the
invalidation pattern is overwritten.

Signed-off-by: Jack Morgenstein [EMAIL PROTECTED]
Signed-off-by: Roland Dreier [EMAIL PROTECTED]
---
 drivers/infiniband/hw/mlx4/qp.c |   62 ++
 1 files changed, 49 insertions(+), 13 deletions(-)

diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ba0428d..85c51bd 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1211,12 +1211,42 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
 }
 
-static void set_data_seg(struct mlx4_wqe_data_seg *dseg,
-struct ib_sge *sg)
+static void set_mlx_icrc_seg(void *dseg)
+{
+   u32 *t = dseg;
+   struct mlx4_wqe_inline_seg *iseg = dseg;
+
+   t[1] = 0;
+
+   /*
+* Need a barrier here before writing the byte_count field to
+* make sure that all the data is visible before the
+* byte_count field is set.  Otherwise, if the segment begins
+* a new cacheline, the HCA prefetcher could grab the 64-byte
+* chunk and get a valid (!= * 0xffffffff) byte count but
+* stale data, and end up sending the wrong data.
+*/
+   wmb();
+
+   iseg->byte_count = cpu_to_be32((1 << 31) | 4);
+}
+
+static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
 {
-   dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey   = cpu_to_be32(sg->lkey);
	dseg->addr   = cpu_to_be64(sg->addr);
+
+   /*
+* Need a barrier here before writing the byte_count field to
+* make sure that all the data is visible before the
+* byte_count field is set.  Otherwise, if the segment begins
+* a new cacheline, the HCA prefetcher could grab the 64-byte
+* chunk and get a valid (!= * 0xffffffff) byte count but
+* stale data, and end up sending the wrong data.
+*/
+   wmb();
+
+   dseg->byte_count = cpu_to_be32(sg->length);
 }
 
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -1225,6 +1255,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct mlx4_ib_qp *qp = to_mqp(ibqp);
void *wqe;
struct mlx4_wqe_ctrl_seg *ctrl;
+   struct mlx4_wqe_data_seg *dseg;
unsigned long flags;
int nreq;
int err = 0;
@@ -1324,22 +1355,27 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
}
 
-   for (i = 0; i < wr->num_sge; ++i) {
-   set_data_seg(wqe, wr->sg_list + i);
+   /*
+* Write data segments in reverse order, so as to
+* overwrite cacheline stamp last within each
+* cacheline.  This avoids issues with WQE
+* prefetching.
+*/
 
-   wqe  += sizeof (struct mlx4_wqe_data_seg);
-   size += sizeof (struct mlx4_wqe_data_seg) / 16;
-   }
+   dseg = wqe;
+   dseg += wr->num_sge - 1;
+   size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
 
/* Add one more inline data segment for ICRC for MLX sends */
-   if (qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) {
-	   ((struct mlx4_wqe_inline_seg *) wqe)->byte_count =
-		   cpu_to_be32((1 << 31) | 4);
-	   ((u32 *) wqe)[1] = 0;
-	   wqe  += sizeof (struct mlx4_wqe_data_seg);
+   
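
The hunk above is truncated in this digest. The ordering idiom the patch
relies on can be shown in isolation; a kernel-style sketch with invented
names (fake_seg, valid_count), assuming 2.6.23-era headers: fill the
descriptor body first, issue a write barrier, and only then store the
word the hardware polls, so a prefetcher never sees a valid count paired
with stale payload.

#include <linux/types.h>	/* u32, u64, __be32 */
#include <asm/system.h>		/* wmb() on 2.6.23 */
#include <asm/byteorder.h>	/* cpu_to_be32() */

struct fake_seg {
	__be32 valid_count;	/* word the HCA polls/prefetches */
	u64 payload;
};

static void post_fake_seg(struct fake_seg *seg, u64 data, u32 len)
{
	seg->payload = data;			/* body first */

	wmb();					/* body visible before count */

	seg->valid_count = cpu_to_be32(len);	/* HCA may fetch from here on */
}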

Convert snd-page-alloc proc file to use seq_file

2007-09-24 Thread Linux Kernel Mailing List
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=ccec6e2c4a74adf76ed4e2478091a311b1806212
Commit: ccec6e2c4a74adf76ed4e2478091a311b1806212
Parent: 7bae705ef2c2daac1993de03e5be93b5c300fc5e
Author: Takashi Iwai [EMAIL PROTECTED]
AuthorDate: Mon Sep 17 21:55:10 2007 +0200
Committer:  Linus Torvalds [EMAIL PROTECTED]
CommitDate: Mon Sep 24 08:20:52 2007 -0700

Convert snd-page-alloc proc file to use seq_file

Use seq_file for the proc file read/write of snd-page-alloc module.
This automatically fixes bugs in the old proc code.

Signed-off-by: Takashi Iwai [EMAIL PROTECTED]
Signed-off-by: Linus Torvalds [EMAIL PROTECTED]
---
 sound/core/memalloc.c |   68 -
 1 files changed, 39 insertions(+), 29 deletions(-)

diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index f057430..9b5656d 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
@@ -481,53 +482,54 @@ static void free_all_reserved_pages(void)
#define SND_MEM_PROC_FILE  "driver/snd-page-alloc"
 static struct proc_dir_entry *snd_mem_proc;
 
-static int snd_mem_proc_read(char *page, char **start, off_t off,
-int count, int *eof, void *data)
+static int snd_mem_proc_read(struct seq_file *seq, void *offset)
 {
-   int len = 0;
long pages = snd_allocated_pages << (PAGE_SHIFT-12);
struct snd_mem_list *mem;
int devno;
static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };
 
mutex_lock(&list_mutex);
-   len += snprintf(page + len, count - len,
-		   "pages  : %li bytes (%li pages per %likB)\n",
-		   pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
+   seq_printf(seq, "pages  : %li bytes (%li pages per %likB)\n",
+	      pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
devno = 0;
list_for_each_entry(mem, &mem_list_head, list) {
devno++;
-   len += snprintf(page + len, count - len,
-		   "buffer %d : ID %08x : type %s\n",
-		   devno, mem->id, types[mem->buffer.dev.type]);
-   len += snprintf(page + len, count - len,
-		   "  addr = 0x%lx, size = %d bytes\n",
-		   (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes);
+   seq_printf(seq, "buffer %d : ID %08x : type %s\n",
+	      devno, mem->id, types[mem->buffer.dev.type]);
+   seq_printf(seq, "  addr = 0x%lx, size = %d bytes\n",
+	      (unsigned long)mem->buffer.addr,
+	      (int)mem->buffer.bytes);
}
mutex_unlock(&list_mutex);
-   return len;
+   return 0;
+}
+
+static int snd_mem_proc_open(struct inode *inode, struct file *file)
+{
+   return single_open(file, snd_mem_proc_read, NULL);
 }
 
 /* FIXME: for pci only - other bus? */
 #ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")
 
-static int snd_mem_proc_write(struct file *file, const char __user *buffer,
-			      unsigned long count, void *data)
+static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
+				  size_t count, loff_t * ppos)
 {
char buf[128];
char *token, *p;
 
-   if (count > ARRAY_SIZE(buf) - 1)
-	   count = ARRAY_SIZE(buf) - 1;
+   if (count > sizeof(buf) - 1)
+	   return -EINVAL;
if (copy_from_user(buf, buffer, count))
return -EFAULT;
-   buf[ARRAY_SIZE(buf) - 1] = '\0';
+   buf[count] = '\0';
 
p = buf;
token = gettoken(p);
if (! token || *token == '#')
-   return (int)count;
+   return count;
if (strcmp(token, "add") == 0) {
char *endp;
int vendor, device, size, buffers;
@@ -548,7 +550,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
	    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
	    buffers > 4) {
		printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
-   return (int)count;
+   return count;
}
vendor &= 0xffff;
device &= 0xffff;
@@ -560,7 +562,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
		if (pci_set_dma_mask(pci, mask) < 0 ||
		    pci_set_consistent_dma_mask(pci, mask) < 0) {
			printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, 
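
The tail of this patch is cut off above. As a reference for the
conversion itself, a minimal sketch of the single_open()/seq_file
pattern against the 2.6.23-era procfs API (my_show, my_fops, and
"driver/my-demo" are invented names); seq_file does the buffer and
offset bookkeeping that the old read callback tracked by hand with
snprintf lengths.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int my_show(struct seq_file *seq, void *offset)
{
	seq_printf(seq, "pages  : %li bytes\n", 4096L);	/* no len tracking */
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_show, NULL);
}

static const struct file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= my_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init my_init(void)
{
	struct proc_dir_entry *entry;

	entry = create_proc_entry("driver/my-demo", 0644, NULL);
	if (entry)
		entry->proc_fops = &my_fops;
	return 0;
}
module_init(my_init);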

async_tx: usage documentation and developer notes (v2)

2007-09-24 Thread Linux Kernel Mailing List
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=c5d2b9f444b8d9f5ad7c5e583686c119ba3a9ba7
Commit: c5d2b9f444b8d9f5ad7c5e583686c119ba3a9ba7
Parent: 7bae705ef2c2daac1993de03e5be93b5c300fc5e
Author: Dan Williams [EMAIL PROTECTED]
AuthorDate: Thu Sep 20 15:49:08 2007 -0700
Committer:  Dan Williams [EMAIL PROTECTED]
CommitDate: Mon Sep 24 10:26:25 2007 -0700

async_tx: usage documentation and developer notes (v2)

Changes in v2:
* cleanups from Randy and Shannon

Reviewed-by: Randy Dunlap [EMAIL PROTECTED]
Reviewed-by: Shannon Nelson [EMAIL PROTECTED]
Signed-off-by: Dan Williams [EMAIL PROTECTED]
---
 Documentation/crypto/async-tx-api.txt |  219 +
 1 files changed, 219 insertions(+), 0 deletions(-)

diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
new file mode 100644
index 000..c1e9545
--- /dev/null
+++ b/Documentation/crypto/async-tx-api.txt
@@ -0,0 +1,219 @@
+Asynchronous Transfers/Transforms API
+
+1 INTRODUCTION
+
+2 GENEALOGY
+
+3 USAGE
+3.1 General format of the API
+3.2 Supported operations
+3.3 Descriptor management
+3.4 When does the operation execute?
+3.5 When does the operation complete?
+3.6 Constraints
+3.7 Example
+
+4 DRIVER DEVELOPER NOTES
+4.1 Conformance points
+4.2 My application needs finer control of hardware channels
+
+5 SOURCE
+
+---
+
+1 INTRODUCTION
+
+The async_tx API provides methods for describing a chain of asynchronous
+bulk memory transfers/transforms with support for inter-transactional
+dependencies.  It is implemented as a dmaengine client that smooths over
+the details of different hardware offload engine implementations.  Code
+that is written to the API can optimize for asynchronous operation and
+the API will fit the chain of operations to the available offload
+resources.
+
+2 GENEALOGY
+
+The API was initially designed to offload the memory copy and
+xor-parity-calculations of the md-raid5 driver using the offload engines
+present in the Intel(R) Xscale series of I/O processors.  It also built
+on the 'dmaengine' layer developed for offloading memory copies in the
+network stack using Intel(R) I/OAT engines.  The following design
+features surfaced as a result:
+1/ implicit synchronous path: users of the API do not need to know if
+   the platform they are running on has offload capabilities.  The
+   operation will be offloaded when an engine is available and carried out
+   in software otherwise.
+2/ cross channel dependency chains: the API allows a chain of dependent
+   operations to be submitted, like xor-copy-xor in the raid5 case.  The
+   API automatically handles cases where the transition from one operation
+   to another implies a hardware channel switch.
+3/ dmaengine extensions to support multiple clients and operation types
+   beyond 'memcpy'
+
+3 USAGE
+
+3.1 General format of the API:
+struct dma_async_tx_descriptor *
+async_<operation>(<op specific parameters>,
+		  enum async_tx_flags flags,
+		  struct dma_async_tx_descriptor *dependency,
+		  dma_async_tx_callback callback_routine,
+		  void *callback_parameter);
+
+3.2 Supported operations:
+memcpy   - memory copy between a source and a destination buffer
+memset   - fill a destination buffer with a byte value
+xor  - xor a series of source buffers and write the result to a
+  destination buffer
+xor_zero_sum - xor a series of source buffers and set a flag if the
+  result is zero.  The implementation attempts to prevent
+  writes to memory
+
+3.3 Descriptor management:
+The return value is non-NULL and points to a 'descriptor' when the operation
+has been queued to execute asynchronously.  Descriptors are recycled
+resources, under control of the offload engine driver, to be reused as
+operations complete.  When an application needs to submit a chain of
+operations it must guarantee that the descriptor is not automatically recycled
+before the dependency is submitted.  This requires that all descriptors be
+acknowledged by the application before the offload engine driver is allowed to
+recycle (or free) the descriptor.  A descriptor can be acked by one of the
+following methods:
+1/ setting the ASYNC_TX_ACK flag if no child operations are to be submitted
+2/ setting the ASYNC_TX_DEP_ACK flag to acknowledge the parent
+   descriptor of a new operation.
+3/ calling async_tx_ack() on the descriptor.
+
+3.4 When does the operation execute?
+Operations do not immediately issue after return from the
+async_<operation> call.  Offload engine drivers batch operations to
+improve performance by reducing the number of mmio cycles needed to
+manage the channel.  Once a driver-specific threshold is met the driver
+automatically issues pending operations.  An application can force this
+event by calling async_tx_issue_pending_all().  
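
The document is truncated above (sections 3.5 onward are missing from
this digest). A hedged usage sketch of the section 3.1 call format,
using the async_memcpy() entry point as it existed around 2.6.23
(my_done and my_copy are invented names); per section 3.3, the flags
argument acks the descriptor since nothing is chained after it, and a
NULL return means the operation was already carried out synchronously
in software.

#include <linux/kernel.h>
#include <linux/async_tx.h>

static void my_done(void *param)
{
	pr_debug("copy complete\n");
}

static void my_copy(struct page *dest, struct page *src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	/* no dependency; ack immediately since no child will be chained */
	tx = async_memcpy(dest, src, 0, 0, len,
			  ASYNC_TX_ACK, NULL, my_done, NULL);
	if (!tx)
		pr_debug("ran synchronously in software\n");
}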

async_tx: fix dma_wait_for_async_tx

2007-09-24 Thread Linux Kernel Mailing List
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=6247cdc2cd334dad0ea5428245a7d8f4b075f21e
Commit: 6247cdc2cd334dad0ea5428245a7d8f4b075f21e
Parent: c5d2b9f444b8d9f5ad7c5e583686c119ba3a9ba7
Author: Dan Williams [EMAIL PROTECTED]
AuthorDate: Fri Sep 21 13:27:04 2007 -0700
Committer:  Dan Williams [EMAIL PROTECTED]
CommitDate: Mon Sep 24 10:26:26 2007 -0700

async_tx: fix dma_wait_for_async_tx

Fix dma_wait_for_async_tx to not loop forever in the case where a
dependency chain is longer than two entries.  This condition will not
happen with current in-kernel drivers, but fix it for future drivers.

Found-by: Saeed Bishara [EMAIL PROTECTED]
Signed-off-by: Dan Williams [EMAIL PROTECTED]
---
 crypto/async_tx/async_tx.c |   12 ++--
 1 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 0350071..bc18cbb 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -80,6 +80,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
enum dma_status status;
struct dma_async_tx_descriptor *iter;
+   struct dma_async_tx_descriptor *parent;
 
if (!tx)
return DMA_SUCCESS;
@@ -87,8 +88,15 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
/* poll through the dependency chain, return when tx is complete */
do {
iter = tx;
-   while (iter->cookie == -EBUSY)
-	   iter = iter->parent;
+
+   /* find the root of the unsubmitted dependency chain */
+   while (iter->cookie == -EBUSY) {
+	   parent = iter->parent;
+	   if (parent && parent->cookie == -EBUSY)
+		   iter = iter->parent;
+	   else
+		   break;
+   }
 
status = dma_sync_wait(iter->chan, iter->cookie);
} while (status == DMA_IN_PROGRESS || (iter != tx));
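
A userspace toy model of the traversal this fix introduces (desc and
unsubmitted_root are invented names): the loop now stops on the deepest
descriptor still marked unsubmitted, whose channel and cookie are then
polled, instead of overshooting onto an already-submitted ancestor.

#include <stdio.h>

#define UNSUBMITTED (-16)	/* stand-in for -EBUSY cookies */

struct desc {
	int cookie;
	struct desc *parent;
};

static struct desc *unsubmitted_root(struct desc *tx)
{
	struct desc *iter = tx, *parent;

	while (iter->cookie == UNSUBMITTED) {
		parent = iter->parent;
		if (parent && parent->cookie == UNSUBMITTED)
			iter = parent;
		else
			break;	/* iter is the deepest unsubmitted node */
	}
	return iter;
}

int main(void)
{
	struct desc a = { 1, NULL };		/* submitted */
	struct desc b = { UNSUBMITTED, &a };
	struct desc c = { UNSUBMITTED, &b };	/* chain of three */

	printf("wait target: %s\n",
	       unsubmitted_root(&c) == &b ? "b (correct)" : "wrong");
	return 0;
}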


raid5: fix 2 bugs in ops_complete_biofill

2007-09-24 Thread Linux Kernel Mailing List
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=e4d84909dd48b5e5806a5d18b881e1ca1610ba9b
Commit: e4d84909dd48b5e5806a5d18b881e1ca1610ba9b
Parent: 6247cdc2cd334dad0ea5428245a7d8f4b075f21e
Author: Dan Williams [EMAIL PROTECTED]
AuthorDate: Mon Sep 24 10:06:13 2007 -0700
Committer:  Dan Williams [EMAIL PROTECTED]
CommitDate: Mon Sep 24 13:23:35 2007 -0700

raid5: fix 2 bugs in ops_complete_biofill

1/ ops_complete_biofill tried to avoid calling handle_stripe since all the
state necessary to return read completions is available.  However the
process of determining whether more read requests are pending requires
locking the stripe (to block add_stripe_bio from updating dev->toread).
ops_complete_biofill can run in tasklet context, so rather than upgrading
all the stripe locks from spin_lock to spin_lock_bh this patch just
unconditionally reschedules handle_stripe after completing the read
request.

2/ ops_complete_biofill needlessly qualified processing R5_Wantfill with
dev->toread.  The result was that the 'biofill' pending bit was cleared
before the pending read-completions on dev->read were handled.  R5_Wantfill can
be unconditionally handled because the 'biofill' pending bit prevents new
R5_Wantfill requests from being seen by ops_run_biofill and
ops_complete_biofill.

Found-by: Yuri Tikhonov [EMAIL PROTECTED]
[EMAIL PROTECTED]: simpler fix for bug 1 than moving code]
Signed-off-by: NeilBrown [EMAIL PROTECTED]
Signed-off-by: Dan Williams [EMAIL PROTECTED]
---
 drivers/md/raid5.c |   17 +++--
 1 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4d63773..f96dea9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
struct stripe_head *sh = stripe_head_ref;
struct bio *return_bi = NULL;
raid5_conf_t *conf = sh->raid_conf;
-   int i, more_to_read = 0;
+   int i;
 
pr_debug("%s: stripe %llu\n", __FUNCTION__,
	(unsigned long long)sh->sector);
@@ -522,16 +522,14 @@ static void ops_complete_biofill(void *stripe_head_ref)
/* clear completed biofills */
for (i = sh->disks; i--; ) {
	struct r5dev *dev = &sh->dev[i];
-   /* check if this stripe has new incoming reads */
-   if (dev->toread)
-   more_to_read++;
 
/* acknowledge completion of a biofill operation */
-   /* and check if we need to reply to a read request
-   */
-   if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) {
+   /* and check if we need to reply to a read request,
+    * new R5_Wantfill requests are held off until
+    * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+    */
+   if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
struct bio *rbi, *rbi2;
-   clear_bit(R5_Wantfill, &dev->flags);
 
/* The access to dev->read is outside of the
 * spin_lock_irq(&conf->device_lock), but is protected
@@ -558,8 +556,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 
return_io(return_bi);
 
-   if (more_to_read)
-	   set_bit(STRIPE_HANDLE, &sh->state);
+   set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
 }
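
As a side note on the test_and_clear_bit() conversion above, a hedged
fragment (MY_WANTFILL and handle are invented names) showing the general
property the combined atomic provides over a separate test_bit() and
clear_bit() pair: the single read-modify-write means exactly one context
can observe the set bit, with no window between the test and the clear.

#include <linux/bitops.h>

#define MY_WANTFILL 0	/* stand-in for a flag like R5_Wantfill */

static void handle(unsigned long *flags)
{
	/* atomic read-and-clear: only one caller can win the bit */
	if (test_and_clear_bit(MY_WANTFILL, flags)) {
		/* ... complete the read request exactly once ... */
	}
}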
 


[MIPS] SMTC: Make ack_bad_irq() safe with no IM backstop.

2007-09-24 Thread Linux Kernel Mailing List
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=1146fe30504a1edd8a434f500e1be139492570c9
Commit: 1146fe30504a1edd8a434f500e1be139492570c9
Parent: 7bae705ef2c2daac1993de03e5be93b5c300fc5e
Author: Ralf Baechle [EMAIL PROTECTED]
AuthorDate: Fri Sep 21 17:13:55 2007 +0100
Committer:  Ralf Baechle [EMAIL PROTECTED]
CommitDate: Mon Sep 24 18:13:02 2007 +0100

[MIPS] SMTC: Make ack_bad_irq() safe with no IM backstop.

Issue reported and original patch by Kevin Kissel, cleaner (imho)
implementation by me.

Signed-off-by: Ralf Baechle [EMAIL PROTECTED]
---
 arch/mips/kernel/i8259.c |5 +
 arch/mips/kernel/irq-msc01.c |   10 ++
 arch/mips/kernel/irq.c   |   10 +-
 arch/mips/kernel/smtc.c  |5 -
 include/asm-mips/irq.h   |   32 
 5 files changed, 32 insertions(+), 30 deletions(-)

diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b6c3080..3a2d255 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -177,10 +177,7 @@ handle_real_irq:
outb(cached_master_mask, PIC_MASTER_IMR);
outb(0x60+irq,PIC_MASTER_CMD);  /* 'Specific EOI to master */
}
-#ifdef CONFIG_MIPS_MT_SMTC
-   if (irq_hwmask[irq] & ST0_IM)
-	   set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+   smtc_im_ack_irq(irq);
spin_unlock_irqrestore(&i8259A_lock, flags);
return;
 
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 410868b..1ecdd50 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -52,11 +52,8 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
mask_msc_irq(irq);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
-#ifdef CONFIG_MIPS_MT_SMTC
/* This actually needs to be a call into platform code */
-   if (irq_hwmask[irq] & ST0_IM)
-	   set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+   smtc_im_ack_irq(irq);
 }
 
 /*
@@ -73,10 +70,7 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
}
-#ifdef CONFIG_MIPS_MT_SMTC
-   if (irq_hwmask[irq] & ST0_IM)
-	   set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+   smtc_im_ack_irq(irq);
 }
 
 /*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index aeded6c..a990aad 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -74,20 +74,12 @@ EXPORT_SYMBOL_GPL(free_irqno);
  */
 void ack_bad_irq(unsigned int irq)
 {
+   smtc_im_ack_irq(irq);
printk("unexpected IRQ # %d\n", irq);
 }
 
 atomic_t irq_err_count;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-/*
- * SMTC Kernel needs to manipulate low-level CPU interrupt mask
- * in do_IRQ. These are passed in setup_irq_smtc() and stored
- * in this table.
- */
-unsigned long irq_hwmask[NR_IRQS];
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 /*
  * Generic, controller-independent functions:
  */
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 43826c1..f094043 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -25,8 +25,11 @@
#include <asm/smtc_proc.h>
 
 /*
- * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
  */
+unsigned long irq_hwmask[NR_IRQS];
 
 #define LOCK_MT_PRA() \
local_irq_save(flags); \
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 97102eb..2cb52cf 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -24,7 +24,30 @@ static inline int irq_canonicalize(int irq)
 #define irq_canonicalize(irq) (irq)/* Sane hardware, sane code ... */
 #endif
 
+#ifdef CONFIG_MIPS_MT_SMTC
+
+struct irqaction;
+
+extern unsigned long irq_hwmask[];
+extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
+  unsigned long hwmask);
+
+static inline void smtc_im_ack_irq(unsigned int irq)
+{
+   if (irq_hwmask[irq] & ST0_IM)
+	   set_c0_status(irq_hwmask[irq] & ST0_IM);
+}
+
+#else
+
+static inline void smtc_im_ack_irq(unsigned int irq)
+{
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
+
 /*
  * Clear interrupt mask handling backstop if irq_hwmask
  * entry so indicates. This implies that the ack() or end()
@@ -38,6 +61,7 @@ do { \
   ~(irq_hwmask[irq] & 0xff00));\
 } while (0)
 #else
+
 #define __DO_IRQ_SMTC_HOOK(irq) do { } while (0)
 #endif
 
@@ -60,14 +84,6 @@ do { 
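
The final hunk is truncated above. The pattern the patch applies
throughout is worth stating once; a sketch with invented names
(CONFIG_MY_FEATURE, my_feature_hook): provide a real static inline when
the option is set and an empty one otherwise, so call sites drop their
#ifdef blocks and the compiler discards the no-op calls entirely.

#ifdef CONFIG_MY_FEATURE

static inline void my_feature_hook(unsigned int irq)
{
	/* real work only in configurations that need it */
}

#else

static inline void my_feature_hook(unsigned int irq)
{
}

#endif /* CONFIG_MY_FEATURE */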

Linux 2.6.23-rc8

2007-09-24 Thread Linux Kernel Mailing List
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=4942de4a0e914f205d351a81873f4f63986bcc3c
Commit: 4942de4a0e914f205d351a81873f4f63986bcc3c
Parent: e0b9d139f2595fafbe95fcb7b40109db724900e8
Author: Linus Torvalds [EMAIL PROTECTED]
AuthorDate: Mon Sep 24 17:33:10 2007 -0700
Committer:  Linus Torvalds [EMAIL PROTECTED]
CommitDate: Mon Sep 24 17:33:10 2007 -0700

Linux 2.6.23-rc8

Getting there...
---
 Makefile |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/Makefile b/Makefile
index c265e41..4dac253 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 23
-EXTRAVERSION =-rc7
+EXTRAVERSION =-rc8
 NAME = Arr Matey! A Hairy Bilge Rat!
 
 # *DOCUMENTATION*