Re: [PATCH 4/5] Add lock_page_killable

2007-10-24 Thread Nick Piggin
On Thursday 25 October 2007 14:11, Andrew Morton wrote:
> On Wed, 24 Oct 2007 08:24:57 -0400 Matthew Wilcox <[EMAIL PROTECTED]> wrote:
> > and associated infrastructure such as sync_page_killable and
> > fatal_signal_pending.  Use lock_page_killable in
> > do_generic_mapping_read() to allow us to kill `cat' of a file on an
> > NFS-mounted filesystem.
>
> whoa, big change.
>
> What exactly are the semantics here?  If the process has actually been
> killed (ie: we know that userspace won't be running again) then we break
> out of a lock_page() and allow the process to exit?  ie: it's basically
> invisible to userspace?

The actual conversions should also be relatively useful groundwork
if we ever want to make more things become generally interruptible.


> If so, it sounds OK.  I guess.  We're still screwed if the process is doing
> a synchronous write and lots of other scenarios.

I don't think it will matter in too many situations. If the process is
doing a synchronous write, nothing is guaranteed until the syscall
returns success...
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH 4/5] Add lock_page_killable

2007-10-24 Thread Andrew Morton
On Wed, 24 Oct 2007 08:24:57 -0400 Matthew Wilcox <[EMAIL PROTECTED]> wrote:

> and associated infrastructure such as sync_page_killable and
> fatal_signal_pending.  Use lock_page_killable in do_generic_mapping_read()
> to allow us to kill `cat' of a file on an NFS-mounted filesystem.

whoa, big change.

What exactly are the semantics here?  If the process has actually been
killed (ie: we know that userspace won't be running again) then we break
out of a lock_page() and allow the process to exit?  ie: it's basically
invisible to userspace?

If so, it sounds OK.  I guess.  We're still screwed if the process is doing
a synchronous write and lots of other scenarios.

How well has this been tested?

Have the NFS guys had a think about it?

Why does it return -EIO from read() and not -EINTR?
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH 4/5] Add lock_page_killable

2007-10-24 Thread Matthew Wilcox
and associated infrastructure such as sync_page_killable and
fatal_signal_pending.  Use lock_page_killable in do_generic_mapping_read()
to allow us to kill `cat' of a file on an NFS-mounted filesystem.

Signed-off-by: Matthew Wilcox <[EMAIL PROTECTED]>
---
 include/linux/pagemap.h |   14 ++
 include/linux/sched.h   |9 -
 kernel/signal.c |5 +
 mm/filemap.c|   25 +
 4 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index db8a410..4b62a10 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -157,6 +157,7 @@ static inline pgoff_t linear_page_index(struct 
vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
@@ -171,6 +172,19 @@ static inline void lock_page(struct page *page)
 }
 
 /*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+   might_sleep();
+   if (TestSetPageLocked(page))
+   return __lock_page_killable(page);
+   return 0;
+}
+
+/*
  * lock_page_nosync should only be used if we can't pin the page's inode.
  * Doesn't play quite so well with block device plugging.
  */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d1e32ec..7ccf92a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1856,7 +1856,14 @@ static inline int signal_pending(struct task_struct *p)
 {
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
-  
+
+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+   return signal_pending(p) && __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
return unlikely(test_thread_flag(TIF_NEED_RESCHED));
diff --git a/kernel/signal.c b/kernel/signal.c
index 0a805b7..50aa183 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -994,6 +994,11 @@ void zap_other_threads(struct task_struct *p)
}
 }
 
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+   return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
diff --git a/mm/filemap.c b/mm/filemap.c
index 5209e47..6cccb4b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -172,6 +172,12 @@ static int sync_page(void *word)
return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+   sync_page(word);
+   return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:   address space structure to write
@@ -576,6 +582,14 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int fastcall __lock_page_killable(struct page *page)
+{
+   DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+   return __wait_on_bit_lock(page_waitqueue(page), &wait,
+   sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
@@ -967,7 +981,8 @@ page_ok:
 
 page_not_up_to_date:
/* Get exclusive access to the page ... */
-   lock_page(page);
+   if (lock_page_killable(page))
+   goto readpage_eio;
 
/* Did it get truncated before we got the lock? */
if (!page->mapping) {
@@ -995,7 +1010,8 @@ readpage:
}
 
if (!PageUptodate(page)) {
-   lock_page(page);
+   if (lock_page_killable(page))
+   goto readpage_eio;
if (!PageUptodate(page)) {
if (page->mapping == NULL) {
/*
@@ -1006,15 +1022,16 @@ readpage:
goto find_page;
}
unlock_page(page);
-   error = -EIO;
shrink_readahead_size_eio(filp, ra);
-   goto readpage_error;
+   goto readpage_eio;
}
unlock_page(page);
}
 
goto page_ok;
 
+readpage_eio:
+   error = -EIO;
 readpage_error:
/* UHHUH! A synchronous read error occurred. Report it */
desc->error = error;
-- 
1.4.4.2

-
To unsubscribe from this list: send the line "unsubscribe 

Re: [PATCH 4/5] Add lock_page_killable

2007-10-24 Thread Andrew Morton
On Wed, 24 Oct 2007 08:24:57 -0400 Matthew Wilcox <[EMAIL PROTECTED]> wrote:

 and associated infrastructure such as sync_page_killable and
 fatal_signal_pending.  Use lock_page_killable in do_generic_mapping_read()
 to allow us to kill `cat' of a file on an NFS-mounted filesystem.

whoa, big change.

What exactly are the semantics here?  If the process has actually been
killed (ie: we know that userspace won't be running again) then we break
out of a lock_page() and allow the process to exit?  ie: it's basically
invisible to userspace?

If so, it sounds OK.  I guess.  We're still screwed if the process is doing
a synchronous write and lots of other scenarios.

How well has this been tested?

Have the NFS guys had a think about it?

Why does it return -EIO from read() and not -EINTR?
-
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH 4/5] Add lock_page_killable

2007-10-24 Thread Nick Piggin
On Thursday 25 October 2007 14:11, Andrew Morton wrote:
 On Wed, 24 Oct 2007 08:24:57 -0400 Matthew Wilcox <[EMAIL PROTECTED]> wrote:
  and associated infrastructure such as sync_page_killable and
  fatal_signal_pending.  Use lock_page_killable in
  do_generic_mapping_read() to allow us to kill `cat' of a file on an
  NFS-mounted filesystem.

 whoa, big change.

 What exactly are the semantics here?  If the process has actually been
 killed (ie: we know that userspace won't be running again) then we break
 out of a lock_page() and allow the process to exit?  ie: it's basically
 invisible to userspace?

The actual conversions should also be relatively useful groundwork
if we ever want to make more things become generally interruptible.


 If so, it sounds OK.  I guess.  We're still screwed if the process is doing
 a synchronous write and lots of other scenarios.

I don't think it will matter in too many situations. If the process is
doing a synchronous write, nothing is guaranteed until the syscall
returns success...
-
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH 4/5] Add lock_page_killable

2007-10-24 Thread Matthew Wilcox
and associated infrastructure such as sync_page_killable and
fatal_signal_pending.  Use lock_page_killable in do_generic_mapping_read()
to allow us to kill `cat' of a file on an NFS-mounted filesystem.

Signed-off-by: Matthew Wilcox <[EMAIL PROTECTED]>
---
 include/linux/pagemap.h |   14 ++
 include/linux/sched.h   |9 -
 kernel/signal.c |5 +
 mm/filemap.c|   25 +
 4 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index db8a410..4b62a10 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -157,6 +157,7 @@ static inline pgoff_t linear_page_index(struct 
vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
@@ -171,6 +172,19 @@ static inline void lock_page(struct page *page)
 }
 
 /*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+   might_sleep();
+   if (TestSetPageLocked(page))
+   return __lock_page_killable(page);
+   return 0;
+}
+
+/*
  * lock_page_nosync should only be used if we can't pin the page's inode.
  * Doesn't play quite so well with block device plugging.
  */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d1e32ec..7ccf92a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1856,7 +1856,14 @@ static inline int signal_pending(struct task_struct *p)
 {
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
-  
+
+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+   return signal_pending(p) && __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
return unlikely(test_thread_flag(TIF_NEED_RESCHED));
diff --git a/kernel/signal.c b/kernel/signal.c
index 0a805b7..50aa183 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -994,6 +994,11 @@ void zap_other_threads(struct task_struct *p)
}
 }
 
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+   return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
diff --git a/mm/filemap.c b/mm/filemap.c
index 5209e47..6cccb4b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -172,6 +172,12 @@ static int sync_page(void *word)
return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+   sync_page(word);
+   return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:   address space structure to write
@@ -576,6 +582,14 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int fastcall __lock_page_killable(struct page *page)
+{
+   DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+   return __wait_on_bit_lock(page_waitqueue(page), &wait,
+   sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
@@ -967,7 +981,8 @@ page_ok:
 
 page_not_up_to_date:
/* Get exclusive access to the page ... */
-   lock_page(page);
+   if (lock_page_killable(page))
+   goto readpage_eio;
 
/* Did it get truncated before we got the lock? */
if (!page->mapping) {
@@ -995,7 +1010,8 @@ readpage:
}
 
if (!PageUptodate(page)) {
-   lock_page(page);
+   if (lock_page_killable(page))
+   goto readpage_eio;
if (!PageUptodate(page)) {
if (page->mapping == NULL) {
/*
@@ -1006,15 +1022,16 @@ readpage:
goto find_page;
}
unlock_page(page);
-   error = -EIO;
shrink_readahead_size_eio(filp, ra);
-   goto readpage_error;
+   goto readpage_eio;
}
unlock_page(page);
}
 
goto page_ok;
 
+readpage_eio:
+   error = -EIO;
 readpage_error:
/* UHHUH! A synchronous read error occurred. Report it */
desc->error = error;
-- 
1.4.4.2

-
To unsubscribe from this list: send the line unsubscribe 

[PATCH 4/5] Add lock_page_killable

2007-10-18 Thread Matthew Wilcox
and associated infrastructure such as sync_page_killable and
fatal_signal_pending.  Use lock_page_killable in do_generic_mapping_read()
to allow us to kill `cat' of a file on an NFS-mounted filesystem.

Signed-off-by: Matthew Wilcox <[EMAIL PROTECTED]>
---
 include/linux/pagemap.h |   14 ++
 include/linux/sched.h   |9 -
 kernel/signal.c |5 +
 mm/filemap.c|   25 +
 4 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index db8a410..4b62a10 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -157,6 +157,7 @@ static inline pgoff_t linear_page_index(struct 
vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
@@ -171,6 +172,19 @@ static inline void lock_page(struct page *page)
 }
 
 /*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+   might_sleep();
+   if (TestSetPageLocked(page))
+   return __lock_page_killable(page);
+   return 0;
+}
+
+/*
  * lock_page_nosync should only be used if we can't pin the page's inode.
  * Doesn't play quite so well with block device plugging.
  */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f02ade4..077893d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1738,7 +1738,14 @@ static inline int signal_pending(struct task_struct *p)
 {
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
-  
+
+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+   return signal_pending(p) && __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
return unlikely(test_thread_flag(TIF_NEED_RESCHED));
diff --git a/kernel/signal.c b/kernel/signal.c
index 3f28990..f89ab8d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -995,6 +995,11 @@ void zap_other_threads(struct task_struct *p)
}
 }
 
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+   return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
diff --git a/mm/filemap.c b/mm/filemap.c
index 79f24a9..1498e24 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -171,6 +171,12 @@ static int sync_page(void *word)
return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+   sync_page(word);
+   return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:   address space structure to write
@@ -575,6 +581,14 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int fastcall __lock_page_killable(struct page *page)
+{
+   DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+   return __wait_on_bit_lock(page_waitqueue(page), &wait,
+   sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
@@ -966,7 +980,8 @@ page_ok:
 
 page_not_up_to_date:
/* Get exclusive access to the page ... */
-   lock_page(page);
+   if (lock_page_killable(page))
+   goto readpage_eio;
 
/* Did it get truncated before we got the lock? */
if (!page->mapping) {
@@ -994,7 +1009,8 @@ readpage:
}
 
if (!PageUptodate(page)) {
-   lock_page(page);
+   if (lock_page_killable(page))
+   goto readpage_eio;
if (!PageUptodate(page)) {
if (page->mapping == NULL) {
/*
@@ -1005,15 +1021,16 @@ readpage:
goto find_page;
}
unlock_page(page);
-   error = -EIO;
shrink_readahead_size_eio(filp, ra);
-   goto readpage_error;
+   goto readpage_eio;
}
unlock_page(page);
}
 
goto page_ok;
 
+readpage_eio:
+   error = -EIO;
 readpage_error:
/* UHHUH! A synchronous read error occurred. Report it */
desc->error = error;
-- 
1.4.4.2

-
To unsubscribe from this list: send the line "unsubscribe 

[PATCH 4/5] Add lock_page_killable

2007-10-18 Thread Matthew Wilcox
and associated infrastructure such as sync_page_killable and
fatal_signal_pending.  Use lock_page_killable in do_generic_mapping_read()
to allow us to kill `cat' of a file on an NFS-mounted filesystem.

Signed-off-by: Matthew Wilcox <[EMAIL PROTECTED]>
---
 include/linux/pagemap.h |   14 ++
 include/linux/sched.h   |9 -
 kernel/signal.c |5 +
 mm/filemap.c|   25 +
 4 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index db8a410..4b62a10 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -157,6 +157,7 @@ static inline pgoff_t linear_page_index(struct 
vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
@@ -171,6 +172,19 @@ static inline void lock_page(struct page *page)
 }
 
 /*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+   might_sleep();
+   if (TestSetPageLocked(page))
+   return __lock_page_killable(page);
+   return 0;
+}
+
+/*
  * lock_page_nosync should only be used if we can't pin the page's inode.
  * Doesn't play quite so well with block device plugging.
  */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f02ade4..077893d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1738,7 +1738,14 @@ static inline int signal_pending(struct task_struct *p)
 {
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
-  
+
+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+   return signal_pending(p) && __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
return unlikely(test_thread_flag(TIF_NEED_RESCHED));
diff --git a/kernel/signal.c b/kernel/signal.c
index 3f28990..f89ab8d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -995,6 +995,11 @@ void zap_other_threads(struct task_struct *p)
}
 }
 
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+   return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
diff --git a/mm/filemap.c b/mm/filemap.c
index 79f24a9..1498e24 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -171,6 +171,12 @@ static int sync_page(void *word)
return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+   sync_page(word);
+   return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:   address space structure to write
@@ -575,6 +581,14 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int fastcall __lock_page_killable(struct page *page)
+{
+   DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+   return __wait_on_bit_lock(page_waitqueue(page), &wait,
+   sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
@@ -966,7 +980,8 @@ page_ok:
 
 page_not_up_to_date:
/* Get exclusive access to the page ... */
-   lock_page(page);
+   if (lock_page_killable(page))
+   goto readpage_eio;
 
/* Did it get truncated before we got the lock? */
if (!page->mapping) {
@@ -994,7 +1009,8 @@ readpage:
}
 
if (!PageUptodate(page)) {
-   lock_page(page);
+   if (lock_page_killable(page))
+   goto readpage_eio;
if (!PageUptodate(page)) {
if (page->mapping == NULL) {
/*
@@ -1005,15 +1021,16 @@ readpage:
goto find_page;
}
unlock_page(page);
-   error = -EIO;
shrink_readahead_size_eio(filp, ra);
-   goto readpage_error;
+   goto readpage_eio;
}
unlock_page(page);
}
 
goto page_ok;
 
+readpage_eio:
+   error = -EIO;
 readpage_error:
/* UHHUH! A synchronous read error occurred. Report it */
desc->error = error;
-- 
1.4.4.2

-
To unsubscribe from this list: send the line unsubscribe 

[PATCH 4/5] Add lock_page_killable

2007-09-01 Thread Matthew Wilcox
and associated infrastructure such as sync_page_killable and
fatal_signal_pending.  Use lock_page_killable in do_generic_mapping_read()
to allow us to kill `cat' of a file on an NFS-mounted filesystem.

Signed-off-by: Matthew Wilcox <[EMAIL PROTECTED]>
---
 include/linux/pagemap.h |   14 ++
 include/linux/sched.h   |9 -
 kernel/signal.c |5 +
 mm/filemap.c|   25 +
 4 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 8a83537..8b4f533 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -154,6 +154,7 @@ static inline pgoff_t linear_page_index(struct 
vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
@@ -168,6 +169,19 @@ static inline void lock_page(struct page *page)
 }
 
 /*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+   might_sleep();
+   if (TestSetPageLocked(page))
+   return __lock_page_killable(page);
+   return 0;
+}
+
+/*
  * lock_page_nosync should only be used if we can't pin the page's inode.
  * Doesn't play quite so well with block device plugging.
  */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6769179..e6f20da 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1769,7 +1769,14 @@ static inline int signal_pending(struct task_struct *p)
 {
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
-  
+
+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+   return signal_pending(p) && __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
return unlikely(test_thread_flag(TIF_NEED_RESCHED));
diff --git a/kernel/signal.c b/kernel/signal.c
index 986ba10..2b4fe29 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1003,6 +1003,11 @@ void zap_other_threads(struct task_struct *p)
}
 }
 
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+   return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
diff --git a/mm/filemap.c b/mm/filemap.c
index 90b657b..235f092 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -170,6 +170,12 @@ static int sync_page(void *word)
return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+   sync_page(word);
+   return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:   address space structure to write
@@ -574,6 +580,14 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int fastcall __lock_page_killable(struct page *page)
+{
+   DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+   return __wait_on_bit_lock(page_waitqueue(page), &wait,
+   sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
@@ -975,7 +989,8 @@ page_ok:
 
 page_not_up_to_date:
/* Get exclusive access to the page ... */
-   lock_page(page);
+   if (lock_page_killable(page))
+   goto readpage_eio;
 
/* Did it get truncated before we got the lock? */
if (!page->mapping) {
@@ -1003,7 +1018,8 @@ readpage:
}
 
if (!PageUptodate(page)) {
-   lock_page(page);
+   if (lock_page_killable(page))
+   goto readpage_eio;
if (!PageUptodate(page)) {
if (page->mapping == NULL) {
/*
@@ -1014,15 +1030,16 @@ readpage:
goto find_page;
}
unlock_page(page);
-   error = -EIO;
shrink_readahead_size_eio(filp, &ra);
-   goto readpage_error;
+   goto readpage_eio;
}
unlock_page(page);
}
 
goto page_ok;
 
+readpage_eio:
+   error = -EIO;
 readpage_error:
/* UHHUH! A synchronous read error occurred. Report it */
desc->error = error;
-- 
1.4.4.2

-
To unsubscribe from this list: send the line "unsubscribe 

[PATCH 4/5] Add lock_page_killable

2007-09-01 Thread Matthew Wilcox
and associated infrastructure such as sync_page_killable and
fatal_signal_pending.  Use lock_page_killable in do_generic_mapping_read()
to allow us to kill `cat' of a file on an NFS-mounted filesystem.

Signed-off-by: Matthew Wilcox <[EMAIL PROTECTED]>
---
 include/linux/pagemap.h |   14 ++
 include/linux/sched.h   |9 -
 kernel/signal.c |5 +
 mm/filemap.c|   25 +
 4 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 8a83537..8b4f533 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -154,6 +154,7 @@ static inline pgoff_t linear_page_index(struct 
vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
@@ -168,6 +169,19 @@ static inline void lock_page(struct page *page)
 }
 
 /*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+   might_sleep();
+   if (TestSetPageLocked(page))
+   return __lock_page_killable(page);
+   return 0;
+}
+
+/*
  * lock_page_nosync should only be used if we can't pin the page's inode.
  * Doesn't play quite so well with block device plugging.
  */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6769179..e6f20da 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1769,7 +1769,14 @@ static inline int signal_pending(struct task_struct *p)
 {
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
-  
+
+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+   return signal_pending(p) && __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
return unlikely(test_thread_flag(TIF_NEED_RESCHED));
diff --git a/kernel/signal.c b/kernel/signal.c
index 986ba10..2b4fe29 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1003,6 +1003,11 @@ void zap_other_threads(struct task_struct *p)
}
 }
 
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+   return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
diff --git a/mm/filemap.c b/mm/filemap.c
index 90b657b..235f092 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -170,6 +170,12 @@ static int sync_page(void *word)
return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+   sync_page(word);
+   return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:   address space structure to write
@@ -574,6 +580,14 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int fastcall __lock_page_killable(struct page *page)
+{
+   DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+   return __wait_on_bit_lock(page_waitqueue(page), &wait,
+   sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
@@ -975,7 +989,8 @@ page_ok:
 
 page_not_up_to_date:
/* Get exclusive access to the page ... */
-   lock_page(page);
+   if (lock_page_killable(page))
+   goto readpage_eio;
 
/* Did it get truncated before we got the lock? */
if (!page->mapping) {
@@ -1003,7 +1018,8 @@ readpage:
}
 
if (!PageUptodate(page)) {
-   lock_page(page);
+   if (lock_page_killable(page))
+   goto readpage_eio;
if (!PageUptodate(page)) {
if (page->mapping == NULL) {
/*
@@ -1014,15 +1030,16 @@ readpage:
goto find_page;
}
unlock_page(page);
-   error = -EIO;
shrink_readahead_size_eio(filp, &ra);
-   goto readpage_error;
+   goto readpage_eio;
}
unlock_page(page);
}
 
goto page_ok;
 
+readpage_eio:
+   error = -EIO;
 readpage_error:
/* UHHUH! A synchronous read error occurred. Report it */
desc->error = error;
-- 
1.4.4.2

-
To unsubscribe from this list: send the line