This is a note to let you know that I've just added the patch titled

    libceph: check for invalid mapping

to the 3.4-stable tree which can be found at:
    
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     0113-libceph-check-for-invalid-mapping.patch
and it can be found in the queue-3.4 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <[email protected]> know about it.


From 4a8f60298277d7f7d3347ebae67b5a8705f36a9c Mon Sep 17 00:00:00 2001
From: Sage Weil <[email protected]>
Date: Mon, 24 Sep 2012 20:59:48 -0700
Subject: libceph: check for invalid mapping

From: Sage Weil <[email protected]>

(cherry picked from commit d63b77f4c552cc3a20506871046ab0fcbc332609)

If we encounter an invalid (e.g., zeroed) mapping, return an error
and avoid a divide by zero.

Signed-off-by: Sage Weil <[email protected]>
Reviewed-by: Alex Elder <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
 include/linux/ceph/osd_client.h |    2 +-
 include/linux/ceph/osdmap.h     |    6 +++---
 net/ceph/osd_client.c           |   32 ++++++++++++++++++++------------
 net/ceph/osdmap.c               |   18 ++++++++++++++++--
 4 files changed, 40 insertions(+), 18 deletions(-)

--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -207,7 +207,7 @@ extern void ceph_osdc_handle_reply(struc
 extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
                                 struct ceph_msg *msg);
 
-extern void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
+extern int ceph_calc_raw_layout(struct ceph_osd_client *osdc,
                        struct ceph_file_layout *layout,
                        u64 snapid,
                        u64 off, u64 *plen, u64 *bno,
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -111,9 +111,9 @@ extern struct ceph_osdmap *osdmap_apply_
 extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
 
 /* calculate mapping of a file extent to an object */
-extern void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
-                                         u64 off, u64 *plen,
-                                         u64 *bno, u64 *oxoff, u64 *oxlen);
+extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
+                                        u64 off, u64 *plen,
+                                        u64 *bno, u64 *oxoff, u64 *oxlen);
 
 /* calculate mapping of object to a placement group */
 extern int ceph_calc_object_layout(struct ceph_object_layout *ol,
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -52,7 +52,7 @@ static int op_has_extent(int op)
                op == CEPH_OSD_OP_WRITE);
 }
 
-void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
+int ceph_calc_raw_layout(struct ceph_osd_client *osdc,
                        struct ceph_file_layout *layout,
                        u64 snapid,
                        u64 off, u64 *plen, u64 *bno,
@@ -62,12 +62,15 @@ void ceph_calc_raw_layout(struct ceph_os
        struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
        u64 orig_len = *plen;
        u64 objoff, objlen;    /* extent in object */
+       int r;
 
        reqhead->snapid = cpu_to_le64(snapid);
 
        /* object extent? */
-       ceph_calc_file_object_mapping(layout, off, plen, bno,
-                                     &objoff, &objlen);
+       r = ceph_calc_file_object_mapping(layout, off, plen, bno,
+                                         &objoff, &objlen);
+       if (r < 0)
+               return r;
        if (*plen < orig_len)
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
@@ -83,7 +86,7 @@ void ceph_calc_raw_layout(struct ceph_os
 
        dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
             *bno, objoff, objlen, req->r_num_pages);
-
+       return 0;
 }
 EXPORT_SYMBOL(ceph_calc_raw_layout);
 
@@ -112,20 +115,25 @@ EXPORT_SYMBOL(ceph_calc_raw_layout);
  *
  * fill osd op in request message.
  */
-static void calc_layout(struct ceph_osd_client *osdc,
-                       struct ceph_vino vino,
-                       struct ceph_file_layout *layout,
-                       u64 off, u64 *plen,
-                       struct ceph_osd_request *req,
-                       struct ceph_osd_req_op *op)
+static int calc_layout(struct ceph_osd_client *osdc,
+                      struct ceph_vino vino,
+                      struct ceph_file_layout *layout,
+                      u64 off, u64 *plen,
+                      struct ceph_osd_request *req,
+                      struct ceph_osd_req_op *op)
 {
        u64 bno;
+       int r;
 
-       ceph_calc_raw_layout(osdc, layout, vino.snap, off,
-                            plen, &bno, req, op);
+       r = ceph_calc_raw_layout(osdc, layout, vino.snap, off,
+                                plen, &bno, req, op);
+       if (r < 0)
+               return r;
 
        snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
        req->r_oid_len = strlen(req->r_oid);
+
+       return r;
 }
 
 /*
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -952,7 +952,7 @@ bad:
  * for now, we write only a single su, until we can
  * pass a stride back to the caller.
  */
-void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
+int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
                                   u64 off, u64 *plen,
                                   u64 *ono,
                                   u64 *oxoff, u64 *oxlen)
@@ -966,11 +966,17 @@ void ceph_calc_file_object_mapping(struc
 
        dout("mapping %llu~%llu  osize %u fl_su %u\n", off, *plen,
             osize, su);
+       if (su == 0 || sc == 0)
+               goto invalid;
        su_per_object = osize / su;
+       if (su_per_object == 0)
+               goto invalid;
        dout("osize %u / su %u = su_per_object %u\n", osize, su,
             su_per_object);
 
-       BUG_ON((su & ~PAGE_MASK) != 0);
+       if ((su & ~PAGE_MASK) != 0)
+               goto invalid;
+
        /* bl = *off / su; */
        t = off;
        do_div(t, su);
@@ -998,6 +1004,14 @@ void ceph_calc_file_object_mapping(struc
        *plen = *oxlen;
 
        dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
+       return 0;
+
+invalid:
+       dout(" invalid layout\n");
+       *ono = 0;
+       *oxoff = 0;
+       *oxlen = 0;
+       return -EINVAL;
 }
 EXPORT_SYMBOL(ceph_calc_file_object_mapping);
 


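[ Editor's note, not part of the patch above: the standalone userspace C
sketch below models the validity checks the patch adds to
ceph_calc_file_object_mapping(). With a zeroed layout, the old code reached
"osize / su" and divided by zero; the new code returns -EINVAL first. The
struct and function names here (layout, map_file_extent) are hypothetical
simplifications of the kernel's ceph_file_layout and mapping logic. ]

	/*
	 * Illustration only: mirror the guards the patch introduces before
	 * any division by the stripe unit or stripe count.
	 */
	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096u
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	/* simplified stand-in for struct ceph_file_layout (host byte order) */
	struct layout {
		uint32_t object_size;	/* fl_object_size */
		uint32_t stripe_unit;	/* fl_stripe_unit */
		uint32_t stripe_count;	/* fl_stripe_count */
	};

	static int map_file_extent(const struct layout *l, uint64_t off,
				   uint64_t *ono, uint64_t *oxoff)
	{
		uint32_t su = l->stripe_unit;
		uint32_t sc = l->stripe_count;
		uint32_t su_per_object, bl, stripeno, stripepos, objsetno;

		if (su == 0 || sc == 0)
			return -EINVAL;	/* zeroed mapping: avoid div by 0 */
		su_per_object = l->object_size / su;
		if (su_per_object == 0)
			return -EINVAL;	/* object smaller than stripe unit */
		if ((su & ~PAGE_MASK) != 0)
			return -EINVAL;	/* stripe unit not page-aligned */

		bl = off / su;		/* stripe-unit (block) index */
		stripeno = bl / sc;	/* stripe number */
		stripepos = bl % sc;	/* object position within stripe */
		objsetno = stripeno / su_per_object;

		*ono = (uint64_t)objsetno * sc + stripepos;
		*oxoff = (uint64_t)(stripeno % su_per_object) * su + off % su;
		return 0;
	}

	int main(void)
	{
		struct layout zeroed = { 0, 0, 0 };
		struct layout sane = { 4u << 20, 1u << 20, 2 }; /* 4M obj, 1M su */
		uint64_t ono, oxoff;

		/* invalid layout is now rejected instead of faulting */
		printf("zeroed layout: %d\n",
		       map_file_extent(&zeroed, 0, &ono, &oxoff));
		if (map_file_extent(&sane, 5u << 20, &ono, &oxoff) == 0)
			printf("off 5M -> object %llu, offset %llu\n",
			       (unsigned long long)ono,
			       (unsigned long long)oxoff);
		return 0;
	}

[ In the kernel, the error now propagates up through ceph_calc_raw_layout()
and calc_layout(), whose callers in this patch check for r < 0 rather than
assuming the mapping always succeeds. ]
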
Patches currently in stable-queue which might be from [email protected] are

queue-3.4/0073-libceph-clear-CONNECTING-in-ceph_con_close.patch
queue-3.4/0020-ceph-ensure-auth-ops-are-defined-before-use.patch
queue-3.4/0025-ceph-add-auth-buf-in-prepare_write_connect.patch
queue-3.4/0021-ceph-have-get_authorizer-methods-return-pointers.patch
queue-3.4/0026-libceph-avoid-unregistering-osd-request-when-not-reg.patch
queue-3.4/0077-libceph-distinguish-two-phases-of-connect-sequence.patch
queue-3.4/0045-libceph-provide-osd-number-when-creating-osd.patch
queue-3.4/0059-libceph-transition-socket-state-prior-to-actual-conn.patch
queue-3.4/0084-libceph-prevent-the-race-of-incoming-work-during-tea.patch
queue-3.4/0005-crush-fix-memory-leak-when-destroying-tree-buckets.patch
queue-3.4/0002-crush-adjust-local-retry-threshold.patch
queue-3.4/0111-libceph-avoid-NULL-kref_put-when-osd-reset-races-wit.patch
queue-3.4/0091-libceph-fix-fault-locking-close-socket-on-lossy-faul.patch
queue-3.4/0088-libceph-re-initialize-bio_iter-on-start-of-message-r.patch
queue-3.4/0023-ceph-return-pointer-from-prepare_connect_authorizer.patch
queue-3.4/0055-libceph-make-ceph_con_revoke_message-a-msg-op.patch
queue-3.4/0090-libceph-reset-connection-retry-on-successfully-negot.patch
queue-3.4/0054-libceph-make-ceph_con_revoke-a-msg-operation.patch
queue-3.4/0098-libceph-clean-up-con-flags.patch
queue-3.4/0093-libceph-move-ceph_con_send-closed-check-under-the-co.patch
queue-3.4/0066-libceph-move-init_bio_-functions-up.patch
queue-3.4/0018-ceph-define-ceph_auth_handshake-type.patch
queue-3.4/0105-libceph-recheck-con-state-after-allocating-incoming-.patch
queue-3.4/0063-libceph-encapsulate-out-message-data-setup.patch
queue-3.4/0076-libceph-separate-banner-and-connect-writes.patch
queue-3.4/0040-libceph-rename-socket-callbacks.patch
queue-3.4/0011-ceph-messenger-reset-connection-kvec-caller.patch
queue-3.4/0032-libceph-fix-messenger-retry.patch
queue-3.4/0070-libceph-don-t-change-socket-state-on-sock-event.patch
queue-3.4/0061-libceph-use-con-get-put-methods.patch
queue-3.4/0074-libceph-clear-NEGOTIATING-when-done.patch
queue-3.4/0019-ceph-messenger-reduce-args-to-create_authorizer.patch
queue-3.4/0041-libceph-rename-kvec_reset-and-kvec_add-functions.patch
queue-3.4/0047-libceph-embed-ceph-connection-structure-in-mon_clien.patch
queue-3.4/0029-libceph-use-con-get-put-ops-from-osd_client.patch
queue-3.4/0051-libceph-tweak-ceph_alloc_msg.patch
queue-3.4/0064-libceph-encapsulate-advancing-msg-page.patch
queue-3.4/0109-libceph-only-kunmap-kmapped-pages.patch
queue-3.4/0075-libceph-define-and-use-an-explicit-CONNECTED-state.patch
queue-3.4/0082-libceph-allow-sock-transition-from-CONNECTING-to-CLO.patch
queue-3.4/0114-ceph-avoid-32-bit-page-index-overflow.patch
queue-3.4/0015-ceph-messenger-check-prepare_write_connect-result.patch
queue-3.4/0003-crush-be-more-tolerant-of-nonsensical-crush-maps.patch
queue-3.4/0028-libceph-osd_client-don-t-drop-reply-reference-too-ea.patch
queue-3.4/0014-ceph-don-t-set-WRITE_PENDING-too-early.patch
queue-3.4/0049-libceph-init-monitor-connection-when-opening.patch
queue-3.4/0016-ceph-messenger-rework-prepare_connect_authorizer.patch
queue-3.4/0097-libceph-replace-connection-state-bits-with-states.patch
queue-3.4/0068-libceph-don-t-use-bio_iter-as-a-flag.patch
queue-3.4/0062-libceph-drop-ceph_con_get-put-helpers-and-nref-membe.patch
queue-3.4/0089-libceph-protect-ceph_con_open-with-mutex.patch
queue-3.4/0048-libceph-drop-connection-refcounting-for-mon_client.patch
queue-3.4/0031-libceph-flush-msgr-queue-during-mon_client-shutdown.patch
queue-3.4/0094-libceph-drop-gratuitous-socket-close-calls-in-con_wo.patch
queue-3.4/0101-libceph-revoke-mon_client-messages-on-session-restar.patch
queue-3.4/0013-ceph-drop-msgr-argument-from-prepare_write_connect.patch
queue-3.4/0080-libceph-set-peer-name-on-con_open-not-init.patch
queue-3.4/0103-libceph-avoid-dropping-con-mutex-before-fault.patch
queue-3.4/0043-libceph-start-separating-connection-flags-from-state.patch
queue-3.4/0046-libceph-set-CLOSED-state-bit-in-con_init.patch
queue-3.4/0085-libceph-report-socket-read-write-error-message.patch
queue-3.4/0083-libceph-initialize-msgpool-message-types.patch
queue-3.4/0092-libceph-move-msgr-clear_standby-under-con-mutex-prot.patch
queue-3.4/0095-libceph-close-socket-directly-from-ceph_con_close.patch
queue-3.4/0113-libceph-check-for-invalid-mapping.patch
queue-3.4/0009-ceph-messenger-change-read_partial-to-take-end-arg.patch
queue-3.4/0104-libceph-change-ceph_con_in_msg_alloc-convention-to-b.patch
queue-3.4/0096-libceph-drop-unnecessary-CLOSED-check-in-socket-stat.patch
queue-3.4/0017-ceph-messenger-check-return-from-get_authorizer.patch
queue-3.4/0086-libceph-fix-mutex-coverage-for-ceph_con_close.patch
queue-3.4/0001-crush-clean-up-types-const-ness.patch
queue-3.4/0072-libceph-don-t-touch-con-state-in-con_close_socket.patch
queue-3.4/0102-libceph-verify-state-after-retaking-con-lock-after-d.patch
queue-3.4/0037-ceph-check-PG_Private-flag-before-accessing-page-pri.patch
queue-3.4/0044-libceph-start-tracking-connection-socket-state.patch
queue-3.4/0099-libceph-clear-all-flags-on-con_close.patch
queue-3.4/0071-libceph-just-set-SOCK_CLOSED-when-state-changes.patch
queue-3.4/0022-ceph-use-info-returned-by-get_authorizer.patch
queue-3.4/0065-libceph-don-t-mark-footer-complete-before-it-is.patch
queue-3.4/0107-libceph-delay-debugfs-initialization-until-we-learn-.patch
queue-3.4/0110-rbd-reset-BACKOFF-if-unable-to-re-queue.patch
queue-3.4/0027-libceph-fix-pg_temp-updates.patch
queue-3.4/0079-libceph-add-some-fine-ASCII-art.patch
queue-3.4/0112-ceph-Fix-oops-when-handling-mdsmap-that-decreases-ma.patch
queue-3.4/0008-ceph-messenger-update-to-in-read_partial-caller.patch
queue-3.4/0007-ceph-messenger-use-read_partial-in-read_partial_mess.patch
queue-3.4/0078-libceph-small-changes-to-messenger.c.patch
queue-3.4/0010-libceph-don-t-reset-kvec-in-prepare_write_banner.patch
queue-3.4/0087-libceph-resubmit-linger-ops-when-pg-mapping-changes.patch
queue-3.4/0052-libceph-have-messages-point-to-their-connection.patch
queue-3.4/0067-libceph-move-init-of-bio_iter.patch
queue-3.4/0081-libceph-initialize-mon_client-con-only-once.patch
queue-3.4/0050-libceph-fully-initialize-connection-in-con_init.patch
queue-3.4/0053-libceph-have-messages-take-a-connection-reference.patch
queue-3.4/0100-libceph-fix-handling-of-immediate-socket-connect-fai.patch
queue-3.4/0012-ceph-messenger-send-banner-in-process_connect.patch
queue-3.4/0024-ceph-rename-prepare_connect_authorizer.patch
queue-3.4/0106-libceph-fix-crypto-key-null-deref-memory-leak.patch
queue-3.4/0042-libceph-embed-ceph-messenger-structure-in-ceph_clien.patch
queue-3.4/0069-libceph-SOCK_CLOSED-is-a-flag-not-a-state.patch
queue-3.4/0004-crush-fix-tree-node-weight-lookup.patch
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html