The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=68cc77a3b73ffda1e8ac891b9852faca833e11b7

commit 68cc77a3b73ffda1e8ac891b9852faca833e11b7
Author:     Mark Johnston <[email protected]>
AuthorDate: 2023-12-26 01:42:49 +0000
Commit:     Mark Johnston <[email protected]>
CommitDate: 2023-12-26 02:04:01 +0000

    mpr: Handle errors from copyout() in ioctl handlers
    
    In preparation for adding a __result_use_check annotation to copyin()
    and related functions, start checking for errors from copyout() in
    the mpr(4) user command handler.  This should make it easier to catch
    bugs.
    
    Reviewed by:    imp, asomers
    MFC after:      1 month
    Differential Revision:  https://reviews.freebsd.org/D43177
---
 sys/dev/mpr/mpr_user.c | 39 ++++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 13 deletions(-)

diff --git a/sys/dev/mpr/mpr_user.c b/sys/dev/mpr/mpr_user.c
index f2847ae36d66..5245129ce8c1 100644
--- a/sys/dev/mpr/mpr_user.c
+++ b/sys/dev/mpr/mpr_user.c
@@ -714,9 +714,9 @@ mpr_user_command(struct mpr_softc *sc, struct mpr_usr_command *cmd)
        }       
 
        mpr_unlock(sc);
-       copyout(rpl, cmd->rpl, sz);
-       if (buf != NULL)
-               copyout(buf, cmd->buf, cmd->len);
+       err = copyout(rpl, cmd->rpl, sz);
+       if (buf != NULL && err == 0)
+               err = copyout(buf, cmd->buf, cmd->len);
        mpr_dprint(sc, MPR_USER, "%s: reply size %d\n", __func__, sz);
 
 RetFreeUnlocked:
@@ -848,7 +848,7 @@ mpr_user_pass_thru(struct mpr_softc *sc, mpr_pass_thru_t *data)
                /*
                 * Copy the reply data and sense data to user space.
                 */
-               if ((cm != NULL) && (cm->cm_reply != NULL)) {
+               if (err == 0 && cm != NULL && cm->cm_reply != NULL) {
                        rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
                        sz = rpl->MsgLength * 4;
 
@@ -858,7 +858,7 @@ mpr_user_pass_thru(struct mpr_softc *sc, mpr_pass_thru_t *data)
                                    __func__, data->ReplySize, sz);
                        }
                        mpr_unlock(sc);
-                       copyout(cm->cm_reply, PTRIN(data->PtrReply),
+                       err = copyout(cm->cm_reply, PTRIN(data->PtrReply),
                            MIN(sz, data->ReplySize));
                        mpr_lock(sc);
                }
@@ -1073,7 +1073,7 @@ mpr_user_pass_thru(struct mpr_softc *sc, mpr_pass_thru_t *data)
        /*
         * Copy the reply data and sense data to user space.
         */
-       if (cm->cm_reply != NULL) {
+       if (err == 0 && cm->cm_reply != NULL) {
                rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
                sz = rpl->MsgLength * 4;
 
@@ -1083,12 +1083,16 @@ mpr_user_pass_thru(struct mpr_softc *sc, mpr_pass_thru_t *data)
                            data->ReplySize, sz);
                }
                mpr_unlock(sc);
-               copyout(cm->cm_reply, PTRIN(data->PtrReply),
+               err = copyout(cm->cm_reply, PTRIN(data->PtrReply),
                    MIN(sz, data->ReplySize));
+               if (err != 0)
+                       mpr_dprint(sc, MPR_FAULT, "%s: failed to copy "
+                           "IOCTL data to user space\n", __func__);
                mpr_lock(sc);
 
-               if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
-                   (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+               if (err == 0 &&
+                   (function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+                   function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
                        if (((MPI2_SCSI_IO_REPLY *)rpl)->SCSIState &
                            MPI2_SCSI_STATE_AUTOSENSE_VALID) {
                                sense_len =
@@ -1096,8 +1100,13 @@ mpr_user_pass_thru(struct mpr_softc *sc, mpr_pass_thru_t *data)
                                    SenseCount)), sizeof(struct
                                    scsi_sense_data));
                                mpr_unlock(sc);
-                               copyout(cm->cm_sense, (PTRIN(data->PtrReply +
-                                   sizeof(MPI2_SCSI_IO_REPLY))), sense_len);
+                               err = copyout(cm->cm_sense,
+                                   PTRIN(data->PtrReply +
+                                   sizeof(MPI2_SCSI_IO_REPLY)), sense_len);
+                               if (err != 0)
+                                       mpr_dprint(sc, MPR_FAULT,
+                                           "%s: failed to copy IOCTL data to "
+                                           "user space\n", __func__);
                                mpr_lock(sc);
                        }
                }
@@ -1116,7 +1125,7 @@ mpr_user_pass_thru(struct mpr_softc *sc, mpr_pass_thru_t *data)
                 * the same IOCTL structure and sense buffers can be used for
                 * NVMe.
                 */
-               if (function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
+               if (err == 0 && function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
                        if (cm->nvme_error_response == NULL) {
                                mpr_dprint(sc, MPR_INFO, "NVMe Error Response "
                                    "buffer is NULL. Response data will not be "
@@ -1130,9 +1139,13 @@ mpr_user_pass_thru(struct mpr_softc *sc, mpr_pass_thru_t *data)
                        sz = MIN(le32toh(nvme_error_reply->ErrorResponseCount),
                            NVME_ERROR_RESPONSE_SIZE);
                        mpr_unlock(sc);
-                       copyout(cm->cm_sense,
+                       err = copyout(cm->cm_sense,
                            (PTRIN(data->PtrReply +
                            sizeof(MPI2_SCSI_IO_REPLY))), sz);
+                       if (err != 0)
+                               mpr_dprint(sc, MPR_FAULT,
+                                   "%s: failed to copy IOCTL data to "
+                                   "user space\n", __func__);
                        mpr_lock(sc);
                }
        }
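
For context, __result_use_check is FreeBSD's wrapper around the compiler's
warn_unused_result attribute, so once copyin() and copyout() carry it, an
ignored return value becomes a compile-time warning. The pattern adopted in
the diff records the first copyout() failure and skips the remaining copies,
so the error (typically EFAULT) propagates back to the ioctl caller. A
minimal sketch of that convention, using a hypothetical helper rather than
the actual mpr(4) code:

#include <sys/param.h>
#include <sys/systm.h>

/*
 * Hypothetical helper (not part of mpr(4)) illustrating the
 * error-chaining convention used in the commit: record the first
 * copyout() failure, skip later copies, and return the error so the
 * ioctl caller sees it.
 */
static int
example_reply_copyout(void *rpl, void *urpl, size_t rplsz,
    void *buf, void *ubuf, size_t buflen)
{
	int err;

	/* Copy the reply; remember any failure. */
	err = copyout(rpl, urpl, rplsz);
	/* Only attempt the data buffer if the first copy succeeded. */
	if (buf != NULL && err == 0)
		err = copyout(buf, ubuf, buflen);
	return (err);
}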
