Hi Wang Bowen,

Thanks for sending me the updated files.

I think I found the problem with the move/rename. When porting to 6.6.23 I had 
to adapt the initialization of 'struct renamedata rd', since the struct was 
changed in commit abf0857.
Previously I had only updated 'rd.old_mnt_userns = &init_user_ns;' to 
'rd.old_mnt_idmap = &nop_mnt_idmap;'. The error disappeared when I also added 
'rd.new_mnt_idmap = &nop_mnt_idmap;'. Our full rpmsgfs_rename_handler is 
included below.
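
For reference, the version-guarded assignments now read like this (an excerpt 
of the relevant lines from the handler below):

#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0)
        rd.old_mnt_idmap = &nop_mnt_idmap;
        rd.new_mnt_idmap = &nop_mnt_idmap;  /* this assignment was missing */
#else
        rd.old_mnt_userns = &init_user_ns;
#endif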

Do you also have the file that contains the rpmsg_release_tx_buffer() function? 
It could not be resolved; I guess it is in rpmsg_core.c.

Kind regards,
Andre Heinemans

static int rpmsgfs_rename_handler(struct rpmsg_device *rpdev,
                                  void *data, int len, void *priv, u32 src)
{
        struct rpmsgfs_rename *msg = data;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)
        struct renamedata rd;
#endif
        struct path oldpath, newpath;
        struct dentry *newdentry;
        char *oldname, *newname;
        int ret, oldlen;
        oldname = msg->pathname;
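        /* the new path follows the old path, padded to an 8-byte boundary */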
        oldlen  = (strlen(msg->pathname) + 1 + 0x7) & ~0x7;
        newname = msg->pathname + oldlen;
        ret = kern_path(oldname, 0, &oldpath);
        if (ret < 0)
                goto fail;
        if (!oldpath.dentry || !oldpath.dentry->d_parent) {
                ret = -ENOENT;
                goto fail1;
        }
        newdentry = kern_path_locked(newname, &newpath);
        if (IS_ERR(newdentry)) {
                ret = PTR_ERR(newdentry);
                goto fail1;
        }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0)
        rd.old_mnt_idmap = &nop_mnt_idmap;
        rd.new_mnt_idmap = &nop_mnt_idmap;
 #else
        rd.old_mnt_userns = &init_user_ns;
 #endif
        rd.old_dir = oldpath.dentry->d_parent->d_inode;
        rd.old_dentry = oldpath.dentry;
        rd.new_dir = d_inode(newpath.dentry);
        rd.new_dentry = newdentry;
        rd.delegated_inode = NULL;
        rd.flags = 0;
        ret = vfs_rename(&rd);
#else
        ret = vfs_rename(oldpath.dentry->d_parent->d_inode, oldpath.dentry,
                         d_inode(newpath.dentry), newdentry, NULL, 0);
#endif
        dput(newdentry);
        inode_unlock(d_inode(newpath.dentry));
        path_put(&newpath);
fail1:
        path_put(&oldpath);
fail:
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

> -----Original Message-----
> From: 汪博文 <wangbow...@xiaomi.com>
> Sent: Friday, 8 November 2024 10:11
> To: dev@nuttx.apache.org; Andre Heinemans <andre.heinem...@nxp.com>
> Subject: Re: [External Mail]RE: [EXT] Re: RPMsg FS on NuttX and Linux
> 
> Hi Andre,
> 
> Sorry for late reply.
> 
> About your questions:
> 
> 1. Are your small fixes to the Linux Rpmsg FS right or not?
> 
> Yes, your fix is right and we will update our internal version.
> 2. Any updates in our internal rpmsgfs and rpmsgtty?
> 
> Actually, only some minor updates:
> 
> - We changed rpmsg_fs and rpmsg_tty to kernel modules, and I have attached
> the new rpmsg_fs.c and rpmsg_tty.c to this email so you can check them;
> - We also added some kernel version checks in rpmsg_fs and rpmsg_tty so that
> they work with both kernel v5.10 and v5.15. There are no other updates in
> rpmsg_fs and rpmsg_tty, but we are planning to implement the rpmsg_fs client
> in Linux (Linux mounting the NuttX file system) and also to port these rpmsg
> services to the Linux kernel v6.6.
> 
> 3. The panic in Linux caused by a move or rename operation?
> 
> We haven't encountered this problem before, but we can try this case on v6.6
> when we port the rpmsg services to the new kernel version.
> 
> Kind regards,
> Wang Bowen
> 
> ---- Replied Message ----
> From     Andre Heinemans <andre.heinem...@nxp.com>
> Date     11/8/2024 00:33
> To       dev@nuttx.apache.org, Xiang Xiao <xiaoxiang781...@gmail.com>,
>          Bowen Wang <bowenwa...@gmail.com>
> Subject  [External Mail]RE: [EXT] Re: RPMsg FS on NuttX and Linux
> 
> Hi Wang Bowen, Xiang Xiao,
> 
> When I want to move a file through rpmsgfs from within NuttX on a filesystem
> hosted on Linux, I get the Linux kernel oops below.
> I used the rpmsgfs Linux implementation provided in this mail thread but
> ported it to 6.6.23.
> 
> It looks like every move or rename gives the error. Do you know what could
> cause the problem?
> 
> Are there maybe any updates of the linux rpmsgfs driver which I could try?
> 
> Kind regards,
> Andre
> 
> [  121.160624] Unable to handle kernel paging request at virtual address ffff8003848c38dc
> [  121.168612] Mem abort info:
> [  121.171464]   ESR = 0x0000000096000005
> [  121.175216]   EC = 0x25: DABT (current EL), IL = 32 bits
> [  121.180568]   SET = 0, FnV = 0
> [  121.183660]   EA = 0, S1PTW = 0
> [  121.186801]   FSC = 0x05: level 1 translation fault
> [  121.191709] Data abort info:
> [  121.194592]   ISV = 0, ISS = 0x00000005, ISS2 = 0x00000000
> [  121.200094]   CM = 0, WnR = 0, TnD = 0, TagAccess = 0
> [  121.205172]   GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0
> [  121.210511] swapper pgtable: 4k pages, 48-bit VAs, pgdp=000000009216d000
> [  121.217236] [ffff8003848c38dc] pgd=100000047ffff003, p4d=100000047ffff003, pud=0000000000000000
> [  121.225975] Internal error: Oops: 0000000096000005 [#1] PREEMPT SMP
> [  121.232241] Modules linked in:
> [  121.235320] CPU: 0 PID: 8 Comm: kworker/0:0 Not tainted 6.6.23-g32082bae40f6 #55
> [  121.242735] Hardware name: NXP i.MX95 19X19 board (DT)
> [  121.247885] Workqueue: imx95-cm7 imx_rproc_vq_work
> [  121.252703] pstate: a0400009 (NzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
> [  121.259678] pc : cmp_map_id+0x4/0x64
> [  121.263266] lr : bsearch+0x50/0xb8
> [  121.266698] sp : ffff8000825d39e0
> [  121.270026] x29: ffff8000825d39e0 x28: ffff0000812c7600 x27: ffff0000824f8900
> [  121.277176] x26: ffff8000825d3bd8 x25: ffff800080158ae0 x24: ffff8000825d3a5c
> [  121.284326] x23: ffff800080fffe1c x22: 000000000000000c x21: 00000000404baf90
> [  121.291476] x20: ffff8003848c38dc x19: 0000000080975f1f x18: ffff800082e7be28
> [  121.298626] x17: 00000000672cba83 x16: 0000000000000fdc x15: 0000000000001000
> [  121.305793] x14: ffffffffffffffff x13: ffff00008086e029 x12: ffff8000825d3a74
> [  121.312943] x11: 000000075b7a4da2 x10: 0000000000000002 x9 : 0000000000000007
> [  121.320102] x8 : ffff00008874affc x7 : 000000000000000c x6 : 0000000000000001
> [  121.327243] x5 : 0000000000000001 x4 : 0000000000000001 x3 : 000000000000000c
> [  121.334385] x2 : 0000000080975f20 x1 : ffff8003848c38dc x0 : ffff8000825d3a5c
> [  121.341536] Call trace:
> [  121.343997]  cmp_map_id+0x4/0x64
> [  121.347238]  map_id_up+0xe4/0xec
> [  121.350479]  from_kuid+0x10/0x1c
> [  121.353712]  from_vfsuid+0x48/0x7c
> [  121.357135]  vfs_rename+0x43c/0xa14
> [  121.360627]  rpmsgfs_rename_handler+0xcc/0x148
> [  121.365091]  rpmsgfs_callback+0x34/0x88
> [  121.368930]  rpmsg_recv_done+0x114/0x368
> [  121.372874]  vring_interrupt+0x74/0x110
> [  121.376721]  rproc_vq_interrupt+0x3c/0x90
> [  121.380751]  imx_rproc_notified_idr_cb+0x18/0x28
> [  121.385397]  idr_for_each+0x68/0xf4
> [  121.388907]  imx_rproc_vq_work+0x24/0x30
> [  121.392850]  process_one_work+0x138/0x260
> [  121.396880]  worker_thread+0x32c/0x438
> [  121.400633]  kthread+0x118/0x11c
> [  121.403883]  ret_from_fork+0x10/0x20
> [  121.407506] Code: 12800000 17ffffdd 00000000 39400004 (29400823)
> [  121.413609] ---[ end trace 0000000000000000 ]---
> 
> 
> 
>       -----Original Message-----
>       From: Andre Heinemans <andre.heinem...@nxp.com>
>       Sent: Friday, 1 November 2024 12:14
>       To: dev@nuttx.apache.org
>       Subject: RE: [EXT] Re: RPMsg FS on NuttX and Linux
> 
>       Hi Wang Bowen, Xiang Xiao
> 
>       The RPMsg fs and tty work wonderfully with the Linux drivers I received
>       from you. I was wondering if there are any updates on the Linux drivers,
>       since the NuttX drivers do get updates quite often. It would be great if
>       you could share these updates.
> 
>       On the rpmsg fs driver in Linux I made a small fix to the file flags
>       mapping in rpmsgfs_open_handler(). In NuttX the O_RDONLY symbol resolves
>       to bit 0 (value 1), but in Linux it has the value 0. When you open a file
>       in NuttX with O_RDWR, on Linux it would result in opening the file as
>       write-only.
> 
>       The change below fixed it.
> 
>       diff --git a/drivers/rpmsg/rpmsg_fs.c b/drivers/rpmsg/rpmsg_fs.c
>       index 3cd35196f408..f354c6ac5ab4 100755
>       --- a/drivers/rpmsg/rpmsg_fs.c
>       +++ b/drivers/rpmsg/rpmsg_fs.c
>       @@ -283,12 +283,15 @@ static int rpmsgfs_open_handler(struct rpmsg_device *rpdev,
>               struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
>               struct rpmsgfs_open *msg = data;
>               struct file *filp;
>       -       int id, flags = 0;
>       +       int id, flags = O_RDONLY;
>       +
>       +       if (msg->flags & RPMSGFS_O_WRONLY) {
>       +               if (msg->flags & RPMSGFS_O_RDONLY)
>       +                       flags = O_RDWR;
>       +               else
>       +                       flags = O_WRONLY;
>       +       }
>       
>       -       if (msg->flags & RPMSGFS_O_RDONLY)
>       -               flags |= O_RDONLY;
>       -       if (msg->flags & RPMSGFS_O_WRONLY)
>       -               flags |= O_WRONLY;
>               if (msg->flags & RPMSGFS_O_CREAT)
>                       flags |= O_CREAT;
>               if (msg->flags & RPMSGFS_O_EXCL)
> 
> 
>       Kind regards,
>       Andre
> 
> 
> 
>               -----Original Message-----
>               From: Bowen Wang <bowenwa...@gmail.com>
>               Sent: Thursday, 27 June 2024 11:26
>               To: dev@nuttx.apache.org
>               Subject: Re: [EXT] Re: RPMsg FS on NuttX and Linux
> 
>               Hi Andre,
> 
>               The attachment is the patch for rpmsgfs. We only implement the
>               rpmsgfs server side in Linux for now; the rpmsgfs client side in
>               Linux is still under development.
> 
>               So we can access Linux's file system in NuttX, but in Linux we
>               can't mount Vela's file system.
> 
> 
>               Kind regards,
> 
>               Wang Bowen
> 
>               Andre Heinemans <andre.heinem...@nxp.com> wrote on Thursday, 27 June 2024 at 17:12:
> 
> 
>               Hi Bowen,
> 
>               I am using 6.6.3. But it would be okay for me if I receive the
>               patches for version 5.15. I will try to adapt them.
> 
>               Thanks in advance!
> 
>               Kind regards,
>               Andre
> 
>               -----Original Message-----
>               From: Bowen Wang <bowenwa...@gmail.com>
>               Sent: Thursday, 27 June 2024 03:46
>               To: dev@nuttx.apache.org
>               Subject: [EXT] Re: RPMsg FS on NuttX and Linux
> 
>               Hi, Andre
> 
>               Which Linux version are you using?
>               We have adapted RPMSGFS on 5.4 and 5.15. We are not sure if it
>               meets your requirements.
> 
>               Kind regards,
>               Wang Bowen
> 
>               Xiang Xiao <xiaoxiang781...@gmail.com> wrote on Tuesday, 25 June 2024 at 22:47:
> 
> 
> 
>                       rpmsg_fs isn't upstreamed to the Linux community yet.
>                       Bowen could give you a patch to try.
> 
>                       On Tue, Jun 25, 2024 at 8:44 PM Andre Heinemans
>                       <andre.heinem...@nxp.com> wrote:
> 
> 
> 
>                               Hi,
> 
>                               I am searching for a solution to share a disk or
>                               directory on a Linux host and make it accessible
>                               in NuttX through RPMsg. It looks like
>                               CONFIG_FS_RPMSGFS is the feature I need.
>                               According to this NuttX channel video
>                               (https://www.youtube.com/watch?v=-YLAQlJR1vA)
>                               various RPMsg services in NuttX are supported in
>                               Linux including RPMsg FS.
> 
> 
> 
>                               So, I tried setting this up but couldn’t find the
>                               correct Linux module that is able to interact
>                               with this service.
> 
>                               When I try to mount a directory with
>                               nsh> mount -t rpmsgfs -o cpu=netcore,fs=/mnt /mnt
> 
>                               Some interaction happens. See the Linux dmesg log:
>                               [   61.086471] virtio_rpmsg_bus virtio0: creating channel rpmsgfs-0x2000c1c8 addr 0x401
> 
>                               But it seems no Linux driver is listening on that
>                               channel. When I try to access the mounted
>                               directory in NuttX, nsh freezes.
> 
>                               What do I need to do on Linux to support this
>                               service? Do I maybe need a patch or a different
>                               fork?
> 
>                               Kind regards,
>                               Andre
> 
> 
> 
> 
> 
> 
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Pinecone Inc.
 *
 * redirect fs API from remote to the kernel.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/rpmsg.h>
#include <linux/slab.h>
#include <linux/statfs.h>

#include <generated/uapi/linux/version.h>

/* start from 3 because 0, 1 and 2 are reserved for stdin, stdout and stderr */
#define RPMSGFS_ID_START        3

/* must exactly match the definitions from the REMOTE include/dirent.h: */

#define RPMSGFS_DT_UNKNOWN      0
#define RPMSGFS_DT_FIFO         1
#define RPMSGFS_DT_CHR          2
#define RPMSGFS_DT_SEM          3
#define RPMSGFS_DT_DIR          4
#define RPMSGFS_DT_MQ           5
#define RPMSGFS_DT_BLK          6
#define RPMSGFS_DT_SHM          7
#define RPMSGFS_DT_REG          8
#define RPMSGFS_DT_MTD          9
#define RPMSGFS_DT_LNK          10
#define RPMSGFS_DT_SOCK         12
#define RPMSGFS_DTYPE_FIFO      RPMSGFS_DT_FIFO
#define RPMSGFS_DTYPE_CHR       RPMSGFS_DT_CHR
#define RPMSGFS_DTYPE_SEM       RPMSGFS_DT_SEM
#define RPMSGFS_DTYPE_DIRECTORY RPMSGFS_DT_DIR
#define RPMSGFS_DTYPE_MQ        RPMSGFS_DT_MQ
#define RPMSGFS_DTYPE_BLK       RPMSGFS_DT_BLK
#define RPMSGFS_DTYPE_SHM       RPMSGFS_DT_SHM
#define RPMSGFS_DTYPE_FILE      RPMSGFS_DT_REG
#define RPMSGFS_DTYPE_MTD       RPMSGFS_DT_MTD
#define RPMSGFS_DTYPE_LINK      RPMSGFS_DT_LNK
#define RPMSGFS_DTYPE_SOCK      RPMSGFS_DT_SOCK

/* must exactly match the definitions from the REMOTE's include/sys/stat.h: */

#define RPMSGFS_S_IFIFO         (1 << 12)
#define RPMSGFS_S_IFCHR         (2 << 12)
#define RPMSGFS_S_IFSEM         (3 << 12)
#define RPMSGFS_S_IFDIR         (4 << 12)
#define RPMSGFS_S_IFMQ          (5 << 12)
#define RPMSGFS_S_IFBLK         (6 << 12)
#define RPMSGFS_S_IFREG         (8 << 12)
#define RPMSGFS_S_IFMTD         (9 << 12)
#define RPMSGFS_S_IFLNK         (10 << 12)
#define RPMSGFS_S_IFSOCK        (12 << 12)
#define RPMSGFS_S_IFMT          (15 << 12)

/* must exactly match the definitions from the REMOTE's include/fcntl.h: */

#define RPMSGFS_O_RDONLY        BIT(0)
#define RPMSGFS_O_WRONLY        BIT(1)
#define RPMSGFS_O_CREAT         BIT(2)
#define RPMSGFS_O_EXCL          BIT(3)
#define RPMSGFS_O_APPEND        BIT(4)
#define RPMSGFS_O_TRUNC         BIT(5)
#define RPMSGFS_O_NONBLOCK      BIT(6)
#define RPMSGFS_O_SYNC          BIT(7)
#define RPMSGFS_O_BINARY        BIT(8)
#define RPMSGFS_O_DIRECT        BIT(9)
#define RPMSGFS_O_DIRECTORY     BIT(11)
#define RPMSGFS_O_NOFOLLOW      BIT(12)
#define RPMSGFS_O_LARGEFILE     BIT(13)
#define RPMSGFS_O_NOATIME       BIT(18)

/* must exactly match the definition from the REMOTE's include/sys/stat.h: */
struct rpmsgfs_stat_msg {
        u32                     dev;       /* Device ID of device containing file */
        u32                     mode;      /* File type, attributes, and access mode bits */
        u32                     rdev;      /* Device ID (if file is character or block special) */
        u16                     ino;       /* File serial number */
        u16                     nlink;     /* Number of hard links to the file */
        s64                     size;      /* Size of file/directory, in bytes */
        s64                     atim_sec;  /* Time of last access, seconds */
        s64                     atim_nsec; /* Time of last access, nanoseconds */
        s64                     mtim_sec;  /* Time of last modification, seconds */
        s64                     mtim_nsec; /* Time of last modification, nanoseconds */
        s64                     ctim_sec;  /* Time of last status change, seconds */
        s64                     ctim_nsec; /* Time of last status change, nanoseconds */
        u64                     blocks;    /* Number of blocks allocated */
        s16                     uid;       /* User ID of file */
        s16                     gid;       /* Group ID of file */
        s16                     blksize;   /* Block size used for filesystem I/O */
        u16                     reserved;  /* Reserved space */
} __packed;

/* must exactly match the definition from the REMOTE's fs/hostfs/hostfs_rpmsg.h: */

#define RPMSGFS_OPEN            1
#define RPMSGFS_CLOSE           2
#define RPMSGFS_READ            3
#define RPMSGFS_WRITE           4
#define RPMSGFS_LSEEK           5
#define RPMSGFS_IOCTL           6
#define RPMSGFS_SYNC            7
#define RPMSGFS_DUP             8
#define RPMSGFS_FSTAT           9
#define RPMSGFS_FTRUNCATE       10
#define RPMSGFS_OPENDIR         11
#define RPMSGFS_READDIR         12
#define RPMSGFS_REWINDDIR       13
#define RPMSGFS_CLOSEDIR        14
#define RPMSGFS_STATFS          15
#define RPMSGFS_UNLINK          16
#define RPMSGFS_MKDIR           17
#define RPMSGFS_RMDIR           18
#define RPMSGFS_RENAME          19
#define RPMSGFS_STAT            20
#define RPMSGFS_FCHSTAT         21
#define RPMSGFS_CHSTAT          22

struct rpmsgfs_header {
        u32                     command;
        s32                     result;
        u64                     cookie;
} __packed;

struct rpmsgfs_open {
        struct rpmsgfs_header   header;
        s32                     flags;
        s32                     mode;
        char                    pathname[0];
} __packed;

struct rpmsgfs_close {
        struct rpmsgfs_header   header;
        s32                     fd;
} __packed;

struct rpmsgfs_read {
        struct rpmsgfs_header   header;
        s32                     fd;
        u32                     count;
        char                    buf[0];
} __packed;

#define rpmsgfs_write           rpmsgfs_read

struct rpmsgfs_lseek {
        struct rpmsgfs_header   header;
        s32                     fd;
        s32                     whence;
        s32                     offset;
} __packed;

struct rpmsgfs_ioctl {
        struct rpmsgfs_header   header;
        s32                     fd;
        s32                     request;
        s32                     arg;
} __packed;

#define rpmsgfs_sync            rpmsgfs_close
#define rpmsgfs_dup             rpmsgfs_close

struct rpmsgfs_fstat {
        struct rpmsgfs_header   header;
        struct rpmsgfs_stat_msg buf;
        union {
                s32             fd;
                char            pathname[0];
        };
} __packed;

struct rpmsgfs_ftruncate {
        struct rpmsgfs_header   header;
        s32                     fd;
        s32                     length;
} __packed;

struct rpmsgfs_opendir {
        struct rpmsgfs_header   header;
        char                    pathname[0];
} __packed;

struct rpmsgfs_readdir {
        struct rpmsgfs_header   header;
        s32                     fd;
        u32                     type;
        char                    name[0];
} __packed;

#define rpmsgfs_rewinddir       rpmsgfs_close
#define rpmsgfs_closedir        rpmsgfs_close

struct rpmsgfs_statfs {
        struct rpmsgfs_header   header;
        u32                     type;
        u32                     reserved;
        u64                     namelen;
        u64                     bsize;
        u64                     blocks;
        u64                     bfree;
        u64                     bavail;
        u64                     files;
        u64                     ffree;
        char                    pathname[0];
} __packed;

#define rpmsgfs_unlink          rpmsgfs_opendir

struct rpmsgfs_mkdir {
        struct rpmsgfs_header   header;
        s32                     mode;
        u32                     reserved;
        char                    pathname[0];
} __packed;

#define rpmsgfs_rmdir           rpmsgfs_opendir
#define rpmsgfs_rename          rpmsgfs_opendir
#define rpmsgfs_stat            rpmsgfs_fstat

struct rpmsgfs_fchstat {
        struct rpmsgfs_header   header;
        struct rpmsgfs_stat_msg buf;
        s32                     flags;
        union {
                s32             fd;
                char            pathname[0];
        };
} __packed;

struct rpmsgfs_filldir_callback {
        struct dir_context      ctx;
        struct rpmsgfs_readdir  *rsp;
        u32                     namelen;
        u32                     space;
};

struct rpmsgfs {
        struct mutex            lock; /* protect files field */
        struct idr              files;
        struct kmem_cache       *cache;
};

/* some VFS symbols used below are exported only in this namespace */
MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);

static int rpmsgfs_idr_alloc(struct rpmsgfs *priv, void *ptr)
{
        int id;
        mutex_lock(&priv->lock);
        id = idr_alloc(&priv->files, ptr,
                       RPMSGFS_ID_START, 0, GFP_KERNEL);
        mutex_unlock(&priv->lock);
        return id;
}

static void *rpmsgfs_idr_find(struct rpmsgfs *priv, int id)
{
        void *ptr;
        mutex_lock(&priv->lock);
        ptr = idr_find(&priv->files, id);
        mutex_unlock(&priv->lock);
        return ptr;
}

static void rpmsgfs_idr_remove(struct rpmsgfs *priv, int id)
{
        mutex_lock(&priv->lock);
        idr_remove(&priv->files, id);
        mutex_unlock(&priv->lock);
}

static int rpmsgfs_open_handler(struct rpmsg_device *rpdev,
                                void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_open *msg = data;
        struct file *filp;
        int id, flags = O_RDONLY;

        if (msg->flags & RPMSGFS_O_WRONLY) {
                if (msg->flags & RPMSGFS_O_RDONLY)
                        flags = O_RDWR;
                else
                        flags = O_WRONLY;
        }

        if (msg->flags & RPMSGFS_O_CREAT)
                flags |= O_CREAT;
        if (msg->flags & RPMSGFS_O_EXCL)
                flags |= O_EXCL;
        if (msg->flags & RPMSGFS_O_APPEND)
                flags |= O_APPEND;
        if (msg->flags & RPMSGFS_O_TRUNC)
                flags |= O_TRUNC;
        if (msg->flags & RPMSGFS_O_NONBLOCK)
                flags |= O_NONBLOCK;
        if (msg->flags & RPMSGFS_O_SYNC)
                flags |= O_SYNC;
        if (msg->flags & RPMSGFS_O_DIRECT)
                flags |= O_DIRECT;
        filp = filp_open(msg->pathname, flags, msg->mode);
        if (!IS_ERR(filp)) {
                id = rpmsgfs_idr_alloc(priv, filp);
                if (id < 0)
                        filp_close(filp, NULL);
                msg->header.result = id;
        } else {
                msg->header.result = PTR_ERR(filp);
        }
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int rpmsgfs_close_handler(struct rpmsg_device *rpdev,
                                 void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_close *msg = data;
        struct file *filp;
        int ret = -ENOENT;
        filp = rpmsgfs_idr_find(priv, msg->fd);
        if (filp) {
                rpmsgfs_idr_remove(priv, msg->fd);
                ret = filp_close(filp, NULL);
        }
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int rpmsgfs_read_handler(struct rpmsg_device *rpdev,
                                void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_read *msg = data;
        struct rpmsgfs_read *rsp;
        struct file *filp;
        int ret = -ENOENT;
        int rpmsg_ret = 0;
        u32 space;
        u32 read = 0;
        filp = rpmsgfs_idr_find(priv, msg->fd);
        while (read < msg->count) {
                rsp = rpmsg_get_tx_payload_buffer(rpdev->ept, &space, true);
                if (IS_ERR(rsp))
                        return PTR_ERR(rsp);
                *rsp = *msg;
                space -= sizeof(*msg);
                if (space > msg->count - read)
                        space = msg->count - read;
                if (filp) {
                        void *buf;
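                        /* read via a bounce buffer when aligned memory access is required */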
                        if (priv->cache)
                                buf = kmem_cache_alloc(priv->cache, GFP_KERNEL);
                        else
                                buf = rsp->buf;
                        ret = kernel_read(filp, buf, space, &filp->f_pos);
                        if (priv->cache && buf) {
                                if (ret > 0)
                                        memcpy(rsp->buf, buf, ret);
                                kmem_cache_free(priv->cache, buf);
                        }
                }
                rsp->header.result = ret;
                rpmsg_ret = rpmsg_send_nocopy(rpdev->ept, rsp,
                                              (ret < 0 ? 0 : ret) + sizeof(*rsp));
                if (rpmsg_ret < 0) {
                        dev_err(&rpdev->dev, "rpmsg send failed, ret=%d\n", rpmsg_ret);
                        rpmsg_release_tx_buffer(rpdev->ept, rsp);
                }
                if (ret <= 0)
                        break;
                read += ret;
        }
        return rpmsg_ret;
}

static int rpmsgfs_write_handler(struct rpmsg_device *rpdev,
                                 void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_write *msg = data;
        struct file *filp;
        int ret = -ENOENT;
        filp = rpmsgfs_idr_find(priv, msg->fd);
        if (filp) {
                void *buf;
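                /* write via a bounce buffer when aligned memory access is required */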
                if (priv->cache) {
                        buf = kmem_cache_alloc(priv->cache, GFP_KERNEL);
                        if (buf)
                                memcpy(buf, msg->buf, msg->count);
                } else {
                        buf = msg->buf;
                }
                ret = kernel_write(filp, buf, msg->count, &filp->f_pos);
                if (priv->cache && buf)
                        kmem_cache_free(priv->cache, buf);
        }
        if (msg->header.cookie) {
                msg->header.result = ret;
                return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
        }
        return 0;
}

static int rpmsgfs_lseek_handler(struct rpmsg_device *rpdev,
                                 void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_lseek *msg = data;
        struct file *filp;
        int ret = -ENOENT;
        filp = rpmsgfs_idr_find(priv, msg->fd);
        if (filp)
                ret = vfs_llseek(filp, msg->offset, msg->whence);
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int rpmsgfs_ioctl_handler(struct rpmsg_device *rpdev,
                                 void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_ioctl *msg = data;
        struct file *filp;
        int ret = -ENOENT;
        filp = rpmsgfs_idr_find(priv, msg->fd);
        if (filp)
                ret = vfs_ioctl(filp, msg->request, msg->arg);
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int rpmsgfs_sync_handler(struct rpmsg_device *rpdev,
                                void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_sync *msg = data;
        struct file *filp;
        int ret = -ENOENT;
        filp = rpmsgfs_idr_find(priv, msg->fd);
        if (filp)
                ret = vfs_fsync(filp, 0);
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int rpmsgfs_dup_handler(struct rpmsg_device *rpdev,
                               void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_dup *msg = data;
        struct file *filp, *new_filp;
        int id, ret = -ENOENT;
        filp = rpmsgfs_idr_find(priv, msg->fd);
        if (filp) {
                new_filp = fget(f_dupfd(0, filp, 0));
                if (new_filp) {
                        id = rpmsgfs_idr_alloc(priv, new_filp);
                        if (id < 0)
                                filp_close(new_filp, NULL);
                        ret = id;
                }
        }
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static void
rpmsgfs_stat_convert(struct kstat *hostbuf, struct rpmsgfs_stat_msg *buf)
{
        buf->mode = hostbuf->mode & 0777;
        if (hostbuf->mode & S_IFDIR)
                buf->mode |= RPMSGFS_S_IFDIR;
        else if (hostbuf->mode & S_IFREG)
                buf->mode |= RPMSGFS_S_IFREG;
        else if (hostbuf->mode & S_IFCHR)
                buf->mode |= RPMSGFS_S_IFCHR;
        else if (hostbuf->mode & S_IFBLK)
                buf->mode |= RPMSGFS_S_IFBLK;
        else if (hostbuf->mode & S_IFLNK)
                buf->mode |= RPMSGFS_S_IFLNK;
        else  if (hostbuf->mode & S_IFIFO)
                buf->mode |= RPMSGFS_S_IFIFO;
        else  if (hostbuf->mode & S_IFSOCK)
                buf->mode |= RPMSGFS_S_IFSOCK;
        buf->dev       = hostbuf->dev;
        buf->rdev      = hostbuf->rdev;
#if 0
        buf->uid       = hostbuf->uid;
        buf->gid       = hostbuf->gid;
#endif
        buf->size      = hostbuf->size;
        buf->blksize   = hostbuf->blksize;
        buf->blocks    = hostbuf->blocks;
        buf->atim_sec  = hostbuf->atime.tv_sec;
        buf->atim_nsec = hostbuf->atime.tv_nsec;
        buf->mtim_sec  = hostbuf->mtime.tv_sec;
        buf->mtim_nsec = hostbuf->mtime.tv_nsec;
        buf->ctim_sec  = hostbuf->ctime.tv_sec;
        buf->ctim_nsec = hostbuf->ctime.tv_nsec;
}

static int rpmsgfs_fstat_handler(struct rpmsg_device *rpdev,
                                 void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_fstat *msg = data;
        struct kstat hostbuf;
        struct file *filp;
        u32 request_mask = STATX_TYPE | STATX_MODE | STATX_NLINK | STATX_UID |
                           STATX_GID | STATX_ATIME | STATX_MTIME | STATX_CTIME |
                           STATX_INO | STATX_SIZE | STATX_BLOCKS;
        int ret = -ENOENT;
        filp = rpmsgfs_idr_find(priv, msg->fd);
        if (filp) {
                ret = vfs_getattr(&filp->f_path, &hostbuf, request_mask,
                                  AT_STATX_SYNC_AS_STAT);
                if (ret == 0)
                        rpmsgfs_stat_convert(&hostbuf, &msg->buf);
        }
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int
rpmsgfs_ftruncate_handler(struct rpmsg_device *rpdev,
                          void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_ftruncate *msg = data;
        struct file *filp;
        int ret = -ENOENT;
        filp = rpmsgfs_idr_find(priv, msg->fd);
        if (filp)
                ret = vfs_truncate(&filp->f_path, msg->length);
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int
rpmsgfs_opendir_handler(struct rpmsg_device *rpdev,
                        void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_opendir *msg = data;
        struct file *filp;
        int id;
        filp = filp_open(msg->pathname, O_RDONLY | O_DIRECTORY, 0);
        if (!IS_ERR(filp)) {
                id = rpmsgfs_idr_alloc(priv, filp);
                if (id < 0)
                        filp_close(filp, NULL);
                msg->header.result = id;
        } else {
                msg->header.result = PTR_ERR(filp);
        }
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static bool rpmsgfs_filldir(struct dir_context *ctx,
                           const char *name, int namelen,
                           loff_t offset, u64 ino, unsigned int d_type)
{
        struct rpmsgfs_filldir_callback *cb =
                container_of(ctx, struct rpmsgfs_filldir_callback, ctx);
        struct rpmsgfs_readdir *rsp = cb->rsp;
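        /* fill at most one directory entry per RPMSGFS_READDIR request */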
        if (cb->namelen)
                return false;
        if (d_type == DT_REG)
                rsp->type = RPMSGFS_DTYPE_FILE;
        else if (d_type == DT_CHR)
                rsp->type = RPMSGFS_DTYPE_CHR;
        else if (d_type == DT_BLK)
                rsp->type = RPMSGFS_DTYPE_BLK;
        else if (d_type == DT_DIR)
                rsp->type = RPMSGFS_DTYPE_DIRECTORY;
        else
                rsp->type = 0;
        if (namelen >= cb->space)
                namelen = cb->space - 1;
        strncpy(rsp->name, name, namelen);
        rsp->name[namelen++] = 0;
        cb->namelen = namelen;
        return true;
}

static int
rpmsgfs_readdir_handler(struct rpmsg_device *rpdev,
                        void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_filldir_callback cb = {
                .ctx.actor = rpmsgfs_filldir,
                .namelen   = 0,
        };
        struct rpmsgfs_readdir *msg = data;
        struct file *filp;
        int ret = -ENOENT;

        cb.rsp = rpmsg_get_tx_payload_buffer(rpdev->ept, &cb.space, true);
        if (IS_ERR(cb.rsp))
                return PTR_ERR(cb.rsp);
        cb.space -= sizeof(*msg);
        *cb.rsp = *msg;
        filp = rpmsgfs_idr_find(priv, msg->fd);
        if (filp) {
                ret = iterate_dir(filp, &cb.ctx);
                if (ret == 0 && cb.namelen == 0)
                        ret = -ENOENT;
        }
        cb.rsp->header.result = ret;
        ret = rpmsg_send_nocopy(rpdev->ept, cb.rsp, sizeof(*cb.rsp) + cb.namelen);
        if (ret < 0) {
                dev_err(&rpdev->dev, "rpmsg send failed, ret=%d\n", ret);
                rpmsg_release_tx_buffer(rpdev->ept, cb.rsp);
        }

        return ret;
}

static int
rpmsgfs_rewinddir_handler(struct rpmsg_device *rpdev,
                          void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_rewinddir *msg = data;
        struct file *filp;
        int ret = -ENOENT;
        filp = rpmsgfs_idr_find(priv, msg->fd);
        if (filp) {
                ret = vfs_llseek(filp, 0, SEEK_SET);
                if (ret > 0)
                        ret = -EINVAL;
        }
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int
rpmsgfs_closedir_handler(struct rpmsg_device *rpdev,
                         void *data, int len, void *priv_, u32 src)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct rpmsgfs_closedir *msg = data;
        struct file *filp;
        int ret = -ENOENT;
        filp = rpmsgfs_idr_find(priv, msg->fd);
        if (filp) {
                rpmsgfs_idr_remove(priv, msg->fd);
                ret = filp_close(filp, NULL);
        }
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int rpmsgfs_statfs_handler(struct rpmsg_device *rpdev,
                                  void *data, int len, void *priv, u32 src)
{
        struct rpmsgfs_statfs *msg = data;
        struct kstatfs hostbuf;
        struct file *filp;
        int ret;
        filp = filp_open(msg->pathname, 0, 0);
        if (!IS_ERR(filp)) {
                ret = vfs_statfs(&filp->f_path, &hostbuf);
                if (ret == 0) {
                        msg->type = hostbuf.f_type;
                        msg->reserved = 0;
                        msg->namelen = hostbuf.f_namelen;
                        msg->bsize = hostbuf.f_bsize;
                        msg->blocks = hostbuf.f_blocks;
                        msg->bfree = hostbuf.f_bfree;
                        msg->bavail = hostbuf.f_bavail;
                        msg->files = hostbuf.f_files;
                        msg->ffree = hostbuf.f_ffree;
                }
                filp_close(filp, NULL);
        } else {
                ret = PTR_ERR(filp);
        }
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int rpmsgfs_unlink_handler(struct rpmsg_device *rpdev,
                                  void *data, int len, void *priv, u32 src)
{
        struct rpmsgfs_unlink *msg = data;
        struct dentry *dentry;
        struct path parent;
        int ret;
        dentry = kern_path_locked(msg->pathname, &parent);
        if (!IS_ERR(dentry)) {
                if (d_really_is_positive(dentry))
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0)
                        ret = vfs_unlink(d_inode(parent.dentry), dentry, NULL);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
                        ret = vfs_unlink(&init_user_ns, d_inode(parent.dentry), dentry, NULL);
#else
                        ret = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry), dentry, NULL);
#endif
                else
                        ret = -ENOENT;
                dput(dentry);
                inode_unlock(d_inode(parent.dentry));
                path_put(&parent);
        } else {
                ret = PTR_ERR(dentry);
        }
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int rpmsgfs_mkdir_handler(struct rpmsg_device *rpdev,
                                 void *data, int len, void *priv, u32 src)
{
        struct rpmsgfs_mkdir *msg = data;
        struct dentry *dentry;
        struct path path;
        int ret;
        dentry = kern_path_create(AT_FDCWD, msg->pathname,
                                  &path, LOOKUP_DIRECTORY);
        if (!IS_ERR(dentry)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0)
                ret = vfs_mkdir(d_inode(path.dentry), dentry, msg->mode);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
                ret = vfs_mkdir(&init_user_ns, d_inode(path.dentry), dentry, msg->mode);
#else
                ret = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, msg->mode);
#endif
                done_path_create(&path, dentry);
        } else {
                ret = PTR_ERR(dentry);
        }
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int rpmsgfs_rmdir_handler(struct rpmsg_device *rpdev,
                                 void *data, int len, void *priv, u32 src)
{
        struct rpmsgfs_rmdir *msg = data;
        struct dentry *dentry;
        struct path parent;
        int ret;
        dentry = kern_path_locked(msg->pathname, &parent);
        if (!IS_ERR(dentry)) {
                if (d_really_is_positive(dentry))
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0)
                        ret = vfs_rmdir(d_inode(parent.dentry), dentry);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
                        ret = vfs_rmdir(&init_user_ns, d_inode(parent.dentry), dentry);
#else
                        ret = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry), dentry);
#endif
                else
                        ret = -ENOENT;
                dput(dentry);
                inode_unlock(d_inode(parent.dentry));
                path_put(&parent);
        } else {
                ret = PTR_ERR(dentry);
        }
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int rpmsgfs_rename_handler(struct rpmsg_device *rpdev,
                                  void *data, int len, void *priv, u32 src)
{
        struct rpmsgfs_rename *msg = data;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)
        struct renamedata rd;
#endif
        struct path oldpath, newpath;
        struct dentry *newdentry;
        char *oldname, *newname;
        int ret, oldlen;
        oldname = msg->pathname;
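        /* the new path follows the old path, padded to an 8-byte boundary */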
        oldlen  = (strlen(msg->pathname) + 1 + 0x7) & ~0x7;
        newname = msg->pathname + oldlen;
        ret = kern_path(oldname, 0, &oldpath);
        if (ret < 0)
                goto fail;
        if (!oldpath.dentry || !oldpath.dentry->d_parent) {
                ret = -ENOENT;
                goto fail1;
        }
        newdentry = kern_path_locked(newname, &newpath);
        if (IS_ERR(newdentry)) {
                ret = PTR_ERR(newdentry);
                goto fail1;
        }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0)
        rd.old_mnt_idmap = &nop_mnt_idmap;
        rd.new_mnt_idmap = &nop_mnt_idmap;
 #else
        rd.old_mnt_userns = &init_user_ns;
 #endif
        rd.old_dir = oldpath.dentry->d_parent->d_inode;
        rd.old_dentry = oldpath.dentry;
        rd.new_dir = d_inode(newpath.dentry);
        rd.new_dentry = newdentry;
        rd.delegated_inode = NULL;
        rd.flags = 0;
        ret = vfs_rename(&rd);
#else
        ret = vfs_rename(oldpath.dentry->d_parent->d_inode, oldpath.dentry,
                         d_inode(newpath.dentry), newdentry, NULL, 0);
#endif
        dput(newdentry);
        inode_unlock(d_inode(newpath.dentry));
        path_put(&newpath);
fail1:
        path_put(&oldpath);
fail:
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static int rpmsgfs_stat_handler(struct rpmsg_device *rpdev,
                                void *data, int len, void *priv, u32 src)
{
        struct rpmsgfs_stat *msg = data;
        struct kstat hostbuf;
        struct file *filp;
        u32 request_mask = STATX_TYPE | STATX_MODE | STATX_NLINK | STATX_UID |
                           STATX_GID | STATX_ATIME | STATX_MTIME | STATX_CTIME |
                           STATX_INO | STATX_SIZE | STATX_BLOCKS;
        int ret;
        filp = filp_open(msg->pathname, 0, 0);
        if (!IS_ERR(filp)) {
                ret = vfs_getattr(&filp->f_path, &hostbuf, request_mask,
                                  AT_STATX_SYNC_AS_STAT);
                if (ret == 0)
                        rpmsgfs_stat_convert(&hostbuf, &msg->buf);
                filp_close(filp, NULL);
        } else {
                ret = PTR_ERR(filp);
        }
        msg->header.result = ret;
        return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}

static const rpmsg_rx_cb_t rpmsgfs_handler[] = {
        [RPMSGFS_OPEN]      = rpmsgfs_open_handler,
        [RPMSGFS_CLOSE]     = rpmsgfs_close_handler,
        [RPMSGFS_READ]      = rpmsgfs_read_handler,
        [RPMSGFS_WRITE]     = rpmsgfs_write_handler,
        [RPMSGFS_LSEEK]     = rpmsgfs_lseek_handler,
        [RPMSGFS_IOCTL]     = rpmsgfs_ioctl_handler,
        [RPMSGFS_SYNC]      = rpmsgfs_sync_handler,
        [RPMSGFS_DUP]       = rpmsgfs_dup_handler,
        [RPMSGFS_FSTAT]     = rpmsgfs_fstat_handler,
        [RPMSGFS_FTRUNCATE] = rpmsgfs_ftruncate_handler,
        [RPMSGFS_OPENDIR]   = rpmsgfs_opendir_handler,
        [RPMSGFS_READDIR]   = rpmsgfs_readdir_handler,
        [RPMSGFS_REWINDDIR] = rpmsgfs_rewinddir_handler,
        [RPMSGFS_CLOSEDIR]  = rpmsgfs_closedir_handler,
        [RPMSGFS_STATFS]    = rpmsgfs_statfs_handler,
        [RPMSGFS_UNLINK]    = rpmsgfs_unlink_handler,
        [RPMSGFS_MKDIR]     = rpmsgfs_mkdir_handler,
        [RPMSGFS_RMDIR]     = rpmsgfs_rmdir_handler,
        [RPMSGFS_RENAME]    = rpmsgfs_rename_handler,
        [RPMSGFS_STAT]      = rpmsgfs_stat_handler,
};

static int rpmsgfs_callback(struct rpmsg_device *rpdev,
                            void *data, int len, void *priv, u32 src)
{
        struct rpmsgfs_header *header = data;
        u32 cmd = header->command;
        int ret = -EINVAL;
        if (cmd < ARRAY_SIZE(rpmsgfs_handler)) {
                ret = rpmsgfs_handler[cmd](rpdev, data, len, priv, src);
                if (ret < 0)
                        dev_err(&rpdev->dev, "command handle error %d\n", cmd);
        }
        return ret;
}

static int rpmsgfs_probe(struct rpmsg_device *rpdev)
{
        struct device_node *np = rpdev->dev.of_node;
        struct rpmsgfs *priv;
        bool aligned;
        priv = devm_kzalloc(&rpdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        aligned = of_property_read_bool(np, "memory-aligned-access");
        if (!aligned) {
                /* try the parent node */
                np = of_get_parent(np);
                aligned = of_property_read_bool(np, "memory-aligned-access");
                of_node_put(np);
        }
        if (aligned) {
                int size = max(rpmsg_get_tx_buffer_size(rpdev->ept),
                               rpmsg_get_rx_buffer_size(rpdev->ept));
                priv->cache = kmem_cache_create(dev_name(&rpdev->dev),
                                                size, 8, 0, NULL);
                if (!priv->cache)
                        return -ENOMEM;
        }

        mutex_init(&priv->lock);
        idr_init(&priv->files);
        dev_set_drvdata(&rpdev->dev, priv);
        return 0;
}

static void rpmsgfs_remove(struct rpmsg_device *rpdev)
{
        struct rpmsgfs *priv = dev_get_drvdata(&rpdev->dev);
        struct file *filp;
        int fd;
        idr_for_each_entry(&priv->files, filp, fd)
                filp_close(filp, NULL);
        kmem_cache_destroy(priv->cache);
        mutex_destroy(&priv->lock);
        idr_destroy(&priv->files);
}

static int rpmsgfs_match(struct rpmsg_device *dev, struct rpmsg_driver *drv)
{
        return strncmp(dev->id.name, "rpmsgfs-", 8) == 0;
}

static struct rpmsg_driver rpmsgfs_driver = {
        .drv = {
                .name   = "rpmsgfs",
                .owner  = THIS_MODULE,
        },
        .probe          = rpmsgfs_probe,
        .callback       = rpmsgfs_callback,
        .remove         = rpmsgfs_remove,
        .match          = rpmsgfs_match,
};

module_rpmsg_driver(rpmsgfs_driver);
MODULE_ALIAS("rpmsg:rpmsg fs");
MODULE_AUTHOR("Guiding Li <liguid...@pinecone.net>");
MODULE_DESCRIPTION("rpmsg fs API redirection driver");
MODULE_LICENSE("GPL v2");
