The branch main has been updated by mjg:

URL: 
https://cgit.FreeBSD.org/src/commit/?id=c09f7992714559eaa874f13ea4a1d648f199cd08

commit c09f7992714559eaa874f13ea4a1d648f199cd08
Author:     Mateusz Guzik <[email protected]>
AuthorDate: 2021-01-25 20:17:48 +0000
Commit:     Mateusz Guzik <[email protected]>
CommitDate: 2021-01-25 22:40:15 +0000

    tmpfs: drop acq fence now that vn_load_v_data_smr has consume semantics
---
 sys/fs/tmpfs/tmpfs_subr.c  | 3 ---
 sys/fs/tmpfs/tmpfs_vnops.c | 1 -
 2 files changed, 4 deletions(-)

diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 3b3581fc81f6..84473a439c43 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -356,9 +356,6 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
                 * pointer to also get the above content in a stable manner.
                 * Worst case tn_link_smr flag may be set to true despite being stale,
                 * while the target buffer is already cleared out.
-                *
-                * TODO: Since there is no load consume primitive provided
-                * right now, the load is performed with an acquire fence.
                 */
                atomic_store_ptr(&nnode->tn_link_target, symlink);
                atomic_store_char((char *)&nnode->tn_link_smr, symlink_smr);
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index c716b393efdd..7614287c642e 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -1466,7 +1466,6 @@ tmpfs_fplookup_symlink(struct vop_fplookup_symlink_args *v)
 
        vp = v->a_vp;
        node = VP_TO_TMPFS_NODE_SMR(vp);
-       atomic_thread_fence_acq();
        if (__predict_false(node == NULL))
                return (EAGAIN);
        if (!atomic_load_char(&node->tn_link_smr))
_______________________________________________
[email protected] mailing list
https://lists.freebsd.org/mailman/listinfo/dev-commits-src-main
To unsubscribe, send any mail to "[email protected]"

Reply via email to