This is an automated email from the ASF dual-hosted git repository.

andk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/mynewt-core.git

commit fc91540a45ce84a13ac171345418b29f18ccf9c5
Author: Andrzej Kaczmarek <[email protected]>
AuthorDate: Tue Oct 11 22:51:37 2022 +0200

    fs/littlefs: Update to v2.5.0
---
 fs/littlefs/LICENSE.md                           |   25 +
 fs/littlefs/README.md                            |    2 +-
 fs/littlefs/SPEC.md                              |   14 +-
 fs/littlefs/src/lfs.c                            | 1299 ++++++++++++++--------
 fs/littlefs/{include/littlefs => src}/lfs.h      |   46 +-
 fs/littlefs/src/lfs_util.c                       |    3 +-
 fs/littlefs/{include/littlefs => src}/lfs_util.h |    1 +
 fs/littlefs/src/mynewt_glue.c                    |    4 +-
 8 files changed, 908 insertions(+), 486 deletions(-)

diff --git a/fs/littlefs/LICENSE.md b/fs/littlefs/LICENSE.md
new file mode 100644
index 000000000..e6c3a7bac
--- /dev/null
+++ b/fs/littlefs/LICENSE.md
@@ -0,0 +1,25 @@
+Copyright (c) 2022, The littlefs authors.  
+Copyright (c) 2017, Arm Limited. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+-  Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+-  Redistributions in binary form must reproduce the above copyright notice, this
+   list of conditions and the following disclaimer in the documentation and/or
+   other materials provided with the distribution.
+-  Neither the name of ARM nor the names of its contributors may be used to
+   endorse or promote products derived from this software without specific prior
+   written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/fs/littlefs/README.md b/fs/littlefs/README.md
index a30d43d3b..584ada39d 100644
--- a/fs/littlefs/README.md
+++ b/fs/littlefs/README.md
@@ -192,7 +192,7 @@ More details on how littlefs works can be found in [DESIGN.md](DESIGN.md) and
 ## Testing
 
 The littlefs comes with a test suite designed to run on a PC using the
-[emulated block device](emubd/lfs_emubd.h) found in the emubd directory.
+[emulated block device](bd/lfs_testbd.h) found in the `bd` directory.
 The tests assume a Linux environment and can be started with make:
 
 ``` bash
diff --git a/fs/littlefs/SPEC.md b/fs/littlefs/SPEC.md
index cc602c1cf..3663ea544 100644
--- a/fs/littlefs/SPEC.md
+++ b/fs/littlefs/SPEC.md
@@ -233,19 +233,19 @@ Metadata tag fields:
    into a 3-bit abstract type and an 8-bit chunk field. Note that the value
    `0x000` is invalid and not assigned a type.
 
-3. **Type1 (3-bits)** - Abstract type of the tag. Groups the tags into
-   8 categories that facilitate bitmasked lookups.
+    1. **Type1 (3-bits)** - Abstract type of the tag. Groups the tags into
+       8 categories that facilitate bitmasked lookups.
 
-4. **Chunk (8-bits)** - Chunk field used for various purposes by the different
-   abstract types.  type1+chunk+id form a unique identifier for each tag in the
-   metadata block.
+    2. **Chunk (8-bits)** - Chunk field used for various purposes by the different
+       abstract types.  type1+chunk+id form a unique identifier for each tag in the
+       metadata block.
 
-5. **Id (10-bits)** - File id associated with the tag. Each file in a metadata
+3. **Id (10-bits)** - File id associated with the tag. Each file in a metadata
    block gets a unique id which is used to associate tags with that file. The
    special value `0x3ff` is used for any tags that are not associated with a
    file, such as directory and global metadata.
 
-6. **Length (10-bits)** - Length of the data in bytes. The special value
+4. **Length (10-bits)** - Length of the data in bytes. The special value
    `0x3ff` indicates that this tag has been deleted.
 
 ## Metadata types
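
For reference, the field widths listed above (3-bit type1, 8-bit chunk, 10-bit id,
10-bit length) are what the LFS_MKTAG macro used throughout lfs.c packs into a
single 32-bit tag. A minimal sketch of that packing, assuming the shift amounts
simply follow the widths above:

    #include <stdint.h>

    // sketch only: pack an 11-bit type (type1 + chunk), a 10-bit id and a
    // 10-bit length into one 32-bit tag, mirroring how LFS_MKTAG is used
    typedef uint32_t lfs_tag_t;

    static inline lfs_tag_t mktag(uint32_t type, uint32_t id, uint32_t size) {
        return (type << 20) | (id << 10) | size;
    }

    // mktag(0x7ff, 0x3ff, 0) builds the all-ones type/id mask seen in
    // lfs_dir_traverse; id 0x3ff marks tags not tied to any file and
    // length 0x3ff marks a deleted tag, as described above.
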
diff --git a/fs/littlefs/src/lfs.c b/fs/littlefs/src/lfs.c
index c517c004b..117595e04 100644
--- a/fs/littlefs/src/lfs.c
+++ b/fs/littlefs/src/lfs.c
@@ -1,16 +1,33 @@
 /*
  * The little filesystem
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
-#include "littlefs/lfs.h"
-#include "littlefs/lfs_util.h"
+#include "lfs.h"
+#include "lfs_util.h"
 
+
+// some constants used throughout the code
 #define LFS_BLOCK_NULL ((lfs_block_t)-1)
 #define LFS_BLOCK_INLINE ((lfs_block_t)-2)
 
+enum {
+    LFS_OK_RELOCATED = 1,
+    LFS_OK_DROPPED   = 2,
+    LFS_OK_ORPHANED  = 3,
+};
+
+enum {
+    LFS_CMP_EQ = 0,
+    LFS_CMP_LT = 1,
+    LFS_CMP_GT = 2,
+};
+
+
 /// Caching block device operations ///
+
 static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
     // do not zero, cheaper if cache is readonly or only going to be
     // written with identical data (during relocates)
@@ -107,12 +124,6 @@ static int lfs_bd_read(lfs_t *lfs,
     return 0;
 }
 
-enum {
-    LFS_CMP_EQ = 0,
-    LFS_CMP_LT = 1,
-    LFS_CMP_GT = 2,
-};
-
 static int lfs_bd_cmp(lfs_t *lfs,
         const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
         lfs_block_t block, lfs_off_t off,
@@ -268,22 +279,26 @@ static inline int lfs_pair_cmp(
              paira[0] == pairb[1] || paira[1] == pairb[0]);
 }
 
+#ifndef LFS_READONLY
 static inline bool lfs_pair_sync(
         const lfs_block_t paira[2],
         const lfs_block_t pairb[2]) {
     return (paira[0] == pairb[0] && paira[1] == pairb[1]) ||
            (paira[0] == pairb[1] && paira[1] == pairb[0]);
 }
+#endif
 
 static inline void lfs_pair_fromle32(lfs_block_t pair[2]) {
     pair[0] = lfs_fromle32(pair[0]);
     pair[1] = lfs_fromle32(pair[1]);
 }
 
+#ifndef LFS_READONLY
 static inline void lfs_pair_tole32(lfs_block_t pair[2]) {
     pair[0] = lfs_tole32(pair[0]);
     pair[1] = lfs_tole32(pair[1]);
 }
+#endif
 
 // operations on 32-bit entry tags
 typedef uint32_t lfs_tag_t;
@@ -365,6 +380,7 @@ static inline bool lfs_gstate_iszero(const lfs_gstate_t *a) {
     return true;
 }
 
+#ifndef LFS_READONLY
 static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) {
     return lfs_tag_size(a->tag);
 }
@@ -376,6 +392,7 @@ static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) {
 static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) {
     return lfs_tag_type1(a->tag);
 }
+#endif
 
 static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a,
         const lfs_block_t *pair) {
@@ -388,11 +405,13 @@ static inline void lfs_gstate_fromle32(lfs_gstate_t *a) {
     a->pair[1] = lfs_fromle32(a->pair[1]);
 }
 
+#ifndef LFS_READONLY
 static inline void lfs_gstate_tole32(lfs_gstate_t *a) {
     a->tag     = lfs_tole32(a->tag);
     a->pair[0] = lfs_tole32(a->pair[0]);
     a->pair[1] = lfs_tole32(a->pair[1]);
 }
+#endif
 
 // other endianness operations
 static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
@@ -416,6 +435,7 @@ static inline void lfs_superblock_fromle32(lfs_superblock_t *superblock) {
     superblock->attr_max    = lfs_fromle32(superblock->attr_max);
 }
 
+#ifndef LFS_READONLY
 static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
     superblock->version     = lfs_tole32(superblock->version);
     superblock->block_size  = lfs_tole32(superblock->block_size);
@@ -424,6 +444,7 @@ static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
     superblock->file_max    = lfs_tole32(superblock->file_max);
     superblock->attr_max    = lfs_tole32(superblock->attr_max);
 }
+#endif
 
 #ifndef LFS_NO_ASSERT
 static bool lfs_mlist_isopen(struct lfs_mlist *head,
@@ -460,13 +481,15 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
 static int lfs_dir_compact(lfs_t *lfs,
         lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
         lfs_mdir_t *source, uint16_t begin, uint16_t end);
-
+static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
+        const void *buffer, lfs_size_t size);
 static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
         const void *buffer, lfs_size_t size);
 static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file);
 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
 static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);
 
+static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss);
 static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
 static void lfs_fs_prepmove(lfs_t *lfs,
         uint16_t id, const lfs_block_t pair[2]);
@@ -474,8 +497,6 @@ static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
         lfs_mdir_t *pdir);
 static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2],
         lfs_mdir_t *parent);
-static int lfs_fs_relocate(lfs_t *lfs,
-        const lfs_block_t oldpair[2], lfs_block_t newpair[2]);
 static int lfs_fs_forceconsistency(lfs_t *lfs);
 #endif
 
@@ -486,6 +507,8 @@ static int lfs1_traverse(lfs_t *lfs,
 
 static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir);
 
+static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
+        void *buffer, lfs_size_t size);
 static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
         void *buffer, lfs_size_t size);
 static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file);
@@ -726,6 +749,7 @@ static int lfs_dir_traverse_filter(void *p,
             (LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == (
                 LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
                     (LFS_MKTAG(0, 0x3ff, 0) & *filtertag))) {
+        *filtertag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0);
         return true;
     }
 
@@ -740,98 +764,232 @@ static int lfs_dir_traverse_filter(void *p,
 #endif
 
 #ifndef LFS_READONLY
+// maximum recursive depth of lfs_dir_traverse, the deepest call:
+//
+// traverse with commit
+// '-> traverse with move
+//     '-> traverse with filter
+//
+#define LFS_DIR_TRAVERSE_DEPTH 3
+
+struct lfs_dir_traverse {
+    const lfs_mdir_t *dir;
+    lfs_off_t off;
+    lfs_tag_t ptag;
+    const struct lfs_mattr *attrs;
+    int attrcount;
+
+    lfs_tag_t tmask;
+    lfs_tag_t ttag;
+    uint16_t begin;
+    uint16_t end;
+    int16_t diff;
+
+    int (*cb)(void *data, lfs_tag_t tag, const void *buffer);
+    void *data;
+
+    lfs_tag_t tag;
+    const void *buffer;
+    struct lfs_diskoff disk;
+};
+
 static int lfs_dir_traverse(lfs_t *lfs,
         const lfs_mdir_t *dir, lfs_off_t off, lfs_tag_t ptag,
         const struct lfs_mattr *attrs, int attrcount,
         lfs_tag_t tmask, lfs_tag_t ttag,
         uint16_t begin, uint16_t end, int16_t diff,
         int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
+    // This function is inherently recursive, but bounded. To allow tool-based
+    // analysis without unnecessary code-cost we use an explicit stack
+    struct lfs_dir_traverse stack[LFS_DIR_TRAVERSE_DEPTH-1];
+    unsigned sp = 0;
+    int res;
+
     // iterate over directory and attrs
+    lfs_tag_t tag;
+    const void *buffer;
+    struct lfs_diskoff disk;
     while (true) {
-        lfs_tag_t tag;
-        const void *buffer;
-        struct lfs_diskoff disk;
-        if (off+lfs_tag_dsize(ptag) < dir->off) {
-            off += lfs_tag_dsize(ptag);
-            int err = lfs_bd_read(lfs,
-                    NULL, &lfs->rcache, sizeof(tag),
-                    dir->pair[0], off, &tag, sizeof(tag));
-            if (err) {
-                return err;
-            }
-
-            tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000;
-            disk.block = dir->pair[0];
-            disk.off = off+sizeof(lfs_tag_t);
-            buffer = &disk;
-            ptag = tag;
-        } else if (attrcount > 0) {
-            tag = attrs[0].tag;
-            buffer = attrs[0].buffer;
-            attrs += 1;
-            attrcount -= 1;
-        } else {
-            return 0;
-        }
-
-        lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0);
-        if ((mask & tmask & tag) != (mask & tmask & ttag)) {
-            continue;
-        }
+        {
+            if (off+lfs_tag_dsize(ptag) < dir->off) {
+                off += lfs_tag_dsize(ptag);
+                int err = lfs_bd_read(lfs,
+                        NULL, &lfs->rcache, sizeof(tag),
+                        dir->pair[0], off, &tag, sizeof(tag));
+                if (err) {
+                    return err;
+                }
 
-        // do we need to filter? inlining the filtering logic here allows
-        // for some minor optimizations
-        if (lfs_tag_id(tmask) != 0) {
-            // scan for duplicates and update tag based on creates/deletes
-            int filter = lfs_dir_traverse(lfs,
-                    dir, off, ptag, attrs, attrcount,
-                    0, 0, 0, 0, 0,
-                    lfs_dir_traverse_filter, &tag);
-            if (filter < 0) {
-                return filter;
+                tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000;
+                disk.block = dir->pair[0];
+                disk.off = off+sizeof(lfs_tag_t);
+                buffer = &disk;
+                ptag = tag;
+            } else if (attrcount > 0) {
+                tag = attrs[0].tag;
+                buffer = attrs[0].buffer;
+                attrs += 1;
+                attrcount -= 1;
+            } else {
+                // finished traversal, pop from stack?
+                res = 0;
+                break;
             }
 
-            if (filter) {
+            // do we need to filter?
+            lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0);
+            if ((mask & tmask & tag) != (mask & tmask & ttag)) {
                 continue;
             }
 
-            // in filter range?
-            if (!(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) {
+            if (lfs_tag_id(tmask) != 0) {
+                LFS_ASSERT(sp < LFS_DIR_TRAVERSE_DEPTH);
+                // recurse, scan for duplicates, and update tag based on
+                // creates/deletes
+                stack[sp] = (struct lfs_dir_traverse){
+                    .dir        = dir,
+                    .off        = off,
+                    .ptag       = ptag,
+                    .attrs      = attrs,
+                    .attrcount  = attrcount,
+                    .tmask      = tmask,
+                    .ttag       = ttag,
+                    .begin      = begin,
+                    .end        = end,
+                    .diff       = diff,
+                    .cb         = cb,
+                    .data       = data,
+                    .tag        = tag,
+                    .buffer     = buffer,
+                    .disk       = disk,
+                };
+                sp += 1;
+
+                dir = dir;
+                off = off;
+                ptag = ptag;
+                attrs = attrs;
+                attrcount = attrcount;
+                tmask = 0;
+                ttag = 0;
+                begin = 0;
+                end = 0;
+                diff = 0;
+                cb = lfs_dir_traverse_filter;
+                data = &stack[sp-1].tag;
                 continue;
             }
         }
 
+popped:
+        // in filter range?
+        if (lfs_tag_id(tmask) != 0 &&
+                !(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) {
+            continue;
+        }
+
         // handle special cases for mcu-side operations
         if (lfs_tag_type3(tag) == LFS_FROM_NOOP) {
             // do nothing
         } else if (lfs_tag_type3(tag) == LFS_FROM_MOVE) {
+            // Without this condition, lfs_dir_traverse can exhibit an
+            // extremely expensive O(n^3) set of nested loops when renaming.
+            // This happens because lfs_dir_traverse tries to filter tags by
+            // the tags in the source directory, triggering a second
+            // lfs_dir_traverse with its own filter operation.
+            //
+            // traverse with commit
+            // '-> traverse with filter
+            //     '-> traverse with move
+            //         '-> traverse with filter
+            //
+            // However we don't actually care about filtering the second set of
+            // tags, since duplicate tags have no effect when filtering.
+            //
+            // This check skips this unnecessary recursive filtering explicitly,
+            // reducing this runtime from O(n^3) to O(n^2).
+            if (cb == lfs_dir_traverse_filter) {
+                continue;
+            }
+
+            // recurse into move
+            stack[sp] = (struct lfs_dir_traverse){
+                .dir        = dir,
+                .off        = off,
+                .ptag       = ptag,
+                .attrs      = attrs,
+                .attrcount  = attrcount,
+                .tmask      = tmask,
+                .ttag       = ttag,
+                .begin      = begin,
+                .end        = end,
+                .diff       = diff,
+                .cb         = cb,
+                .data       = data,
+                .tag        = LFS_MKTAG(LFS_FROM_NOOP, 0, 0),
+            };
+            sp += 1;
+
             uint16_t fromid = lfs_tag_size(tag);
             uint16_t toid = lfs_tag_id(tag);
-            int err = lfs_dir_traverse(lfs,
-                    buffer, 0, 0xffffffff, NULL, 0,
-                    LFS_MKTAG(0x600, 0x3ff, 0),
-                    LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0),
-                    fromid, fromid+1, toid-fromid+diff,
-                    cb, data);
-            if (err) {
-                return err;
-            }
+            dir = buffer;
+            off = 0;
+            ptag = 0xffffffff;
+            attrs = NULL;
+            attrcount = 0;
+            tmask = LFS_MKTAG(0x600, 0x3ff, 0);
+            ttag = LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0);
+            begin = fromid;
+            end = fromid+1;
+            diff = toid-fromid+diff;
         } else if (lfs_tag_type3(tag) == LFS_FROM_USERATTRS) {
             for (unsigned i = 0; i < lfs_tag_size(tag); i++) {
                 const struct lfs_attr *a = buffer;
-                int err = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type,
+                res = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type,
                         lfs_tag_id(tag) + diff, a[i].size), a[i].buffer);
-                if (err) {
-                    return err;
+                if (res < 0) {
+                    return res;
+                }
+
+                if (res) {
+                    break;
                 }
             }
         } else {
-            int err = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer);
-            if (err) {
-                return err;
+            res = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer);
+            if (res < 0) {
+                return res;
+            }
+
+            if (res) {
+                break;
             }
         }
     }
+
+    if (sp > 0) {
+        // pop from the stack and return, fortunately all pops share
+        // a destination
+        dir         = stack[sp-1].dir;
+        off         = stack[sp-1].off;
+        ptag        = stack[sp-1].ptag;
+        attrs       = stack[sp-1].attrs;
+        attrcount   = stack[sp-1].attrcount;
+        tmask       = stack[sp-1].tmask;
+        ttag        = stack[sp-1].ttag;
+        begin       = stack[sp-1].begin;
+        end         = stack[sp-1].end;
+        diff        = stack[sp-1].diff;
+        cb          = stack[sp-1].cb;
+        data        = stack[sp-1].data;
+        tag         = stack[sp-1].tag;
+        buffer      = stack[sp-1].buffer;
+        disk        = stack[sp-1].disk;
+        sp -= 1;
+        goto popped;
+    } else {
+        return res;
+    }
 }
 #endif
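
The rewritten lfs_dir_traverse above trades recursion for a small explicit stack
bounded by LFS_DIR_TRAVERSE_DEPTH: instead of calling itself, it saves its
arguments, overwrites them with the "callee's" arguments, and loops, popping the
saved state once the inner pass finishes. A generic sketch of the same idea, with
hypothetical types that are not part of littlefs:

    #include <stddef.h>

    // Hypothetical example: an in-order tree sum whose recursion has been
    // replaced by an explicit, fixed-size stack, assuming the tree depth
    // never exceeds MAX_DEPTH (littlefs knows its bound statically, which
    // is what makes the stack array safe to size at compile time).
    #define MAX_DEPTH 16

    struct node { struct node *left, *right; int value; };

    static int sum_tree(const struct node *root) {
        const struct node *stack[MAX_DEPTH];
        unsigned sp = 0;
        int total = 0;

        const struct node *n = root;
        while (n || sp > 0) {
            if (n) {
                stack[sp++] = n;   // "call": remember where to resume
                n = n->left;       // descend instead of recursing
            } else {
                n = stack[--sp];   // "return": pop the saved frame
                total += n->value;
                n = n->right;
            }
        }
        return total;
    }
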
 
@@ -1449,7 +1607,7 @@ static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
         }
     }
 
-    // zero for reproducability in case initial block is unreadable
+    // zero for reproducibility in case initial block is unreadable
     dir->rev = 0;
 
     // rather than clobbering one of the blocks we just pretend
@@ -1508,8 +1666,7 @@ static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) {
 static int lfs_dir_split(lfs_t *lfs,
         lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
         lfs_mdir_t *source, uint16_t split, uint16_t end) {
-    // create tail directory
-    lfs_alloc_ack(lfs);
+    // create tail metadata pair
     lfs_mdir_t tail;
     int err = lfs_dir_alloc(lfs, &tail);
     if (err) {
@@ -1520,9 +1677,10 @@ static int lfs_dir_split(lfs_t *lfs,
     tail.tail[0] = dir->tail[0];
     tail.tail[1] = dir->tail[1];
 
-    err = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
-    if (err) {
-        return err;
+    // note we don't care about LFS_OK_RELOCATED
+    int res = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
+    if (res < 0) {
+        return res;
     }
 
     dir->tail[0] = tail.pair[0];
@@ -1563,107 +1721,45 @@ static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) {
 }
 #endif
 
+#ifndef LFS_READONLY
+static bool lfs_dir_needsrelocation(lfs_t *lfs, lfs_mdir_t *dir) {
+    // If our revision count == n * block_cycles, we should force a relocation,
+    // this is how littlefs wear-levels at the metadata-pair level. Note that we
+    // actually use (block_cycles+1)|1, this is to avoid two corner cases:
+    // 1. block_cycles = 1, which would prevent relocations from terminating
+    // 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
+    //    one metadata block in the pair, effectively making this useless
+    return (lfs->cfg->block_cycles > 0
+            && ((dir->rev + 1) % ((lfs->cfg->block_cycles+1)|1) == 0));
+}
+#endif
+
 #ifndef LFS_READONLY
 static int lfs_dir_compact(lfs_t *lfs,
         lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
         lfs_mdir_t *source, uint16_t begin, uint16_t end) {
     // save some state in case block is bad
-    const lfs_block_t oldpair[2] = {dir->pair[0], dir->pair[1]};
     bool relocated = false;
-    bool tired = false;
-
-    // should we split?
-    while (end - begin > 1) {
-        // find size
-        lfs_size_t size = 0;
-        int err = lfs_dir_traverse(lfs,
-                source, 0, 0xffffffff, attrs, attrcount,
-                LFS_MKTAG(0x400, 0x3ff, 0),
-                LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
-                begin, end, -begin,
-                lfs_dir_commit_size, &size);
-        if (err) {
-            return err;
-        }
-
-        // space is complicated, we need room for tail, crc, gstate,
-        // cleanup delete, and we cap at half a block to give room
-        // for metadata updates.
-        if (end - begin < 0xff &&
-                size <= lfs_min(lfs->cfg->block_size - 36,
-                    lfs_alignup((lfs->cfg->metadata_max ?
-                            lfs->cfg->metadata_max : lfs->cfg->block_size)/2,
-                        lfs->cfg->prog_size))) {
-            break;
-        }
-
-        // can't fit, need to split, we should really be finding the
-        // largest size that fits with a small binary search, but right now
-        // it's not worth the code size
-        uint16_t split = (end - begin) / 2;
-        err = lfs_dir_split(lfs, dir, attrs, attrcount,
-                source, begin+split, end);
-        if (err) {
-            // if we fail to split, we may be able to overcompact, unless
-            // we're too big for even the full block, in which case our
-            // only option is to error
-            if (err == LFS_ERR_NOSPC && size <= lfs->cfg->block_size - 36) {
-                break;
-            }
-            return err;
-        }
-
-        end = begin + split;
-    }
+    bool tired = lfs_dir_needsrelocation(lfs, dir);
 
     // increment revision count
     dir->rev += 1;
-    // If our revision count == n * block_cycles, we should force a relocation,
-    // this is how littlefs wear-levels at the metadata-pair level. Note that we
-    // actually use (block_cycles+1)|1, this is to avoid two corner cases:
-    // 1. block_cycles = 1, which would prevent relocations from terminating
-    // 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
-    //    one metadata block in the pair, effectively making this useless
-    if (lfs->cfg->block_cycles > 0 &&
-            (dir->rev % ((lfs->cfg->block_cycles+1)|1) == 0)) {
-        if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
-            // oh no! we're writing too much to the superblock,
-            // should we expand?
-            lfs_ssize_t res = lfs_fs_rawsize(lfs);
-            if (res < 0) {
-                return res;
-            }
-
-            // do we have extra space? littlefs can't reclaim this space
-            // by itself, so expand cautiously
-            if ((lfs_size_t)res < lfs->cfg->block_count/2) {
-                LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
-                int err = lfs_dir_split(lfs, dir, attrs, attrcount,
-                        source, begin, end);
-                if (err && err != LFS_ERR_NOSPC) {
-                    return err;
-                }
 
-                // welp, we tried, if we ran out of space there's not much
-                // we can do, we'll error later if we've become frozen
-                if (!err) {
-                    end = begin;
-                }
-            }
+    // do not proactively relocate blocks during migrations, this
+    // can cause a number of failure states such as: clobbering the
+    // v1 superblock if we relocate root, and invalidating directory
+    // pointers if we relocate the head of a directory. On top of
+    // this, relocations increase the overall complexity of
+    // lfs_migration, which is already a delicate operation.
 #ifdef LFS_MIGRATE
-        } else if (lfs->lfs1) {
-            // do not proactively relocate blocks during migrations, this
-            // can cause a number of failure states such: clobbering the
-            // v1 superblock if we relocate root, and invalidating directory
-            // pointers if we relocate the head of a directory. On top of
-            // this, relocations increase the overall complexity of
-            // lfs_migration, which is already a delicate operation.
+    if (lfs->lfs1) {
+        tired = false;
+    }
 #endif
-        } else {
-            // we're writing too much, time to relocate
-            tired = true;
-            goto relocate;
-        }
+
+    if (tired && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) != 0) {
+        // we're writing too much, time to relocate
+        goto relocate;
     }
 
     // begin loop to commit compaction to blocks until a compact sticks
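
The (block_cycles+1)|1 modulus in lfs_dir_needsrelocation above is easiest to
sanity-check with concrete numbers; a small standalone sketch with made-up
configuration values, not part of the patch:

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    // Same condition as lfs_dir_needsrelocation, with assumed inputs.
    // Forcing the modulus odd avoids block_cycles = 1 (which would keep
    // relocations from terminating) and even values (which would only ever
    // relocate the same block of the pair), per the comment above.
    static bool needs_relocation(uint32_t block_cycles, uint32_t rev) {
        return block_cycles > 0 && ((rev + 1) % ((block_cycles + 1) | 1) == 0);
    }

    int main(void) {
        // block_cycles = 100 -> modulus 101, so revs 100, 201, ... relocate
        for (uint32_t rev = 98; rev <= 101; rev++) {
            printf("rev=%" PRIu32 " relocate=%d\n",
                    rev, needs_relocation(100, rev));
        }
        return 0;
    }
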
@@ -1807,44 +1903,114 @@ relocate:
         continue;
     }
 
-    if (relocated) {
-        // update references if we relocated
-        LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
-                    "-> {0x%"PRIx32", 0x%"PRIx32"}",
-                oldpair[0], oldpair[1], dir->pair[0], dir->pair[1]);
-        int err = lfs_fs_relocate(lfs, oldpair, dir->pair);
-        if (err) {
-            return err;
-        }
-    }
-
-    return 0;
+    return relocated ? LFS_OK_RELOCATED : 0;
 }
 #endif
 
 #ifndef LFS_READONLY
-static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
-        const struct lfs_mattr *attrs, int attrcount) {
-    // check for any inline files that aren't RAM backed and
-    // forcefully evict them, needed for filesystem consistency
-    for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
-        if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
-                f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
-                f->ctz.size > lfs->cfg->cache_size) {
-            int err = lfs_file_outline(lfs, f);
+static int lfs_dir_splittingcompact(lfs_t *lfs, lfs_mdir_t *dir,
+        const struct lfs_mattr *attrs, int attrcount,
+        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
+    while (true) {
+        // find size of first split, we do this by halving the split until
+        // the metadata is guaranteed to fit
+        //
+        // Note that this isn't a true binary search, we never increase the
+        // split size. This may result in poorly distributed metadata but isn't
+        // worth the extra code size or performance hit to fix.
+        lfs_size_t split = begin;
+        while (end - split > 1) {
+            lfs_size_t size = 0;
+            int err = lfs_dir_traverse(lfs,
+                    source, 0, 0xffffffff, attrs, attrcount,
+                    LFS_MKTAG(0x400, 0x3ff, 0),
+                    LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
+                    split, end, -split,
+                    lfs_dir_commit_size, &size);
             if (err) {
                 return err;
             }
 
-            err = lfs_file_flush(lfs, f);
-            if (err) {
+            // space is complicated, we need room for tail, crc, gstate,
+            // cleanup delete, and we cap at half a block to give room
+            // for metadata updates.
+            if (end - split < 0xff
+                    && size <= lfs_min(lfs->cfg->block_size - 36,
+                        lfs_alignup(
+                            (lfs->cfg->metadata_max
+                                ? lfs->cfg->metadata_max
+                                : lfs->cfg->block_size)/2,
+                            lfs->cfg->prog_size))) {
+                break;
+            }
+
+            split = split + ((end - split) / 2);
+        }
+
+        if (split == begin) {
+            // no split needed
+            break;
+        }
+
+        // split into two metadata pairs and continue
+        int err = lfs_dir_split(lfs, dir, attrs, attrcount,
+                source, split, end);
+        if (err && err != LFS_ERR_NOSPC) {
+            return err;
+        }
+
+        if (err) {
+            // we can't allocate a new block, try to compact with degraded
+            // performance
+            LFS_WARN("Unable to split {0x%"PRIx32", 0x%"PRIx32"}",
+                    dir->pair[0], dir->pair[1]);
+            break;
+        } else {
+            end = split;
+        }
+    }
+
+    if (lfs_dir_needsrelocation(lfs, dir)
+            && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
+        // oh no! we're writing too much to the superblock,
+        // should we expand?
+        lfs_ssize_t size = lfs_fs_rawsize(lfs);
+        if (size < 0) {
+            return size;
+        }
+
+        // do we have extra space? littlefs can't reclaim this space
+        // by itself, so expand cautiously
+        if ((lfs_size_t)size < lfs->cfg->block_count/2) {
+            LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
+            int err = lfs_dir_split(lfs, dir, attrs, attrcount,
+                    source, begin, end);
+            if (err && err != LFS_ERR_NOSPC) {
                 return err;
             }
+
+            if (err) {
+                // welp, we tried, if we ran out of space there's not much
+                // we can do, we'll error later if we've become frozen
+                LFS_WARN("Unable to expand superblock");
+            } else {
+                end = begin;
+            }
         }
     }
 
+    return lfs_dir_compact(lfs, dir, attrs, attrcount, source, begin, end);
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_relocatingcommit(lfs_t *lfs, lfs_mdir_t *dir,
+        const lfs_block_t pair[2],
+        const struct lfs_mattr *attrs, int attrcount,
+        lfs_mdir_t *pdir) {
+    int state = 0;
+
     // calculate changes to the directory
-    lfs_mdir_t olddir = *dir;
     bool hasdelete = false;
     for (int i = 0; i < attrcount; i++) {
         if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) {
@@ -1863,23 +2029,19 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
 
     // should we actually drop the directory block?
     if (hasdelete && dir->count == 0) {
-        lfs_mdir_t pdir;
-        int err = lfs_fs_pred(lfs, dir->pair, &pdir);
+        LFS_ASSERT(pdir);
+        int err = lfs_fs_pred(lfs, dir->pair, pdir);
         if (err && err != LFS_ERR_NOENT) {
-            *dir = olddir;
             return err;
         }
 
-        if (err != LFS_ERR_NOENT && pdir.split) {
-            err = lfs_dir_drop(lfs, &pdir, dir);
-            if (err) {
-                *dir = olddir;
-                return err;
-            }
+        if (err != LFS_ERR_NOENT && pdir->split) {
+            state = LFS_OK_DROPPED;
+            goto fixmlist;
         }
     }
 
-    if (dir->erased || dir->count >= 0xff) {
+    if (dir->erased) {
         // try to commit
         struct lfs_commit commit = {
             .block = dir->pair[0],
@@ -1904,7 +2066,6 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
             if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                 goto compact;
             }
-            *dir = olddir;
             return err;
         }
 
@@ -1917,7 +2078,6 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
         if (!lfs_gstate_iszero(&delta)) {
             err = lfs_dir_getgstate(lfs, dir, &delta);
             if (err) {
-                *dir = olddir;
                 return err;
             }
 
@@ -1929,7 +2089,6 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
                 if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                     goto compact;
                 }
-                *dir = olddir;
                 return err;
             }
         }
@@ -1940,7 +2099,6 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
             if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                 goto compact;
             }
-            *dir = olddir;
             return err;
         }
 
@@ -1951,61 +2109,279 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
         // and update gstate
         lfs->gdisk = lfs->gstate;
         lfs->gdelta = (lfs_gstate_t){0};
-    } else {
+
+        goto fixmlist;
+    }
+
 compact:
-        // fall back to compaction
-        lfs_cache_drop(lfs, &lfs->pcache);
+    // fall back to compaction
+    lfs_cache_drop(lfs, &lfs->pcache);
+
+    state = lfs_dir_splittingcompact(lfs, dir, attrs, attrcount,
+            dir, 0, dir->count);
+    if (state < 0) {
+        return state;
+    }
+
+    goto fixmlist;
+
+fixmlist:;
+    // this complicated bit of logic is for fixing up any active
+    // metadata-pairs that we may have affected
+    //
+    // note we have to make two passes since the mdir passed to
+    // lfs_dir_commit could also be in this list, and even then
+    // we need to copy the pair so they don't get clobbered if we refetch
+    // our mdir.
+    lfs_block_t oldpair[2] = {pair[0], pair[1]};
+    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
+        if (lfs_pair_cmp(d->m.pair, oldpair) == 0) {
+            d->m = *dir;
+            if (d->m.pair != pair) {
+                for (int i = 0; i < attrcount; i++) {
+                    if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
+                            d->id == lfs_tag_id(attrs[i].tag)) {
+                        d->m.pair[0] = LFS_BLOCK_NULL;
+                        d->m.pair[1] = LFS_BLOCK_NULL;
+                    } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
+                            d->id > lfs_tag_id(attrs[i].tag)) {
+                        d->id -= 1;
+                        if (d->type == LFS_TYPE_DIR) {
+                            ((lfs_dir_t*)d)->pos -= 1;
+                        }
+                    } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE &&
+                            d->id >= lfs_tag_id(attrs[i].tag)) {
+                        d->id += 1;
+                        if (d->type == LFS_TYPE_DIR) {
+                            ((lfs_dir_t*)d)->pos += 1;
+                        }
+                    }
+                }
+            }
+
+            while (d->id >= d->m.count && d->m.split) {
+                // we split and id is on tail now
+                d->id -= d->m.count;
+                int err = lfs_dir_fetch(lfs, &d->m, d->m.tail);
+                if (err) {
+                    return err;
+                }
+            }
+        }
+    }
+
+    return state;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_orphaningcommit(lfs_t *lfs, lfs_mdir_t *dir,
+        const struct lfs_mattr *attrs, int attrcount) {
+    // check for any inline files that aren't RAM backed and
+    // forcefully evict them, needed for filesystem consistency
+    for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
+        if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
+                f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
+                f->ctz.size > lfs->cfg->cache_size) {
+            int err = lfs_file_outline(lfs, f);
+            if (err) {
+                return err;
+            }
+
+            err = lfs_file_flush(lfs, f);
+            if (err) {
+                return err;
+            }
+        }
+    }
+
+    lfs_block_t lpair[2] = {dir->pair[0], dir->pair[1]};
+    lfs_mdir_t ldir = *dir;
+    lfs_mdir_t pdir;
+    int state = lfs_dir_relocatingcommit(lfs, &ldir, dir->pair,
+            attrs, attrcount, &pdir);
+    if (state < 0) {
+        return state;
+    }
+
+    // update if we're not in mlist, note we may have already been
+    // updated if we are in mlist
+    if (lfs_pair_cmp(dir->pair, lpair) == 0) {
+        *dir = ldir;
+    }
+
+    // commit was successful, but may require other changes in the
+    // filesystem, these would normally be tail recursive, but we have
+    // flattened them here to avoid unbounded stack usage
+
+    // need to drop?
+    if (state == LFS_OK_DROPPED) {
+        // steal state
+        int err = lfs_dir_getgstate(lfs, dir, &lfs->gdelta);
+        if (err) {
+            return err;
+        }
+
+        // steal tail, note that this can't create a recursive drop
+        lpair[0] = pdir.pair[0];
+        lpair[1] = pdir.pair[1];
+        lfs_pair_tole32(dir->tail);
+        state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
+                    {LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
+                        dir->tail}),
+                NULL);
+        lfs_pair_fromle32(dir->tail);
+        if (state < 0) {
+            return state;
+        }
+
+        ldir = pdir;
+    }
+
+    // need to relocate?
+    bool orphans = false;
+    while (state == LFS_OK_RELOCATED) {
+        LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
+                    "-> {0x%"PRIx32", 0x%"PRIx32"}",
+                lpair[0], lpair[1], ldir.pair[0], ldir.pair[1]);
+        state = 0;
+
+        // update internal root
+        if (lfs_pair_cmp(lpair, lfs->root) == 0) {
+            lfs->root[0] = ldir.pair[0];
+            lfs->root[1] = ldir.pair[1];
+        }
+
+        // update internally tracked dirs
+        for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
+            if (lfs_pair_cmp(lpair, d->m.pair) == 0) {
+                d->m.pair[0] = ldir.pair[0];
+                d->m.pair[1] = ldir.pair[1];
+            }
+
+            if (d->type == LFS_TYPE_DIR &&
+                    lfs_pair_cmp(lpair, ((lfs_dir_t*)d)->head) == 0) {
+                ((lfs_dir_t*)d)->head[0] = ldir.pair[0];
+                ((lfs_dir_t*)d)->head[1] = ldir.pair[1];
+            }
+        }
 
-        int err = lfs_dir_compact(lfs, dir, attrs, attrcount,
-                dir, 0, dir->count);
-        if (err) {
-            *dir = olddir;
-            return err;
+        // find parent
+        lfs_stag_t tag = lfs_fs_parent(lfs, lpair, &pdir);
+        if (tag < 0 && tag != LFS_ERR_NOENT) {
+            return tag;
         }
-    }
 
-    // this complicated bit of logic is for fixing up any active
-    // metadata-pairs that we may have affected
-    //
-    // note we have to make two passes since the mdir passed to
-    // lfs_dir_commit could also be in this list, and even then
-    // we need to copy the pair so they don't get clobbered if we refetch
-    // our mdir.
-    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
-        if (&d->m != dir && lfs_pair_cmp(d->m.pair, olddir.pair) == 0) {
-            d->m = *dir;
-            for (int i = 0; i < attrcount; i++) {
-                if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
-                        d->id == lfs_tag_id(attrs[i].tag)) {
-                    d->m.pair[0] = LFS_BLOCK_NULL;
-                    d->m.pair[1] = LFS_BLOCK_NULL;
-                } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
-                        d->id > lfs_tag_id(attrs[i].tag)) {
-                    d->id -= 1;
-                    if (d->type == LFS_TYPE_DIR) {
-                        ((lfs_dir_t*)d)->pos -= 1;
-                    }
-                } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE &&
-                        d->id >= lfs_tag_id(attrs[i].tag)) {
-                    d->id += 1;
-                    if (d->type == LFS_TYPE_DIR) {
-                        ((lfs_dir_t*)d)->pos += 1;
-                    }
+        bool hasparent = (tag != LFS_ERR_NOENT);
+        if (tag != LFS_ERR_NOENT) {
+            // note that if we have a parent, we must have a pred, so this will
+            // always create an orphan
+            int err = lfs_fs_preporphans(lfs, +1);
+            if (err) {
+                return err;
+            }
+
+            // fix pending move in this pair? this looks like an optimization but
+            // is in fact _required_ since relocating may outdate the move.
+            uint16_t moveid = 0x3ff;
+            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
+                moveid = lfs_tag_id(lfs->gstate.tag);
+                LFS_DEBUG("Fixing move while relocating "
+                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
+                        pdir.pair[0], pdir.pair[1], moveid);
+                lfs_fs_prepmove(lfs, 0x3ff, NULL);
+                if (moveid < lfs_tag_id(tag)) {
+                    tag -= LFS_MKTAG(0, 1, 0);
                 }
             }
+
+            lfs_block_t ppair[2] = {pdir.pair[0], pdir.pair[1]};
+            lfs_pair_tole32(ldir.pair);
+            state = lfs_dir_relocatingcommit(lfs, &pdir, ppair, LFS_MKATTRS(
+                        {LFS_MKTAG_IF(moveid != 0x3ff,
+                            LFS_TYPE_DELETE, moveid, 0), NULL},
+                        {tag, ldir.pair}),
+                    NULL);
+            lfs_pair_fromle32(ldir.pair);
+            if (state < 0) {
+                return state;
+            }
+
+            if (state == LFS_OK_RELOCATED) {
+                lpair[0] = ppair[0];
+                lpair[1] = ppair[1];
+                ldir = pdir;
+                orphans = true;
+                continue;
+            }
         }
-    }
 
-    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
-        if (lfs_pair_cmp(d->m.pair, olddir.pair) == 0) {
-            while (d->id >= d->m.count && d->m.split) {
-                // we split and id is on tail now
-                d->id -= d->m.count;
-                int err = lfs_dir_fetch(lfs, &d->m, d->m.tail);
+        // find pred
+        int err = lfs_fs_pred(lfs, lpair, &pdir);
+        if (err && err != LFS_ERR_NOENT) {
+            return err;
+        }
+        LFS_ASSERT(!(hasparent && err == LFS_ERR_NOENT));
+
+        // if we can't find dir, it must be new
+        if (err != LFS_ERR_NOENT) {
+            if (lfs_gstate_hasorphans(&lfs->gstate)) {
+                // next step, clean up orphans
+                err = lfs_fs_preporphans(lfs, -hasparent);
                 if (err) {
                     return err;
                 }
             }
+
+            // fix pending move in this pair? this looks like an optimization
+            // but is in fact _required_ since relocating may outdate the move.
+            uint16_t moveid = 0x3ff;
+            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
+                moveid = lfs_tag_id(lfs->gstate.tag);
+                LFS_DEBUG("Fixing move while relocating "
+                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
+                        pdir.pair[0], pdir.pair[1], moveid);
+                lfs_fs_prepmove(lfs, 0x3ff, NULL);
+            }
+
+            // replace bad pair, either we clean up desync, or no desync occurred
+            lpair[0] = pdir.pair[0];
+            lpair[1] = pdir.pair[1];
+            lfs_pair_tole32(ldir.pair);
+            state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
+                        {LFS_MKTAG_IF(moveid != 0x3ff,
+                            LFS_TYPE_DELETE, moveid, 0), NULL},
+                        {LFS_MKTAG(LFS_TYPE_TAIL + pdir.split, 0x3ff, 8),
+                            ldir.pair}),
+                    NULL);
+            lfs_pair_fromle32(ldir.pair);
+            if (state < 0) {
+                return state;
+            }
+
+            ldir = pdir;
+        }
+    }
+
+    return orphans ? LFS_OK_ORPHANED : 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
+        const struct lfs_mattr *attrs, int attrcount) {
+    int orphans = lfs_dir_orphaningcommit(lfs, dir, attrs, attrcount);
+    if (orphans < 0) {
+        return orphans;
+    }
+
+    if (orphans) {
+        // make sure we've removed all orphans, this is a noop if there
+        // are none, but if we had nested blocks failures we may have
+        // created some
+        int err = lfs_fs_deorphan(lfs, false);
+        if (err) {
+            return err;
         }
     }
 
@@ -2063,7 +2439,7 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
         return err;
     }
 
-    // current block end of list?
+    // current block not end of list?
     if (cwd.m.split) {
         // update tails, this creates a desync
         err = lfs_fs_preporphans(lfs, +1);
@@ -2513,8 +2889,11 @@ static int lfs_file_rawopencfg(lfs_t *lfs, lfs_file_t *file,
                 {LFS_MKTAG(LFS_TYPE_CREATE, file->id, 0), NULL},
                 {LFS_MKTAG(LFS_TYPE_REG, file->id, nlen), path},
                 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), NULL}));
+
+        // it may happen that the file name doesn't fit in the metadata blocks, e.g., a 256 byte file name will
+        // not fit in a 128 byte block.
+        err = (err == LFS_ERR_NOSPC) ? LFS_ERR_NAMETOOLONG : err;
         if (err) {
-            err = LFS_ERR_NAMETOOLONG;
             goto cleanup;
         }
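
With the error mapping above, only LFS_ERR_NOSPC from the create commit is
reported as LFS_ERR_NAMETOOLONG; other commit failures now propagate unchanged.
A hedged usage sketch (assumes lfs is already mounted and very_long_name is a
caller-defined string):

    // sketch: detect a name that cannot fit in a metadata block
    lfs_file_t file;
    int err = lfs_file_open(&lfs, &file, very_long_name,
            LFS_O_WRONLY | LFS_O_CREAT);
    if (err == LFS_ERR_NAMETOOLONG) {
        // shorten the name, or use a larger block_size/metadata_max
    }
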
 
@@ -2730,7 +3109,6 @@ static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
 }
 #endif
 
-#ifndef LFS_READONLY
 static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
     if (file->flags & LFS_F_READING) {
         if (!(file->flags & LFS_F_INLINE)) {
@@ -2739,6 +3117,7 @@ static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
         file->flags &= ~LFS_F_READING;
     }
 
+#ifndef LFS_READONLY
     if (file->flags & LFS_F_WRITING) {
         lfs_off_t pos = file->pos;
 
@@ -2757,12 +3136,12 @@ static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
                 // copy over a byte at a time, leave it up to caching
                 // to make this efficient
                 uint8_t data;
-                lfs_ssize_t res = lfs_file_rawread(lfs, &orig, &data, 1);
+                lfs_ssize_t res = lfs_file_flushedread(lfs, &orig, &data, 1);
                 if (res < 0) {
                     return res;
                 }
 
-                res = lfs_file_rawwrite(lfs, file, &data, 1);
+                res = lfs_file_flushedwrite(lfs, file, &data, 1);
                 if (res < 0) {
                     return res;
                 }
@@ -2805,10 +3184,10 @@ relocate:
 
         file->pos = pos;
     }
+#endif
 
     return 0;
 }
-#endif
 
 #ifndef LFS_READONLY
 static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file) {
@@ -2863,23 +3242,11 @@ static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file) {
 }
 #endif
 
-static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
+static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
         void *buffer, lfs_size_t size) {
-    LFS_ASSERT((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY);
-
     uint8_t *data = buffer;
     lfs_size_t nsize = size;
 
-#ifndef LFS_READONLY
-    if (file->flags & LFS_F_WRITING) {
-        // flush out any writes
-        int err = lfs_file_flush(lfs, file);
-        if (err) {
-            return err;
-        }
-    }
-#endif
-
     if (file->pos >= file->ctz.size) {
         // eof if past end
         return 0;
@@ -2936,43 +3303,29 @@ static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
     return size;
 }
 
-#ifndef LFS_READONLY
-static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
-        const void *buffer, lfs_size_t size) {
-    LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);
-
-    const uint8_t *data = buffer;
-    lfs_size_t nsize = size;
+static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
+        void *buffer, lfs_size_t size) {
+    LFS_ASSERT((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY);
 
-    if (file->flags & LFS_F_READING) {
-        // drop any reads
+#ifndef LFS_READONLY
+    if (file->flags & LFS_F_WRITING) {
+        // flush out any writes
         int err = lfs_file_flush(lfs, file);
         if (err) {
             return err;
         }
     }
+#endif
 
-    if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) {
-        file->pos = file->ctz.size;
-    }
-
-    if (file->pos + size > lfs->file_max) {
-        // Larger than file limit?
-        return LFS_ERR_FBIG;
-    }
+    return lfs_file_flushedread(lfs, file, buffer, size);
+}
 
-    if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) {
-        // fill with zeros
-        lfs_off_t pos = file->pos;
-        file->pos = file->ctz.size;
 
-        while (file->pos < pos) {
-            lfs_ssize_t res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1);
-            if (res < 0) {
-                return res;
-            }
-        }
-    }
+#ifndef LFS_READONLY
+static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
+        const void *buffer, lfs_size_t size) {
+    const uint8_t *data = buffer;
+    lfs_size_t nsize = size;
 
     if ((file->flags & LFS_F_INLINE) &&
             lfs_max(file->pos+nsize, file->ctz.size) >
@@ -3054,9 +3407,51 @@ relocate:
         lfs_alloc_ack(lfs);
     }
 
-    file->flags &= ~LFS_F_ERRED;
     return size;
 }
+
+static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
+        const void *buffer, lfs_size_t size) {
+    LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);
+
+    if (file->flags & LFS_F_READING) {
+        // drop any reads
+        int err = lfs_file_flush(lfs, file);
+        if (err) {
+            return err;
+        }
+    }
+
+    if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) {
+        file->pos = file->ctz.size;
+    }
+
+    if (file->pos + size > lfs->file_max) {
+        // Larger than file limit?
+        return LFS_ERR_FBIG;
+    }
+
+    if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) {
+        // fill with zeros
+        lfs_off_t pos = file->pos;
+        file->pos = file->ctz.size;
+
+        while (file->pos < pos) {
+            lfs_ssize_t res = lfs_file_flushedwrite(lfs, file, &(uint8_t){0}, 1);
+            if (res < 0) {
+                return res;
+            }
+        }
+    }
+
+    lfs_ssize_t nsize = lfs_file_flushedwrite(lfs, file, buffer, size);
+    if (nsize < 0) {
+        return nsize;
+    }
+
+    file->flags &= ~LFS_F_ERRED;
+    return nsize;
+}
 #endif
 
 static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
@@ -3066,9 +3461,18 @@ static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
     if (whence == LFS_SEEK_SET) {
         npos = off;
     } else if (whence == LFS_SEEK_CUR) {
-        npos = file->pos + off;
+        if ((lfs_soff_t)file->pos + off < 0) {
+            return LFS_ERR_INVAL;
+        } else {
+            npos = file->pos + off;
+        }
     } else if (whence == LFS_SEEK_END) {
-        npos = lfs_file_rawsize(lfs, file) + off;
+        lfs_soff_t res = lfs_file_rawsize(lfs, file) + off;
+        if (res < 0) {
+            return LFS_ERR_INVAL;
+        } else {
+            npos = res;
+        }
     }
 
     if (npos > lfs->file_max) {
@@ -3081,13 +3485,32 @@ static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
         return npos;
     }
 
+    // if we're only reading and our new offset is still in the file's cache
+    // we can avoid flushing and needing to reread the data
+    if (
 #ifndef LFS_READONLY
+        !(file->flags & LFS_F_WRITING)
+#else
+        true
+#endif
+            ) {
+        int oindex = lfs_ctz_index(lfs, &(lfs_off_t){file->pos});
+        lfs_off_t noff = npos;
+        int nindex = lfs_ctz_index(lfs, &noff);
+        if (oindex == nindex
+                && noff >= file->cache.off
+                && noff < file->cache.off + file->cache.size) {
+            file->pos = npos;
+            file->off = noff;
+            return npos;
+        }
+    }
+
     // write out everything beforehand, may be noop if rdonly
     int err = lfs_file_flush(lfs, file);
     if (err) {
         return err;
     }
-#endif
 
     // update pos
     file->pos = npos;
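
Two behaviors change in lfs_file_rawseek above: seeks that would land before
offset 0 now fail with LFS_ERR_INVAL instead of wrapping, and a read-only seek
that stays inside the file's cache no longer forces a flush and reread. A short
usage sketch against the public API (assumes an already-opened file):

    // sketch: a relative seek past the start of the file now fails cleanly
    lfs_soff_t pos = lfs_file_seek(&lfs, &file, -16, LFS_SEEK_CUR);
    if (pos < 0) {
        // pos == LFS_ERR_INVAL when the current position is less than 16
    }
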
@@ -3381,7 +3804,8 @@ static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) {
     }
 
     lfs->mlist = prevdir.next;
-    if (prevtag != LFS_ERR_NOENT && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
+    if (prevtag != LFS_ERR_NOENT
+            && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
         // fix orphan
         err = lfs_fs_preporphans(lfs, -1);
         if (err) {
@@ -3761,6 +4185,20 @@ static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {
 
                 lfs->attr_max = superblock.attr_max;
             }
+
+            if (superblock.block_count != lfs->cfg->block_count) {
+                LFS_ERROR("Invalid block count (%"PRIu32" != %"PRIu32")",
+                        superblock.block_count, lfs->cfg->block_count);
+                err = LFS_ERR_INVAL;
+                goto cleanup;
+            }
+
+            if (superblock.block_size != lfs->cfg->block_size) {
+                LFS_ERROR("Invalid block size (%"PRIu32" != %"PRIu32")",
+                        superblock.block_size, lfs->cfg->block_size);
+                err = LFS_ERR_INVAL;
+                goto cleanup;
+            }
         }
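
The new superblock checks above make a geometry mismatch fail fast at mount time.
A hedged configuration sketch with assumed values (4 KiB blocks, 256 of them);
the numbers must match whatever the filesystem was formatted with:

    struct lfs_config cfg = {
        /* ... block device read/prog/erase/sync callbacks and buffers ... */
        .block_size  = 4096,   // must equal superblock.block_size on disk
        .block_count = 256,    // must equal superblock.block_count on disk
    };

    int err = lfs_mount(&lfs, &cfg);
    if (err == LFS_ERR_INVAL) {
        // geometry doesn't match the on-disk superblock; fix cfg or reformat
    }
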
 
         // has gstate?
@@ -3987,109 +4425,6 @@ static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
 }
 #endif
 
-#ifndef LFS_READONLY
-static int lfs_fs_relocate(lfs_t *lfs,
-        const lfs_block_t oldpair[2], lfs_block_t newpair[2]) {
-    // update internal root
-    if (lfs_pair_cmp(oldpair, lfs->root) == 0) {
-        lfs->root[0] = newpair[0];
-        lfs->root[1] = newpair[1];
-    }
-
-    // update internally tracked dirs
-    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
-        if (lfs_pair_cmp(oldpair, d->m.pair) == 0) {
-            d->m.pair[0] = newpair[0];
-            d->m.pair[1] = newpair[1];
-        }
-
-        if (d->type == LFS_TYPE_DIR &&
-                lfs_pair_cmp(oldpair, ((lfs_dir_t*)d)->head) == 0) {
-            ((lfs_dir_t*)d)->head[0] = newpair[0];
-            ((lfs_dir_t*)d)->head[1] = newpair[1];
-        }
-    }
-
-    // find parent
-    lfs_mdir_t parent;
-    lfs_stag_t tag = lfs_fs_parent(lfs, oldpair, &parent);
-    if (tag < 0 && tag != LFS_ERR_NOENT) {
-        return tag;
-    }
-
-    if (tag != LFS_ERR_NOENT) {
-        // update disk, this creates a desync
-        int err = lfs_fs_preporphans(lfs, +1);
-        if (err) {
-            return err;
-        }
-
-        // fix pending move in this pair? this looks like an optimization but
-        // is in fact _required_ since relocating may outdate the move.
-        uint16_t moveid = 0x3ff;
-        if (lfs_gstate_hasmovehere(&lfs->gstate, parent.pair)) {
-            moveid = lfs_tag_id(lfs->gstate.tag);
-            LFS_DEBUG("Fixing move while relocating "
-                    "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
-                    parent.pair[0], parent.pair[1], moveid);
-            lfs_fs_prepmove(lfs, 0x3ff, NULL);
-            if (moveid < lfs_tag_id(tag)) {
-                tag -= LFS_MKTAG(0, 1, 0);
-            }
-        }
-
-        lfs_pair_tole32(newpair);
-        err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS(
-                {LFS_MKTAG_IF(moveid != 0x3ff,
-                    LFS_TYPE_DELETE, moveid, 0), NULL},
-                {tag, newpair}));
-        lfs_pair_fromle32(newpair);
-        if (err) {
-            return err;
-        }
-
-        // next step, clean up orphans
-        err = lfs_fs_preporphans(lfs, -1);
-        if (err) {
-            return err;
-        }
-    }
-
-    // find pred
-    int err = lfs_fs_pred(lfs, oldpair, &parent);
-    if (err && err != LFS_ERR_NOENT) {
-        return err;
-    }
-
-    // if we can't find dir, it must be new
-    if (err != LFS_ERR_NOENT) {
-        // fix pending move in this pair? this looks like an optimization but
-        // is in fact _required_ since relocating may outdate the move.
-        uint16_t moveid = 0x3ff;
-        if (lfs_gstate_hasmovehere(&lfs->gstate, parent.pair)) {
-            moveid = lfs_tag_id(lfs->gstate.tag);
-            LFS_DEBUG("Fixing move while relocating "
-                    "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
-                    parent.pair[0], parent.pair[1], moveid);
-            lfs_fs_prepmove(lfs, 0x3ff, NULL);
-        }
-
-        // replace bad pair, either we clean up desync, or no desync occured
-        lfs_pair_tole32(newpair);
-        err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS(
-                {LFS_MKTAG_IF(moveid != 0x3ff,
-                    LFS_TYPE_DELETE, moveid, 0), NULL},
-                {LFS_MKTAG(LFS_TYPE_TAIL + parent.split, 0x3ff, 8), newpair}));
-        lfs_pair_fromle32(newpair);
-        if (err) {
-            return err;
-        }
-    }
-
-    return 0;
-}
-#endif
-
 #ifndef LFS_READONLY
 static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
     LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0 || orphans >= 0);
@@ -4144,77 +4479,129 @@ static int lfs_fs_demove(lfs_t *lfs) {
 #endif
 
 #ifndef LFS_READONLY
-static int lfs_fs_deorphan(lfs_t *lfs) {
+static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
     if (!lfs_gstate_hasorphans(&lfs->gstate)) {
         return 0;
     }
 
-    // Fix any orphans
-    lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
-    lfs_mdir_t dir;
-
-    // iterate over all directory directory entries
-    while (!lfs_pair_isnull(pdir.tail)) {
-        int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
-        if (err) {
-            return err;
-        }
+    int8_t found = 0;
+restart:
+    {
+        // Fix any orphans
+        lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
+        lfs_mdir_t dir;
 
-        // check head blocks for orphans
-        if (!pdir.split) {
-            // check if we have a parent
-            lfs_mdir_t parent;
-            lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
-            if (tag < 0 && tag != LFS_ERR_NOENT) {
-                return tag;
+        // iterate over all directory entries
+        while (!lfs_pair_isnull(pdir.tail)) {
+            int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
+            if (err) {
+                return err;
             }
 
-            if (tag == LFS_ERR_NOENT) {
-                // we are an orphan
-                LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
-                        pdir.tail[0], pdir.tail[1]);
-
-                err = lfs_dir_drop(lfs, &pdir, &dir);
-                if (err) {
-                    return err;
+            // check head blocks for orphans
+            if (!pdir.split) {
+                // check if we have a parent
+                lfs_mdir_t parent;
+                lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
+                if (tag < 0 && tag != LFS_ERR_NOENT) {
+                    return tag;
                 }
 
-                // refetch tail
-                continue;
-            }
+                // note we only check for full orphans if we may have had a
+                // power-loss, otherwise orphans are created intentionally
+                // during operations such as lfs_mkdir
+                if (tag == LFS_ERR_NOENT && powerloss) {
+                    // we are an orphan
+                    LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
+                            pdir.tail[0], pdir.tail[1]);
 
-            lfs_block_t pair[2];
-            lfs_stag_t res = lfs_dir_get(lfs, &parent,
-                    LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
-            if (res < 0) {
-                return res;
-            }
-            lfs_pair_fromle32(pair);
-
-            if (!lfs_pair_sync(pair, pdir.tail)) {
-                // we have desynced
-                LFS_DEBUG("Fixing half-orphan {0x%"PRIx32", 0x%"PRIx32"} "
-                            "-> {0x%"PRIx32", 0x%"PRIx32"}",
-                        pdir.tail[0], pdir.tail[1], pair[0], pair[1]);
-
-                lfs_pair_tole32(pair);
-                err = lfs_dir_commit(lfs, &pdir, LFS_MKATTRS(
-                        {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pair}));
-                lfs_pair_fromle32(pair);
-                if (err) {
-                    return err;
+                    // steal state
+                    err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta);
+                    if (err) {
+                        return err;
+                    }
+
+                    // steal tail
+                    lfs_pair_tole32(dir.tail);
+                    int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
+                            {LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8),
+                                dir.tail}));
+                    lfs_pair_fromle32(dir.tail);
+                    if (state < 0) {
+                        return state;
+                    }
+
+                    found += 1;
+
+                    // did our commit create more orphans?
+                    if (state == LFS_OK_ORPHANED) {
+                        goto restart;
+                    }
+
+                    // refetch tail
+                    continue;
                 }
 
-                // refetch tail
-                continue;
+                if (tag != LFS_ERR_NOENT) {
+                    lfs_block_t pair[2];
+                    lfs_stag_t state = lfs_dir_get(lfs, &parent,
+                            LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
+                    if (state < 0) {
+                        return state;
+                    }
+                    lfs_pair_fromle32(pair);
+
+                    if (!lfs_pair_sync(pair, pdir.tail)) {
+                        // we have desynced
+                        LFS_DEBUG("Fixing half-orphan "
+                                "{0x%"PRIx32", 0x%"PRIx32"} "
+                                "-> {0x%"PRIx32", 0x%"PRIx32"}",
+                                pdir.tail[0], pdir.tail[1], pair[0], pair[1]);
+
+                        // fix pending move in this pair? this looks like an
+                        // optimization but is in fact _required_ since
+                        // relocating may outdate the move.
+                        uint16_t moveid = 0x3ff;
+                        if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
+                            moveid = lfs_tag_id(lfs->gstate.tag);
+                            LFS_DEBUG("Fixing move while fixing orphans "
+                                    "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
+                                    pdir.pair[0], pdir.pair[1], moveid);
+                            lfs_fs_prepmove(lfs, 0x3ff, NULL);
+                        }
+
+                        lfs_pair_tole32(pair);
+                        state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
+                                {LFS_MKTAG_IF(moveid != 0x3ff,
+                                    LFS_TYPE_DELETE, moveid, 0), NULL},
+                                {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8),
+                                    pair}));
+                        lfs_pair_fromle32(pair);
+                        if (state < 0) {
+                            return state;
+                        }
+
+                        found += 1;
+
+                        // did our commit create more orphans?
+                        if (state == LFS_OK_ORPHANED) {
+                            goto restart;
+                        }
+
+                        // refetch tail
+                        continue;
+                    }
+                }
             }
-        }
 
-        pdir = dir;
+            pdir = dir;
+        }
     }
 
     // mark orphans as fixed
-    return lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
+    return lfs_fs_preporphans(lfs, -lfs_min(
+            lfs_gstate_getorphans(&lfs->gstate),
+            found));
 }
 #endif
 
@@ -4225,7 +4612,7 @@ static int lfs_fs_forceconsistency(lfs_t *lfs) {
         return err;
     }
 
-    err = lfs_fs_deorphan(lfs);
+    err = lfs_fs_deorphan(lfs, true);
     if (err) {
         return err;
     }
@@ -5082,6 +5469,7 @@ int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type) {
 }
 #endif
 
+#ifndef LFS_NO_MALLOC
 int lfs_file_open(lfs_t *lfs, lfs_file_t *file, const char *path, int flags) {
     int err = LFS_LOCK(lfs->cfg);
     if (err) {
@@ -5097,6 +5485,7 @@ int lfs_file_open(lfs_t *lfs, lfs_file_t *file, const char *path, int flags) {
     LFS_UNLOCK(lfs->cfg);
     return err;
 }
+#endif
 
 int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
         const char *path, int flags,
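With the new LFS_NO_MALLOC guard, lfs_file_open() is no longer usable without a heap; a file cache must instead be supplied by the caller through lfs_file_opencfg(). Below is a minimal sketch of that usage, assuming a cache_size of 256 bytes; CACHE_SIZE, file_buffer, open_log() and the path are illustrative names, while lfs_file_opencfg(), struct lfs_file_config and the LFS_O_* flags are the existing littlefs API:

``` c
#include "lfs.h"

/* Illustrative only: must match the cache_size used in struct lfs_config. */
#define CACHE_SIZE 256

static uint8_t file_buffer[CACHE_SIZE];   /* statically allocated file cache */

static int
open_log(lfs_t *lfs, lfs_file_t *file)
{
    /* Supplying .buffer avoids the internal lfs_malloc() call entirely. */
    static const struct lfs_file_config fcfg = {
        .buffer = file_buffer,
    };

    return lfs_file_opencfg(lfs, file, "log.txt",
                            LFS_O_RDWR | LFS_O_CREAT, &fcfg);
}
```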
diff --git a/fs/littlefs/include/littlefs/lfs.h b/fs/littlefs/src/lfs.h
similarity index 94%
rename from fs/littlefs/include/littlefs/lfs.h
rename to fs/littlefs/src/lfs.h
index 44cf62105..3fc1e982f 100644
--- a/fs/littlefs/include/littlefs/lfs.h
+++ b/fs/littlefs/src/lfs.h
@@ -1,6 +1,7 @@
 /*
  * The little filesystem
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,7 +10,7 @@
 
 #include <stdint.h>
 #include <stdbool.h>
-#include "littlefs/lfs_util.h"
+#include "lfs_util.h"
 
 #ifdef __cplusplus
 extern "C"
@@ -22,7 +23,7 @@ extern "C"
 // Software library version
 // Major (top-nibble), incremented on backwards incompatible changes
 // Minor (bottom-nibble), incremented on feature additions
-#define LFS_VERSION 0x00020004
+#define LFS_VERSION 0x00020005
 #define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
 #define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >>  0))
 
@@ -159,49 +160,49 @@ struct lfs_config {
     // information to the block device operations
     void *context;
 
-    // Read a region in a block. Negative error codes are propogated
+    // Read a region in a block. Negative error codes are propagated
     // to the user.
     int (*read)(const struct lfs_config *c, lfs_block_t block,
             lfs_off_t off, void *buffer, lfs_size_t size);
 
     // Program a region in a block. The block must have previously
-    // been erased. Negative error codes are propogated to the user.
+    // been erased. Negative error codes are propagated to the user.
     // May return LFS_ERR_CORRUPT if the block should be considered bad.
     int (*prog)(const struct lfs_config *c, lfs_block_t block,
             lfs_off_t off, const void *buffer, lfs_size_t size);
 
     // Erase a block. A block must be erased before being programmed.
     // The state of an erased block is undefined. Negative error codes
-    // are propogated to the user.
+    // are propagated to the user.
     // May return LFS_ERR_CORRUPT if the block should be considered bad.
     int (*erase)(const struct lfs_config *c, lfs_block_t block);
 
     // Sync the state of the underlying block device. Negative error codes
-    // are propogated to the user.
+    // are propagated to the user.
     int (*sync)(const struct lfs_config *c);
 
 #ifdef LFS_THREADSAFE
     // Lock the underlying block device. Negative error codes
-    // are propogated to the user.
+    // are propagated to the user.
     int (*lock)(const struct lfs_config *c);
 
     // Unlock the underlying block device. Negative error codes
-    // are propogated to the user.
+    // are propagated to the user.
     int (*unlock)(const struct lfs_config *c);
 #endif
 
-    // Minimum size of a block read. All read operations will be a
+    // Minimum size of a block read in bytes. All read operations will be a
     // multiple of this value.
     lfs_size_t read_size;
 
-    // Minimum size of a block program. All program operations will be a
-    // multiple of this value.
+    // Minimum size of a block program in bytes. All program operations will be
+    // a multiple of this value.
     lfs_size_t prog_size;
 
-    // Size of an erasable block. This does not impact ram consumption and
-    // may be larger than the physical erase size. However, non-inlined files
-    // take up at minimum one block. Must be a multiple of the read
-    // and program sizes.
+    // Size of an erasable block in bytes. This does not impact ram consumption
+    // and may be larger than the physical erase size. However, non-inlined
+    // files take up at minimum one block. Must be a multiple of the read and
+    // program sizes.
     lfs_size_t block_size;
 
     // Number of erasable blocks on the device.
@@ -215,11 +216,11 @@ struct lfs_config {
     // Set to -1 to disable block-level wear-leveling.
     int32_t block_cycles;
 
-    // Size of block caches. Each cache buffers a portion of a block in RAM.
-    // The littlefs needs a read cache, a program cache, and one additional
+    // Size of block caches in bytes. Each cache buffers a portion of a block in
+    // RAM. The littlefs needs a read cache, a program cache, and one additional
     // cache per file. Larger caches can improve performance by storing more
-    // data and reducing the number of disk accesses. Must be a multiple of
-    // the read and program sizes, and a factor of the block size.
+    // data and reducing the number of disk accesses. Must be a multiple of the
+    // read and program sizes, and a factor of the block size.
     lfs_size_t cache_size;
 
     // Size of the lookahead buffer in bytes. A larger lookahead buffer
@@ -485,7 +486,7 @@ int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info);
 // Returns the size of the attribute, or a negative error code on failure.
 // Note, the returned size is the size of the attribute on disk, irrespective
 // of the size of the buffer. This can be used to dynamically allocate a buffer
-// or check for existance.
+// or check for existence.
 lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
         uint8_t type, void *buffer, lfs_size_t size);
 
@@ -513,6 +514,7 @@ int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type);
 
 /// File operations ///
 
+#ifndef LFS_NO_MALLOC
 // Open a file
 //
 // The mode that the file is opened in is determined by the flags, which
@@ -522,6 +524,10 @@ int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type);
 int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
         const char *path, int flags);
 
+// If LFS_NO_MALLOC is defined, lfs_file_open() will fail with LFS_ERR_NOMEM;
+// use lfs_file_opencfg() with config.buffer set instead.
+#endif
+
 // Open a file with extra configuration
 //
 // The mode that the file is opened in is determined by the flags, which
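The size-related lfs_config fields updated above are all expressed in bytes, and their constraints (cache_size a multiple of read_size/prog_size and a factor of block_size) are easiest to see with concrete numbers. A hedged sketch for an imaginary 512 KiB part with 4 KiB erase blocks follows; the bd_* callbacks and all geometry values are placeholders for whatever the target flash actually provides:

``` c
#include "lfs.h"

/* Block device callbacks provided elsewhere by the target's flash driver
 * (the bd_* names are assumptions for this sketch). */
extern int bd_read(const struct lfs_config *c, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size);
extern int bd_prog(const struct lfs_config *c, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size);
extern int bd_erase(const struct lfs_config *c, lfs_block_t block);
extern int bd_sync(const struct lfs_config *c);

/* Example geometry for an imaginary 512 KiB part with 4 KiB erase blocks. */
static const struct lfs_config cfg = {
    .read  = bd_read,
    .prog  = bd_prog,
    .erase = bd_erase,
    .sync  = bd_sync,

    .read_size      = 16,    /* minimum read, in bytes */
    .prog_size      = 16,    /* minimum program, in bytes */
    .block_size     = 4096,  /* erasable block, in bytes; multiple of read/prog size */
    .block_count    = 128,   /* 128 * 4096 = 512 KiB */
    .block_cycles   = 500,   /* erases per block before wear-leveling relocates it */
    .cache_size     = 256,   /* in bytes; multiple of 16, factor of 4096 */
    .lookahead_size = 16,    /* in bytes */
};
```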
diff --git a/fs/littlefs/src/lfs_util.c b/fs/littlefs/src/lfs_util.c
index 4808a7475..9cdd1c60e 100644
--- a/fs/littlefs/src/lfs_util.c
+++ b/fs/littlefs/src/lfs_util.c
@@ -1,10 +1,11 @@
 /*
  * lfs util functions
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
-#include "littlefs/lfs_util.h"
+#include "lfs_util.h"
 
 // Only compile if user does not provide custom config
 #ifndef LFS_CONFIG
diff --git a/fs/littlefs/include/littlefs/lfs_util.h b/fs/littlefs/src/lfs_util.h
similarity index 99%
rename from fs/littlefs/include/littlefs/lfs_util.h
rename to fs/littlefs/src/lfs_util.h
index fc1b0c2ae..0cbc2a319 100644
--- a/fs/littlefs/include/littlefs/lfs_util.h
+++ b/fs/littlefs/src/lfs_util.h
@@ -1,6 +1,7 @@
 /*
  * lfs utility functions
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
diff --git a/fs/littlefs/src/mynewt_glue.c b/fs/littlefs/src/mynewt_glue.c
index 9bda5750e..df11deb13 100644
--- a/fs/littlefs/src/mynewt_glue.c
+++ b/fs/littlefs/src/mynewt_glue.c
@@ -26,8 +26,8 @@
 #include <disk/disk.h>
 #include <flash_map/flash_map.h>
 
-#include <littlefs/lfs.h>
-#include <littlefs/lfs_util.h>
+#include "lfs.h"
+#include "lfs_util.h"
 
 #include <fs/fs.h>
 #include <fs/fs_if.h>
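
Because lfs_rawmount() now rejects a superblock whose block_size or block_count disagrees with the supplied configuration, callers see the mismatch as a negative error at mount time. A rough sketch of how an application might react; mount_or_format() and the reformat-on-failure policy are only an example, not part of this change:

``` c
#include "lfs.h"

static lfs_t lfs;

/* Illustrative policy: if mounting fails (e.g. LFS_ERR_INVAL when the on-disk
 * geometry does not match cfg), destructively reformat with the new geometry
 * and mount again. */
static int
mount_or_format(const struct lfs_config *cfg)
{
    int err = lfs_mount(&lfs, cfg);
    if (err < 0) {
        err = lfs_format(&lfs, cfg);
        if (err < 0) {
            return err;
        }
        err = lfs_mount(&lfs, cfg);
    }
    return err;
}
```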
