Script 'mail_helper' called by obssrc

Hello community,

here is the log from the commit of package s3fs for openSUSE:Factory checked in at 2023-07-26 13:24:05

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/s3fs (Old)
 and      /work/SRC/openSUSE:Factory/.s3fs.new.15225 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "s3fs" Wed Jul 26 13:24:05 2023 rev:19 rq:1100627 version:1.93 Changes: -------- --- /work/SRC/openSUSE:Factory/s3fs/s3fs.changes 2023-06-01 17:19:34.690180935 +0200 +++ /work/SRC/openSUSE:Factory/.s3fs.new.15225/s3fs.changes 2023-07-26 13:25:12.260456053 +0200 @@ -1,0 +2,7 @@ +Mon Jul 24 22:44:05 UTC 2023 - Julio González Gil <ju...@juliogonzalez.es> + +- Update to version 1.93 + * Allow listing implicit directories + * Fix thread safety issues + +------------------------------------------------------------------- Old: ---- s3fs-1.92.tar.gz New: ---- s3fs-1.93.tar.gz ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ s3fs.spec ++++++ --- /var/tmp/diff_new_pack.BQ7fkR/_old 2023-07-26 13:25:12.836459529 +0200 +++ /var/tmp/diff_new_pack.BQ7fkR/_new 2023-07-26 13:25:12.840459553 +0200 @@ -17,7 +17,7 @@ Name: s3fs -Version: 1.92 +Version: 1.93 Release: 0 Summary: FUSE file system backed by Amazon S3 bucket License: GPL-2.0-or-later ++++++ s3fs-1.92.tar.gz -> s3fs-1.93.tar.gz ++++++ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/.github/workflows/ci.yml new/s3fs-fuse-1.93/.github/workflows/ci.yml --- old/s3fs-fuse-1.92/.github/workflows/ci.yml 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/.github/workflows/ci.yml 2023-07-19 15:31:43.000000000 +0200 @@ -50,9 +50,9 @@ # matrix: container: + - ubuntu:23.04 - ubuntu:22.04 - ubuntu:20.04 - - ubuntu:18.04 - debian:bullseye - debian:buster - rockylinux:9 @@ -101,8 +101,8 @@ - name: Cppcheck run: | - # work around resource leak false positives on older Linux distributions - if cppcheck --version | awk '{if ($2 <= 1.86) { exit(1) } }'; then + # work around resource leak false positives on older and newer Linux distributions + if cppcheck --version | awk '{if ($2 <= 1.86 || $2 >= 2.10) { exit(1) } }'; then make cppcheck fi @@ -178,6 +178,91 @@ if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse; else exit 1; fi make ALL_TESTS=1 check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1) + MemoryTest: + runs-on: ubuntu-latest + + # + # build matrix for containers + # + strategy: + # + # do not stop jobs automatically if any of the jobs fail + # + fail-fast: false + + # + # matrix for type of checking + # + # [NOTE] + # Currently following test is not supported: + # - sanitize_memory : Future support planned + # - valgrind : Requires more than an hour of testing time + # + matrix: + checktype: + - glibc_debug + - sanitize_address + - sanitize_others + - sanitize_thread + + container: + image: fedora:38 + + options: "--privileged --cap-add SYS_ADMIN --device /dev/fuse" + + steps: + - name: Checkout source code + uses: actions/checkout@v3 + + - name: Install packages + run: | + .github/workflows/linux-ci-helper.sh fedora:38 + + - name: Install clang + run: | + dnf install -y clang + if [ "${{ matrix.checktype }}" = "valgrind" ]; then + dnf install -y valgrind + fi + + # + # Set CXX/CXXFLAGS and Variables for test + # + - name: Set variables + run: | + COMMON_CXXFLAGS='-g -Wno-cpp -DS3FS_PTHREAD_ERRORCHECK=1' + if [ "${{ matrix.checktype }}" = "glibc_debug" ]; then + echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -D_GLIBCXX_DEBUG" >> $GITHUB_ENV + elif [ "${{ matrix.checktype }}" = 
"sanitize_address" ]; then + echo 'CXX=clang++' >> $GITHUB_ENV + echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=address -fsanitize-address-use-after-scope" >> $GITHUB_ENV + echo 'ASAN_OPTIONS=detect_leaks=1,detect_stack_use_after_return=1' >> $GITHUB_ENV + elif [ "${{ matrix.checktype }}" = "sanitize_memory" ]; then + echo 'CXX=clang++' >> $GITHUB_ENV + echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=memory" >> $GITHUB_ENV + elif [ "${{ matrix.checktype }}" = "sanitize_thread" ]; then + echo 'CXX=clang++' >> $GITHUB_ENV + echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=thread" >> $GITHUB_ENV + echo 'TSAN_OPTIONS=halt_on_error=1' >> $GITHUB_ENV + elif [ "${{ matrix.checktype }}" = "sanitize_others" ]; then + echo 'CXX=clang++' >> $GITHUB_ENV + echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1 -fsanitize=undefined,implicit-conversion,local-bounds,unsigned-integer-overflow" >> $GITHUB_ENV + elif [ "${{ matrix.checktype }}" = "valgrind" ]; then + echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1" >> $GITHUB_ENV + echo 'VALGRIND=--leak-check=full' >> $GITHUB_ENV + echo 'RETRIES=100' >> $GITHUB_ENV + fi + + - name: Build + run: | + ./autogen.sh + /bin/sh -c "CXX=${CXX} CXXFLAGS=\"${CXXFLAGS}\" ./configure --prefix=/usr --with-openssl" + make + + - name: Test suite + run: | + /bin/sh -c "ALL_TESTS=1 ASAN_OPTIONS=${ASAN_OPTIONS} TSAN_OPTIONS=${TSAN_OPTIONS} VALGRIND=${VALGRIND} RETRIES=${RETRIES} make check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)" + # # Local variables: # tab-width: 4 diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/.github/workflows/linux-ci-helper.sh new/s3fs-fuse-1.93/.github/workflows/linux-ci-helper.sh --- old/s3fs-fuse-1.92/.github/workflows/linux-ci-helper.sh 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/.github/workflows/linux-ci-helper.sh 2023-07-19 15:31:43.000000000 +0200 @@ -80,39 +80,30 @@ SHELLCHECK_DIRECT_INSTALL=0 AWSCLI_DIRECT_INSTALL=1 -if [ "${CONTAINER_FULLNAME}" = "ubuntu:22.04" ]; then +if [ "${CONTAINER_FULLNAME}" = "ubuntu:23.04" ]; then PACKAGE_MANAGER_BIN="apt-get" PACKAGE_UPDATE_OPTIONS="update -y -qq" PACKAGE_INSTALL_OPTIONS="install -y" - INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip" + INSTALL_PACKAGES="autoconf autotools-dev openjdk-17-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip" INSTALL_CHECKER_PKGS="cppcheck shellcheck" INSTALL_CHECKER_PKG_OPTIONS="" -elif [ "${CONTAINER_FULLNAME}" = "ubuntu:20.04" ]; then - PACKAGE_MANAGER_BIN="apt-get" - PACKAGE_UPDATE_OPTIONS="update -y -qq" - PACKAGE_INSTALL_OPTIONS="install -y" - - INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip" - INSTALL_CHECKER_PKGS="cppcheck shellcheck" - INSTALL_CHECKER_PKG_OPTIONS="" - -elif [ "${CONTAINER_FULLNAME}" = "ubuntu:18.04" ]; then +elif [ "${CONTAINER_FULLNAME}" = "ubuntu:22.04" ]; then PACKAGE_MANAGER_BIN="apt-get" PACKAGE_UPDATE_OPTIONS="update -y -qq" PACKAGE_INSTALL_OPTIONS="install -y" - INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip" + 
INSTALL_PACKAGES="autoconf autotools-dev openjdk-17-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip" INSTALL_CHECKER_PKGS="cppcheck shellcheck" INSTALL_CHECKER_PKG_OPTIONS="" -elif [ "${CONTAINER_FULLNAME}" = "ubuntu:16.04" ]; then +elif [ "${CONTAINER_FULLNAME}" = "ubuntu:20.04" ]; then PACKAGE_MANAGER_BIN="apt-get" PACKAGE_UPDATE_OPTIONS="update -y -qq" PACKAGE_INSTALL_OPTIONS="install -y" - INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip" + INSTALL_PACKAGES="autoconf autotools-dev openjdk-17-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip" INSTALL_CHECKER_PKGS="cppcheck shellcheck" INSTALL_CHECKER_PKG_OPTIONS="" @@ -121,7 +112,7 @@ PACKAGE_UPDATE_OPTIONS="update -y -qq" PACKAGE_INSTALL_OPTIONS="install -y" - INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip unzip" + INSTALL_PACKAGES="autoconf autotools-dev openjdk-17-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip unzip" INSTALL_CHECKER_PKGS="cppcheck shellcheck" INSTALL_CHECKER_PKG_OPTIONS="" @@ -146,7 +137,7 @@ # PACKAGE_INSTALL_ADDITIONAL_OPTIONS="--allowerasing" - INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl-devel attr diffutils curl python3 procps unzip xz https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm" + INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-17-openjdk-headless libxml2-devel mailcap git automake make openssl-devel attr diffutils curl python3 procps unzip xz https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm" INSTALL_CHECKER_PKGS="cppcheck" INSTALL_CHECKER_PKG_OPTIONS="--enablerepo=epel" @@ -160,7 +151,7 @@ PACKAGE_UPDATE_OPTIONS="update -y -qq" PACKAGE_INSTALL_OPTIONS="install -y" - INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl-devel attr diffutils curl python3 unzip" + INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-17-openjdk-headless libxml2-devel mailcap git automake make openssl-devel attr diffutils curl python3 unzip" INSTALL_CHECKER_PKGS="cppcheck" INSTALL_CHECKER_PKG_OPTIONS="--enablerepo=powertools" @@ -188,8 +179,7 @@ PACKAGE_UPDATE_OPTIONS="update -y -qq" PACKAGE_INSTALL_OPTIONS="install -y" - # TODO: Cannot use java-latest-openjdk (17) due to modules issue in S3Proxy/jclouds/Guice - INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl-devel curl attr diffutils procps python3-pip unzip" + INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-latest-openjdk-headless libxml2-devel mailcap git automake make openssl-devel curl attr diffutils procps python3-pip unzip" INSTALL_CHECKER_PKGS="cppcheck ShellCheck" 
INSTALL_CHECKER_PKG_OPTIONS="" @@ -198,8 +188,7 @@ PACKAGE_UPDATE_OPTIONS="update -y -qq" PACKAGE_INSTALL_OPTIONS="install -y" - # TODO: Cannot use java-latest-openjdk (17) due to modules issue in S3Proxy/jclouds/Guice - INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl-devel curl attr diffutils procps python3-pip unzip" + INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-latest-openjdk-headless libxml2-devel mailcap git automake make openssl-devel curl attr diffutils procps python3-pip unzip" INSTALL_CHECKER_PKGS="cppcheck ShellCheck" INSTALL_CHECKER_PKG_OPTIONS="" @@ -208,7 +197,7 @@ PACKAGE_UPDATE_OPTIONS="refresh" PACKAGE_INSTALL_OPTIONS="install -y" - INSTALL_PACKAGES="automake curl-devel fuse fuse-devel gcc-c++ java-11-openjdk-headless libxml2-devel make openssl-devel python3-pip curl attr ShellCheck unzip" + INSTALL_PACKAGES="automake curl-devel fuse fuse-devel gcc-c++ java-17-openjdk-headless libxml2-devel make openssl-devel python3-pip curl attr ShellCheck unzip" INSTALL_CHECKER_PKGS="cppcheck ShellCheck" INSTALL_CHECKER_PKG_OPTIONS="" @@ -217,7 +206,7 @@ PACKAGE_UPDATE_OPTIONS="update --no-progress" PACKAGE_INSTALL_OPTIONS="add --no-progress --no-cache" - INSTALL_PACKAGES="bash curl g++ make automake autoconf libtool git curl-dev fuse-dev libxml2-dev coreutils procps attr sed mailcap openjdk11 aws-cli" + INSTALL_PACKAGES="bash curl g++ make automake autoconf libtool git curl-dev fuse-dev libxml2-dev coreutils procps attr sed mailcap openjdk17 aws-cli" INSTALL_CHECKER_PKGS="cppcheck shellcheck" INSTALL_CHECKER_PKG_OPTIONS="" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/ChangeLog new/s3fs-fuse-1.93/ChangeLog --- old/s3fs-fuse-1.92/ChangeLog 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/ChangeLog 2023-07-19 15:31:43.000000000 +0200 @@ -1,6 +1,11 @@ ChangeLog for S3FS ------------------ +Version 1.93 -- 19 Jul, 2023 (major changes only) +#2212 - Allow listing implicit directories +#2194 - #2209 - #2211 - #2214 - #2215 - Fix thread safety issues +#2191 - #2201 - Add support for FUSE-T on macOS + Version 1.92 -- 21 May, 2023 (major changes only) #1802 - #2104 - New option: streamupload #1922 - Enable noobj_cache by default diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/Makefile.am new/s3fs-fuse-1.93/Makefile.am --- old/s3fs-fuse-1.92/Makefile.am 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/Makefile.am 2023-07-19 15:31:43.000000000 +0200 @@ -44,6 +44,7 @@ --enable=warning,style,information,missingInclude \ --suppress=missingIncludeSystem \ --suppress=unmatchedSuppression \ + --suppress=useStlAlgorithm \ src/ test/ # diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/configure.ac new/s3fs-fuse-1.93/configure.ac --- old/s3fs-fuse-1.92/configure.ac 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/configure.ac 2023-07-19 15:31:43.000000000 +0200 @@ -20,7 +20,7 @@ dnl Process this file with autoconf to produce a configure script. 
AC_PREREQ([2.69]) -AC_INIT([s3fs],[1.92]) +AC_INIT([s3fs],[1.93]) AC_CONFIG_HEADER([config.h]) AC_CANONICAL_TARGET @@ -34,11 +34,12 @@ AC_CHECK_HEADERS([sys/extattr.h]) AC_CHECK_FUNCS([fallocate]) -CXXFLAGS="$CXXFLAGS -Wall -fno-exceptions -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=2" +CXXFLAGS="$CXXFLAGS -Wall -fno-exceptions -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=3" dnl ---------------------------------------------- dnl For macOS dnl ---------------------------------------------- +found_fuse_t=no case "$target" in *-cygwin* ) # Do something specific for windows using winfsp @@ -48,6 +49,7 @@ *-darwin* ) # Do something specific for mac min_fuse_version=2.7.3 + min_fuse_t_version=1.0.20 ;; *) # Default Case @@ -56,6 +58,11 @@ ;; esac +PKG_CHECK_MODULES([FUSE_T], [fuse-t >= ${min_fuse_t_version}], [found_fuse_t=yes], [found_fuse_t=no]) + +AS_IF([test "x$found_fuse_t" = "xyes"], + [PKG_CHECK_MODULES([common_lib_checking], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])]) + dnl ---------------------------------------------- dnl Choice SSL library dnl ---------------------------------------------- @@ -183,13 +190,18 @@ dnl For PKG_CONFIG before checking nss/gnutls. dnl this is redundant checking, but we need checking before following. dnl -PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ]) +AS_IF([test "x$found_fuse_t" = "xyes"], + [PKG_CHECK_MODULES([common_lib_checking], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])], + [PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])]) AC_MSG_CHECKING([compile s3fs with]) case "${auth_lib}" in openssl) AC_MSG_RESULT(OpenSSL) - PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ]) + AS_IF([test "x$found_fuse_t" = "xyes"], + [PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])], + [PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])]) + AC_MSG_CHECKING([openssl 3.0 or later]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[#include <openssl/opensslv.h> @@ -206,7 +218,9 @@ AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(gcrypt, gcry_control, [gnutls_nettle=0])]) AS_IF([test $gnutls_nettle = 0], [ - PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ]) + AS_IF([test "x$found_fuse_t" = "xyes"], + [PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])], + [PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])]) LIBS="-lgnutls -lgcrypt $LIBS" AC_MSG_CHECKING([gnutls is build with]) AC_MSG_RESULT(gcrypt) @@ -220,7 +234,9 @@ AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(nettle, nettle_MD5Init, [gnutls_nettle=1])]) AS_IF([test $gnutls_nettle = 1], [ - PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ]) + AS_IF([test "x$found_fuse_t" = "xyes"], + [PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])], + [PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])]) LIBS="-lgnutls -lnettle $LIBS" AC_MSG_CHECKING([gnutls is build with]) AC_MSG_RESULT(nettle) @@ -229,7 +245,9 @@ ;; nss) AC_MSG_RESULT(NSS) - PKG_CHECK_MODULES([DEPS], 
[fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ]) + AS_IF([test "x$found_fuse_t" = "xyes"], + [PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])], + [PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])]) ;; *) AC_MSG_ERROR([unknown ssl library type.]) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/src/addhead.cpp new/s3fs-fuse-1.93/src/addhead.cpp --- old/s3fs-fuse-1.92/src/addhead.cpp 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/src/addhead.cpp 2023-07-19 15:31:43.000000000 +0200 @@ -151,9 +151,7 @@ addheadlist.push_back(paddhead); // set flag - if(!is_enable){ - is_enable = true; - } + is_enable = true; } return true; } diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/src/curl.cpp new/s3fs-fuse-1.93/src/curl.cpp --- old/s3fs-fuse-1.92/src/curl.cpp 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/src/curl.cpp 2023-07-19 15:31:43.000000000 +0200 @@ -1944,7 +1944,8 @@ curl_warnings_once = true; } - curl_easy_reset(hCurl); + sCurlPool->ResetHandler(hCurl); + if(CURLE_OK != curl_easy_setopt(hCurl, CURLOPT_NOSIGNAL, 1)){ return false; } @@ -2078,13 +2079,13 @@ type = REQTYPE_UNSET; } + AutoLock lock(&S3fsCurl::curl_handles_lock, locktype); + if(clear_internal_data){ ClearInternalData(); } if(hCurl){ - AutoLock lock(&S3fsCurl::curl_handles_lock, locktype); - S3fsCurl::curl_times.erase(hCurl); S3fsCurl::curl_progress.erase(hCurl); sCurlPool->ReturnHandler(hCurl, restore_pool); diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/src/curl_handlerpool.cpp new/s3fs-fuse-1.93/src/curl_handlerpool.cpp --- old/s3fs-fuse-1.92/src/curl_handlerpool.cpp 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/src/curl_handlerpool.cpp 2023-07-19 15:31:43.000000000 +0200 @@ -53,11 +53,15 @@ bool CurlHandlerPool::Destroy() { - while(!mPool.empty()){ - CURL* hCurl = mPool.back(); - mPool.pop_back(); - if(hCurl){ - curl_easy_cleanup(hCurl); + { + AutoLock lock(&mLock); + + while(!mPool.empty()){ + CURL* hCurl = mPool.back(); + mPool.pop_back(); + if(hCurl){ + curl_easy_cleanup(hCurl); + } } } if (0 != pthread_mutex_destroy(&mLock)) { @@ -69,15 +73,14 @@ CURL* CurlHandlerPool::GetHandler(bool only_pool) { + AutoLock lock(&mLock); + CURL* hCurl = NULL; - { - AutoLock lock(&mLock); - - if(!mPool.empty()){ - hCurl = mPool.back(); - mPool.pop_back(); - S3FS_PRN_DBG("Get handler from pool: rest = %d", static_cast<int>(mPool.size())); - } + + if(!mPool.empty()){ + hCurl = mPool.back(); + mPool.pop_back(); + S3FS_PRN_DBG("Get handler from pool: rest = %d", static_cast<int>(mPool.size())); } if(only_pool){ return hCurl; @@ -94,10 +97,9 @@ if(!hCurl){ return; } + AutoLock lock(&mLock); if(restore_pool){ - AutoLock lock(&mLock); - S3FS_PRN_DBG("Return handler to pool"); mPool.push_back(hCurl); @@ -115,6 +117,16 @@ } } +void CurlHandlerPool::ResetHandler(CURL* hCurl) +{ + if(!hCurl){ + return; + } + AutoLock lock(&mLock); + + curl_easy_reset(hCurl); +} + /* * Local variables: * tab-width: 4 diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/src/curl_handlerpool.h new/s3fs-fuse-1.93/src/curl_handlerpool.h --- old/s3fs-fuse-1.92/src/curl_handlerpool.h 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/src/curl_handlerpool.h 
2023-07-19 15:31:43.000000000 +0200 @@ -45,6 +45,7 @@ CURL* GetHandler(bool only_pool); void ReturnHandler(CURL* hCurl, bool restore_pool); + void ResetHandler(CURL* hCurl); private: int mMaxHandlers; diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/src/fdcache.cpp new/s3fs-fuse-1.93/src/fdcache.cpp --- old/s3fs-fuse-1.92/src/fdcache.cpp 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/src/fdcache.cpp 2023-07-19 15:31:43.000000000 +0200 @@ -320,7 +320,7 @@ } FdManager::checked_lseek = true; FdManager::have_lseek_hole = false; - return FdManager::have_lseek_hole; + return false; } // check SEEK_DATA/SEEK_HOLE options @@ -577,7 +577,7 @@ } // (re)open - if(-1 == (fd = ent->Open(pmeta, size, ts_mctime, flags, type))){ + if(0 > (fd = ent->Open(pmeta, size, ts_mctime, flags, type))){ S3FS_PRN_ERR("failed to (re)open and create new pseudo fd for path(%s).", path); return NULL; } @@ -593,7 +593,8 @@ ent = new FdEntity(path, cache_path.c_str()); // open - if(-1 == (fd = ent->Open(pmeta, size, ts_mctime, flags, type))){ + if(0 > (fd = ent->Open(pmeta, size, ts_mctime, flags, type))){ + S3FS_PRN_ERR("failed to open and create new pseudo fd for path(%s).", path); delete ent; return NULL; } diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/src/fdcache_entity.cpp new/s3fs-fuse-1.93/src/fdcache_entity.cpp --- old/s3fs-fuse-1.92/src/fdcache_entity.cpp 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/src/fdcache_entity.cpp 2023-07-19 15:31:43.000000000 +0200 @@ -24,6 +24,7 @@ #include <unistd.h> #include <limits.h> #include <sys/stat.h> +#include <memory> #include "common.h" #include "fdcache_entity.h" @@ -485,6 +486,8 @@ bool need_save_csf = false; // need to save(reset) cache stat file bool is_truncate = false; // need to truncate + std::auto_ptr<CacheFileStat> pcfstat; + if(!cachepath.empty()){ // using cache struct stat st; @@ -498,12 +501,12 @@ } // open cache and cache stat file, load page info. - CacheFileStat cfstat(path.c_str()); + pcfstat.reset(new CacheFileStat(path.c_str())); // try to open cache file if( -1 != (physical_fd = open(cachepath.c_str(), O_RDWR)) && 0 != (inode = FdEntity::GetInode(physical_fd)) && - pagelist.Serialize(cfstat, false, inode) ) + pagelist.Serialize(*pcfstat, false, inode) ) { // succeed to open cache file and to load stats data memset(&st, 0, sizeof(struct stat)); @@ -517,13 +520,18 @@ if(-1 == size){ if(st.st_size != pagelist.Size()){ pagelist.Resize(st.st_size, false, true); // Areas with increased size are modified - need_save_csf = true; // need to update page info + need_save_csf = true; // need to update page info } size = st.st_size; }else{ + // First if the current cache file size and pagelist do not match, fix pagelist. 
+ if(st.st_size != pagelist.Size()){ + pagelist.Resize(st.st_size, false, true); // Areas with increased size are modified + need_save_csf = true; // need to update page info + } if(size != pagelist.Size()){ pagelist.Resize(size, false, true); // Areas with increased size are modified - need_save_csf = true; // need to update page info + need_save_csf = true; // need to update page info } if(size != st.st_size){ is_truncate = true; @@ -631,9 +639,8 @@ } // reset cache stat file - if(need_save_csf){ - CacheFileStat cfstat(path.c_str()); - if(!pagelist.Serialize(cfstat, true, inode)){ + if(need_save_csf && pcfstat.get()){ + if(!pagelist.Serialize(*pcfstat, true, inode)){ S3FS_PRN_WARN("failed to save cache stat file(%s), but continue...", path.c_str()); } } @@ -1401,14 +1408,14 @@ // int FdEntity::RowFlush(int fd, const char* tpath, AutoLock::Type type, bool force_sync) { + AutoLock auto_lock(&fdent_lock, type); + S3FS_PRN_INFO3("[tpath=%s][path=%s][pseudo_fd=%d][physical_fd=%d]", SAFESTRPTR(tpath), path.c_str(), fd, physical_fd); if(-1 == physical_fd){ return -EBADF; } - AutoLock auto_lock(&fdent_lock, type); - // check pseudo fd and its flag fdinfo_map_t::iterator miter = pseudo_fd_map.find(fd); if(pseudo_fd_map.end() == miter || NULL == miter->second){ @@ -2482,10 +2489,11 @@ { S3FS_PRN_DBG("[path=%s][physical_fd=%d][offset=%lld][size=%zu]", path.c_str(), physical_fd, static_cast<long long int>(start), size); + AutoLock auto_lock(&fdent_data_lock); + if(-1 == physical_fd){ return false; } - AutoLock auto_lock(&fdent_data_lock); // get page list that have no data fdpage_list_t nodata_pages; @@ -2523,10 +2531,19 @@ // void FdEntity::MarkDirtyNewFile() { + AutoLock auto_lock(&fdent_lock); + pagelist.Init(0, false, true); pending_status = CREATE_FILE_PENDING; } +bool FdEntity::IsDirtyNewFile() const +{ + AutoLock auto_lock(&fdent_lock); + + return (CREATE_FILE_PENDING == pending_status); +} + bool FdEntity::AddUntreated(off_t start, off_t size) { bool result = untreated_list.AddPart(start, size); diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/src/fdcache_entity.h new/s3fs-fuse-1.93/src/fdcache_entity.h --- old/s3fs-fuse-1.92/src/fdcache_entity.h 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/src/fdcache_entity.h 2023-07-19 15:31:43.000000000 +0200 @@ -149,7 +149,7 @@ bool PunchHole(off_t start = 0, size_t size = 0); void MarkDirtyNewFile(); - bool IsDirtyNewFile() { return (CREATE_FILE_PENDING == pending_status); } + bool IsDirtyNewFile() const; bool GetLastUpdateUntreatedPart(off_t& start, off_t& size) const; bool ReplaceLastUpdateUntreatedPart(off_t front_start, off_t front_size, off_t behind_start, off_t behind_size); diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/src/s3fs.cpp new/s3fs-fuse-1.93/src/s3fs.cpp --- old/s3fs-fuse-1.92/src/s3fs.cpp 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/src/s3fs.cpp 2023-07-19 15:31:43.000000000 +0200 @@ -21,6 +21,7 @@ #include <cstdio> #include <cstdlib> #include <errno.h> +#include <set> #include <unistd.h> #include <dirent.h> #include <sys/types.h> @@ -256,6 +257,88 @@ // whether the stat information file for mount point exists static MpStatFlag* pHasMpStat = NULL; +// +// A synchronous class that calls the fuse_fill_dir_t function that processes the readdir data +// +class SyncFiller +{ + private: + mutable pthread_mutex_t filler_lock; + bool is_lock_init; + void* filler_buff; + fuse_fill_dir_t 
filler_func; + std::set<std::string> filled; + + public: + explicit SyncFiller(void* buff = NULL, fuse_fill_dir_t filler = NULL); + ~SyncFiller(); + + int Fill(const char *name, const struct stat *stbuf, off_t off); + int SufficiencyFill(const std::vector<std::string>& pathlist); +}; + +SyncFiller::SyncFiller(void* buff, fuse_fill_dir_t filler) : is_lock_init(false), filler_buff(buff), filler_func(filler) +{ + if(!filler_buff || !filler_func){ + S3FS_PRN_CRIT("Internal error: SyncFiller constructor parameter is critical value."); + abort(); + } + + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); +#if S3FS_PTHREAD_ERRORCHECK + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); +#endif + + int result; + if(0 != (result = pthread_mutex_init(&filler_lock, &attr))){ + S3FS_PRN_CRIT("failed to init filler_lock: %d", result); + abort(); + } + is_lock_init = true; +} + +SyncFiller::~SyncFiller() +{ + if(is_lock_init){ + int result; + if(0 != (result = pthread_mutex_destroy(&filler_lock))){ + S3FS_PRN_CRIT("failed to destroy filler_lock: %d", result); + abort(); + } + is_lock_init = false; + } +} + +// +// See. prototype fuse_fill_dir_t in fuse.h +// +int SyncFiller::Fill(const char *name, const struct stat *stbuf, off_t off) +{ + AutoLock auto_lock(&filler_lock); + + int result = 0; + if(filled.insert(std::string(name)).second){ + result = filler_func(filler_buff, name, stbuf, off); + } + return result; +} + +int SyncFiller::SufficiencyFill(const std::vector<std::string>& pathlist) +{ + AutoLock auto_lock(&filler_lock); + + int result = 0; + for(std::vector<std::string>::const_iterator it = pathlist.begin(); it != pathlist.end(); ++it) { + if(filled.insert(*it).second){ + if(0 != filler_func(filler_buff, it->c_str(), 0, 0)){ + result = 1; + } + } + } + return result; +} + //------------------------------------------------------------------- // Functions //------------------------------------------------------------------- @@ -3022,7 +3105,6 @@ bool is_new_file = ent->IsDirtyNewFile(); - // TODO: correct locks held? 
if(0 != (result = ent->UploadPending(static_cast<int>(fi->fh), AutoLock::NONE))){ S3FS_PRN_ERR("could not upload pending data(meta, etc) for pseudo_fd(%llu) / path(%s)", (unsigned long long)(fi->fh), path); return result; @@ -3064,12 +3146,6 @@ return result; } -struct multi_head_callback_param -{ - void* buf; - fuse_fill_dir_t filler; -}; - static bool multi_head_callback(S3fsCurl* s3fscurl, void* param) { if(!s3fscurl){ @@ -3089,13 +3165,13 @@ bpath = s3fs_wtf8_decode(bpath); } if(param){ - struct multi_head_callback_param* pcbparam = reinterpret_cast<struct multi_head_callback_param*>(param); + SyncFiller* pcbparam = reinterpret_cast<SyncFiller*>(param); struct stat st; if(StatCache::getStatCacheData()->GetStat(saved_path, &st)){ - pcbparam->filler(pcbparam->buf, bpath.c_str(), &st, 0); + pcbparam->Fill(bpath.c_str(), &st, 0); }else{ S3FS_PRN_INFO2("Could not find %s file in stat cache.", saved_path.c_str()); - pcbparam->filler(pcbparam->buf, bpath.c_str(), 0, 0); + pcbparam->Fill(bpath.c_str(), 0, 0); } }else{ S3FS_PRN_WARN("param(multi_head_callback_param*) is NULL, then can not call filler."); @@ -3181,11 +3257,9 @@ curlmulti.SetSuccessCallback(multi_head_callback); curlmulti.SetRetryCallback(multi_head_retry_callback); - // Success Callback function parameter - struct multi_head_callback_param success_param; - success_param.buf = buf; - success_param.filler = filler; - curlmulti.SetSuccessCallbackParam(reinterpret_cast<void*>(&success_param)); + // Success Callback function parameter(SyncFiller object) + SyncFiller syncfiller(buf, filler); + curlmulti.SetSuccessCallbackParam(reinterpret_cast<void*>(&syncfiller)); // Not found Callback function parameter struct multi_head_notfound_callback_param notfound_param; @@ -3218,7 +3292,7 @@ if(use_wtf8){ bpath = s3fs_wtf8_decode(bpath); } - filler(buf, bpath.c_str(), &st, 0); + syncfiller.Fill(bpath.c_str(), &st, 0); continue; } @@ -3256,6 +3330,9 @@ // Objects that could not be found by HEAD request may exist only // as a path, so search for objects under that path.(a case of no dir object) // + if(!support_compat_dir){ + syncfiller.SufficiencyFill(head.common_prefixes); + } if(support_compat_dir && !notfound_param.notfound_list.empty()){ // [NOTE] not need to lock to access this here. 
// dummy header mode_t dirmask = umask(0); // macos does not have getumask() @@ -3287,10 +3364,10 @@ struct stat st; if(StatCache::getStatCacheData()->GetStat(dirpath, &st)){ - filler(buf, base_path.c_str(), &st, 0); + syncfiller.Fill(base_path.c_str(), &st, 0); }else{ S3FS_PRN_INFO2("Could not find %s directory(no dir object) in stat cache.", dirpath.c_str()); - filler(buf, base_path.c_str(), 0, 0); + syncfiller.Fill(base_path.c_str(), 0, 0); } }else{ S3FS_PRN_ERR("failed adding stat cache [path=%s], but dontinue...", dirpath.c_str()); @@ -3778,7 +3855,7 @@ { S3FS_PRN_INFO("[path=%s][name=%s][value=%p][size=%zu][flags=0x%x]", path, name, value, size, flags); - if((value && 0 == size) || (!value && 0 < size)){ + if(!value && 0 < size){ S3FS_PRN_ERR("Wrong parameter: value(%p), size(%zu)", value, size); return 0; } @@ -4241,6 +4318,11 @@ conn->want |= FUSE_CAP_BIG_WRITES; } + if(!ThreadPoolMan::Initialize(max_thread_count)){ + S3FS_PRN_CRIT("Could not create thread pool(%d)", max_thread_count); + s3fs_exit_fuseloop(EXIT_FAILURE); + } + // Signal object if(!S3fsSignals::Initialize()){ S3FS_PRN_ERR("Failed to initialize signal object, but continue..."); @@ -4258,6 +4340,8 @@ S3FS_PRN_WARN("Failed to clean up signal object."); } + ThreadPoolMan::Destroy(); + // cache(remove at last) if(is_remove_cache && (!CacheFileStat::DeleteCacheFileStatDirectory() || !FdManager::DeleteCacheDirectory())){ S3FS_PRN_WARN("Could not remove cache directory."); @@ -5664,15 +5748,6 @@ max_dirty_data = -1; } - if(!ThreadPoolMan::Initialize(max_thread_count)){ - S3FS_PRN_EXIT("Could not create thread pool(%d)", max_thread_count); - S3fsCurl::DestroyS3fsCurl(); - s3fs_destroy_global_ssl(); - destroy_parser_xml_lock(); - destroy_basename_lock(); - exit(EXIT_FAILURE); - } - // check free disk space if(!FdManager::IsSafeDiskSpace(NULL, S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount())){ S3FS_PRN_EXIT("There is no enough disk space for used as cache(or temporary) directory by s3fs."); @@ -5736,9 +5811,6 @@ } fuse_opt_free_args(&custom_args); - // Destroy thread pool - ThreadPoolMan::Destroy(); - // Destroy curl if(!S3fsCurl::DestroyS3fsCurl()){ S3FS_PRN_WARN("Could not release curl library."); diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/src/s3fs_cred.cpp new/s3fs-fuse-1.93/src/s3fs_cred.cpp --- old/s3fs-fuse-1.92/src/s3fs_cred.cpp 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/src/s3fs_cred.cpp 2023-07-19 15:31:43.000000000 +0200 @@ -57,8 +57,6 @@ "s3fs-fuse built-in Credential I/F Function\n" "Copyright(C) 2007 s3fs-fuse\n"; - S3FS_PRN_CRIT("Check why built-in function was called, the external credential library must have VersionS3fsCredential function."); - if(detail){ return detail_version; }else{ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/src/s3fs_xml.cpp new/s3fs-fuse-1.93/src/s3fs_xml.cpp --- old/s3fs-fuse-1.92/src/s3fs_xml.cpp 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/src/s3fs_xml.cpp 2023-07-19 15:31:43.000000000 +0200 @@ -341,7 +341,7 @@ return result; } -int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head) +int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head, bool prefix) { 
xmlXPathObjectPtr contents_xp; xmlNodeSetPtr content_nodes; @@ -409,6 +409,9 @@ std::string decname = get_decoded_cr_code(name); free(name); + if(prefix){ + head.common_prefixes.push_back(decname); + } if(!head.insert(decname.c_str(), (!stretag.empty() ? stretag.c_str() : NULL), is_dir)){ S3FS_PRN_ERR("insert_object returns with error."); xmlXPathFreeObject(key); @@ -462,8 +465,8 @@ ex_prefix += "Prefix"; ex_etag += "ETag"; - if(-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_contents.c_str(), ex_key.c_str(), ex_etag.c_str(), 0, head) || - -1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_cprefix.c_str(), ex_prefix.c_str(), NULL, 1, head) ) + if(-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_contents.c_str(), ex_key.c_str(), ex_etag.c_str(), 0, head, /*prefix=*/ false) || + -1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_cprefix.c_str(), ex_prefix.c_str(), NULL, 1, head, /*prefix=*/ true) ) { S3FS_PRN_ERR("append_objects_from_xml_ex returns with error."); S3FS_XMLXPATHFREECONTEXT(ctx); diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/src/s3fs_xml.h new/s3fs-fuse-1.93/src/s3fs_xml.h --- old/s3fs-fuse-1.92/src/s3fs_xml.h 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/src/s3fs_xml.h 2023-07-19 15:31:43.000000000 +0200 @@ -33,7 +33,7 @@ // Functions //------------------------------------------------------------------- bool is_truncated(xmlDocPtr doc); -int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head); +int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head, bool prefix); int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head); xmlChar* get_next_continuation_token(xmlDocPtr doc); xmlChar* get_next_marker(xmlDocPtr doc); diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/src/s3objlist.h new/s3fs-fuse-1.93/src/s3objlist.h --- old/s3fs-fuse-1.92/src/s3objlist.h 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/src/s3objlist.h 2023-07-19 15:31:43.000000000 +0200 @@ -24,6 +24,7 @@ #include <list> #include <map> #include <string> +#include <vector> //------------------------------------------------------------------- // Structure / Typedef @@ -47,6 +48,8 @@ { private: s3obj_t objects; + public: + std::vector<std::string> common_prefixes; private: bool insert_normalized(const char* name, const char* normalized, bool is_dir); diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/test/integration-test-common.sh new/s3fs-fuse-1.93/test/integration-test-common.sh --- old/s3fs-fuse-1.92/test/integration-test-common.sh 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/test/integration-test-common.sh 2023-07-19 15:31:43.000000000 +0200 @@ -168,7 +168,7 @@ if [ -z "${CHAOS_HTTP_PROXY}" ] && [ -z "${CHAOS_HTTP_PROXY_OPT}" ]; then S3PROXY_CACERT_FILE="/tmp/keystore.pem" rm -f /tmp/keystore.jks "${S3PROXY_CACERT_FILE}" - echo -e 'password\npassword\n\n\n\n\n\n\nyes' | keytool -genkey -keystore /tmp/keystore.jks -keyalg RSA -keysize 2048 -validity 365 -ext SAN=IP:127.0.0.1 + printf 'password\npassword\n\n\n\n\n\n\ny' | keytool -genkey -keystore /tmp/keystore.jks -keyalg RSA 
-keysize 2048 -validity 365 -ext SAN=IP:127.0.0.1 echo password | keytool -exportcert -keystore /tmp/keystore.jks -rfc -file "${S3PROXY_CACERT_FILE}" else S3PROXY_CACERT_FILE="" @@ -258,6 +258,16 @@ local VIA_STDBUF_CMDLINE="${STDBUF_BIN} -oL -eL" fi + # [NOTE] + # On macOS we may get a VERIFY error for the self-signed certificate used by s3proxy. + # We can specify NO_CHECK_CERT=1 to avoid this. + # + if [ -n "${NO_CHECK_CERT}" ] && [ "${NO_CHECK_CERT}" -eq 1 ]; then + local NO_CHECK_CERT_OPT="-o no_check_certificate" + else + local NO_CHECK_CERT_OPT="" + fi + # Common s3fs options: # # TODO: Allow all these options to be overridden with env variables @@ -292,6 +302,7 @@ ${AUTH_OPT} \ ${DIRECT_IO_OPT} \ ${S3FS_HTTP_PROXY_OPT} \ + ${NO_CHECK_CERT_OPT} \ -o stat_cache_expire=1 \ -o stat_cache_interval_expire=1 \ -o dbglevel="${DBGLEVEL:=info}" \ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/test/integration-test-main.sh new/s3fs-fuse-1.93/test/integration-test-main.sh --- old/s3fs-fuse-1.92/test/integration-test-main.sh 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/test/integration-test-main.sh 2023-07-19 15:31:43.000000000 +0200 @@ -389,6 +389,8 @@ describe "Test external directory creation ..." local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/directory/"${TEST_TEXT_FILE}" echo "data" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" + # shellcheck disable=SC2010 + ls | grep -q directory ls directory >/dev/null 2>&1 get_permissions directory | grep -q 750$ ls directory @@ -2252,142 +2254,64 @@ echo data1 | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME_1}" echo data2 | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME_2}" - # shellcheck disable=SC2009 - if ps u -p "${S3FS_PID}" | grep -q compat_dir; then - # - # with "compat_dir", found directories and files - # - - # Top directory - # shellcheck disable=SC2010 - if ! ls -1 | grep -q '^not_existed_dir_single$'; then - echo "Expect to find \"not_existed_dir_single\" directory, but it is not found" - return 1; - fi - # shellcheck disable=SC2010 - if ! ls -1 | grep -q '^not_existed_dir_parent$'; then - echo "Expect to find \"not_existed_dir_parent\" directory, but it is not found" - return 1; - fi - - # Single nest directory - # shellcheck disable=SC2010 - if ! ls -d not_existed_dir_single | grep -q '^not_existed_dir_single$'; then - echo "Expect to find \"not_existed_dir_single\" directory, but it is not found" - return 1; - fi - # shellcheck disable=SC2010 - if ! ls -1 not_existed_dir_single | grep -q "^${TEST_TEXT_FILE}\$"; then - echo "Expect to find \"not_existed_dir_single/${TEST_TEXT_FILE}\" file, but it is not found" - return 1; - fi - # shellcheck disable=SC2010 - if ! ls -1 "not_existed_dir_single/${TEST_TEXT_FILE}" | grep -q "^not_existed_dir_single/${TEST_TEXT_FILE}\$"; then - echo "Expect to find \"not_existed_dir_single/${TEST_TEXT_FILE}\" file, but it is not found" - return 1; - fi - - # Double nest directory - # shellcheck disable=SC2010 - if ! ls -d not_existed_dir_parent | grep -q '^not_existed_dir_parent'; then - echo "Expect to find \"not_existed_dir_parent\" directory, but it is not found" - return 1; - fi - # shellcheck disable=SC2010 - if ! ls -1 not_existed_dir_parent | grep -q '^not_existed_dir_child'; then - echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child\" directory, but it is not found" - return 1; - fi - # shellcheck disable=SC2010 - if ! 
ls -d not_existed_dir_parent/not_existed_dir_child | grep -q '^not_existed_dir_parent/not_existed_dir_child'; then - echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child\" directory, but it is not found" - return 1; - fi - # shellcheck disable=SC2010 - if ! ls -1 not_existed_dir_parent/not_existed_dir_child | grep -q "^${TEST_TEXT_FILE}\$"; then - echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\" directory, but it is not found" - return 1; - fi - # shellcheck disable=SC2010 - if ! ls -1 "not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}" | grep -q "^not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\$"; then - echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\" directory, but it is not found" - return 1; - fi - - rm -rf not_existed_dir_single - rm -rf not_existed_dir_parent - - else - # - # without "compat_dir", found directories and files - # - # [NOTE] - # If specify a directory path, the file under that directory will be found. - # And if specify a file full path, it will be found. - # - - # Top directory - # shellcheck disable=SC2010 - if ls -1 | grep -q '^not_existed_dir_single$'; then - echo "Expect to not find \"not_existed_dir_single\" directory, but it is found" - return 1; - fi - # shellcheck disable=SC2010 - if ls -1 | grep -q '^not_existed_dir_parent$'; then - echo "Expect to not find \"not_existed_dir_parent\" directory, but it is found" - return 1; - fi - - # Single nest directory - # shellcheck disable=SC2010 - if ! ls -d not_existed_dir_single | grep -q '^not_existed_dir_single$'; then - echo "Expect to find \"not_existed_dir_single\" directory, but it is not found" - return 1; - fi - # shellcheck disable=SC2010 - if ! ls -1 not_existed_dir_single | grep -q "^${TEST_TEXT_FILE}\$"; then - echo "Expect to find \"not_existed_dir_single/${TEST_TEXT_FILE}\" file, but it is not found" - return 1; - fi - # shellcheck disable=SC2010 - if ! ls -1 "not_existed_dir_single/${TEST_TEXT_FILE}" | grep -q "^not_existed_dir_single/${TEST_TEXT_FILE}\$"; then - echo "Expect to find \"not_existed_dir_single/${TEST_TEXT_FILE}\" file, but it is not found" - return 1; - fi - - # Double nest directory - # shellcheck disable=SC2010 - if ! ls -d not_existed_dir_parent | grep -q '^not_existed_dir_parent'; then - echo "Expect to find \"not_existed_dir_parent\" directory, but it is not found" - return 1; - fi - # shellcheck disable=SC2010 - if ls -1 not_existed_dir_parent | grep -q '^not_existed_dir_child'; then - echo "Expect to not find \"not_existed_dir_parent/not_existed_dir_child\" directory, but it is found" - return 1; - fi - # shellcheck disable=SC2010 - if ! ls -d not_existed_dir_parent/not_existed_dir_child | grep -q '^not_existed_dir_parent/not_existed_dir_child'; then - echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child\" directory, but it is not found" - return 1; - fi - # shellcheck disable=SC2010 - if ! ls -1 not_existed_dir_parent/not_existed_dir_child | grep -q "^${TEST_TEXT_FILE}\$"; then - echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\" directory, but it is not found" - return 1; - fi - # shellcheck disable=SC2010 - if ! 
ls -1 "not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}" | grep -q "^not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\$"; then - echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\" directory, but it is not found" - return 1; - fi - - rm -rf not_existed_dir_single - - # [NOTE] - # This case could not remove sub directory, then below command will be failed. - #rm -rf not_existed_dir_parent + # Top directory + # shellcheck disable=SC2010 + if ! ls -1 | grep -q '^not_existed_dir_single$'; then + echo "Expect to find \"not_existed_dir_single\" directory, but it is not found" + return 1; + fi + # shellcheck disable=SC2010 + if ! ls -1 | grep -q '^not_existed_dir_parent$'; then + echo "Expect to find \"not_existed_dir_parent\" directory, but it is not found" + return 1; + fi + + # Single nest directory + # shellcheck disable=SC2010 + if ! ls -d not_existed_dir_single | grep -q '^not_existed_dir_single$'; then + echo "Expect to find \"not_existed_dir_single\" directory, but it is not found" + return 1; + fi + # shellcheck disable=SC2010 + if ! ls -1 not_existed_dir_single | grep -q "^${TEST_TEXT_FILE}\$"; then + echo "Expect to find \"not_existed_dir_single/${TEST_TEXT_FILE}\" file, but it is not found" + return 1; + fi + # shellcheck disable=SC2010 + if ! ls -1 "not_existed_dir_single/${TEST_TEXT_FILE}" | grep -q "^not_existed_dir_single/${TEST_TEXT_FILE}\$"; then + echo "Expect to find \"not_existed_dir_single/${TEST_TEXT_FILE}\" file, but it is not found" + return 1; + fi + + # Double nest directory + # shellcheck disable=SC2010 + if ! ls -d not_existed_dir_parent | grep -q '^not_existed_dir_parent'; then + echo "Expect to find \"not_existed_dir_parent\" directory, but it is not found" + return 1; + fi + # shellcheck disable=SC2010 + if ! ls -1 not_existed_dir_parent | grep -q '^not_existed_dir_child'; then + echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child\" directory, but it is not found" + return 1; + fi + # shellcheck disable=SC2010 + if ! ls -d not_existed_dir_parent/not_existed_dir_child | grep -q '^not_existed_dir_parent/not_existed_dir_child'; then + echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child\" directory, but it is not found" + return 1; + fi + # shellcheck disable=SC2010 + if ! ls -1 not_existed_dir_parent/not_existed_dir_child | grep -q "^${TEST_TEXT_FILE}\$"; then + echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\" directory, but it is not found" + return 1; + fi + # shellcheck disable=SC2010 + if ! 
ls -1 "not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}" | grep -q "^not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\$"; then + echo "Expect to find \"not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}\" directory, but it is not found" + return 1; fi + + rm -rf not_existed_dir_single + rm -rf not_existed_dir_parent } function test_ut_ossfs { diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-fuse-1.92/test/test-utils.sh new/s3fs-fuse-1.93/test/test-utils.sh --- old/s3fs-fuse-1.92/test/test-utils.sh 2023-05-22 01:25:50.000000000 +0200 +++ new/s3fs-fuse-1.93/test/test-utils.sh 2023-07-19 15:31:43.000000000 +0200 @@ -137,22 +137,6 @@ echo "Could not create file ${TEST_TEXT_FILE}, it does not exist" exit 1 fi - - # wait & check - local BASE_TEXT_LENGTH; BASE_TEXT_LENGTH=$(echo "${TEXT}" | wc -c | awk '{print $1}') - local TRY_COUNT=10 - while true; do - local MK_TEXT_LENGTH - MK_TEXT_LENGTH=$(wc -c "${TEST_TEXT_FILE}" | awk '{print $1}') - if [ "${BASE_TEXT_LENGTH}" -eq "${MK_TEXT_LENGTH}" ]; then - break - fi - local TRY_COUNT=$((TRY_COUNT - 1)) - if [ "${TRY_COUNT}" -le 0 ]; then - echo "Could not create file ${TEST_TEXT_FILE}, that file size is something wrong" - fi - sleep 1 - done } function rm_test_file { @@ -295,32 +279,29 @@ } function get_ctime() { + # ex: "1657504903.019784214" if [ "$(uname)" = "Darwin" ]; then - # ex: "1657504903.019784214" stat -f "%Fc" "$1" else - # ex: "2022-07-24 12:45:18.621046168 +0000" - stat -c "%z" "$1" + stat -c "%.9Z" "$1" fi } function get_mtime() { + # ex: "1657504903.019784214" if [ "$(uname)" = "Darwin" ]; then - # ex: "1657504903.019784214" stat -f "%Fm" "$1" else - # ex: "2022-07-24 12:45:18.621046168 +0000" - stat -c "%y" "$1" + stat -c "%.9Y" "$1" fi } function get_atime() { + # ex: "1657504903.019784214" if [ "$(uname)" = "Darwin" ]; then - # ex: "1657504903.019784214" stat -f "%Fa" "$1" else - # ex: "2022-07-24 12:45:18.621046168 +0000" - stat -c "%x" "$1" + stat -c "%0.9X" "$1" fi }