Add a -j <n> option, which, when used, will spawn <n> processes to do the debuginfo extraction in parallel. A pipe is used to dispatch the files among the processes.
Signed-off-by: Michal Marek <mma...@suse.com> --- v3: - Rebased onto current master - Build debugsources.list (and now also elfbins.list) from per-job fragments to avoid races. v2: Use a regular pipe, because a named pipe leads to races. macros.in | 2 +- scripts/find-debuginfo.sh | 63 ++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 61 insertions(+), 4 deletions(-) diff --git a/macros.in b/macros.in index b03c5a9e95fa..8bde2d77a581 100644 --- a/macros.in +++ b/macros.in @@ -178,7 +178,7 @@ # the script. See the script for details. # %__debug_install_post \ - %{_rpmconfigdir}/find-debuginfo.sh %{?_missing_build_ids_terminate_build:--strict-build-id} %{?_include_minidebuginfo:-m} %{?_include_gdb_index:-i} %{?_unique_build_ids:--ver-rel "%{version}-%{release}"} %{?_unique_debug_names:--unique-debug-arch "%{_arch}"} %{?_find_debuginfo_dwz_opts} %{?_find_debuginfo_opts} "%{_builddir}/%{?buildsubdir}"\ + %{_rpmconfigdir}/find-debuginfo.sh %{?_smp_mflags} %{?_missing_build_ids_terminate_build:--strict-build-id} %{?_include_minidebuginfo:-m} %{?_include_gdb_index:-i} %{?_unique_build_ids:--ver-rel "%{version}-%{release}"} %{?_unique_debug_names:--unique-debug-arch "%{_arch}"} %{?_find_debuginfo_dwz_opts} %{?_find_debuginfo_opts} "%{_builddir}/%{?buildsubdir}"\ %{nil} # Template for debug information sub-package. diff --git a/scripts/find-debuginfo.sh b/scripts/find-debuginfo.sh index 6dcd5a46f700..2016222e7bf0 100644 --- a/scripts/find-debuginfo.sh +++ b/scripts/find-debuginfo.sh @@ -67,6 +67,9 @@ ver_rel= # Arch given by --unique-debug-arch unique_debug_arch= +# Number of parallel jobs to spawn +n_jobs=1 + BUILDDIR=. 
out=debugfiles.list nout=0 @@ -123,6 +126,13 @@ while [ $# -gt 0 ]; do -r) strip_r=true ;; + -j) + n_jobs=$2 + shift + ;; + -j*) + n_jobs=${1#-j} + ;; *) BUILDDIR=$1 shift @@ -334,9 +344,56 @@ do_file() fi } -while read nlinks inum f; do - do_file "$nlinks" "$inum" "$f" -done <"$temp/primary" +# 16^6 - 1 or about 16 million files +FILENUM_DIGITS=6 +run_job() +{ + local jobid=$1 filenum + local SOURCEFILE=$temp/debugsources.$jobid ELFBINSFILE=$temp/elfbins.$jobid + + >"$SOURCEFILE" + >"$ELFBINSFILE" + # can't use read -n <n>, because it reads bytes one by one, allowing for + # races + while :; do + filenum=$(dd bs=$(( FILENUM_DIGITS + 1 )) count=1 status=none) + if test -z "$filenum"; then + break + fi + do_file $(sed -n "$(( 0x$filenum )) p" "$temp/primary") + done + echo 0 >"$temp/res.$jobid" +} + +n_files=$(wc -l <"$temp/primary") +if [ $n_jobs -gt $n_files ]; then + n_jobs=$n_files +fi +if [ $n_jobs -le 1 ]; then + while read nlinks inum f; do + do_file "$nlinks" "$inum" "$f" + done <"$temp/primary" +else + for ((i = 1; i <= n_files; i++)); do + printf "%0${FILENUM_DIGITS}x\\n" $i + done | ( + exec 3<&0 + for ((i = 0; i < n_jobs; i++)); do + # The shell redirects stdin to /dev/null for background jobs. Work + # around this by duplicating fd 0 + run_job $i <&3 & + done + wait + ) + for f in "$temp"/res.*; do + res=$(< "$f") + if [ "$res" != "0" ]; then + exit 1 + fi + done + cat "$temp"/debugsources.* >"$SOURCEFILE" + cat "$temp"/elfbins.* >"$ELFBINSFILE" +fi # Invoke the DWARF Compressor utility. if $run_dwz \ -- 2.6.6 _______________________________________________ Rpm-maint mailing list Rpm-maint@lists.rpm.org http://lists.rpm.org/mailman/listinfo/rpm-maint