Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package restic for openSUSE:Factory checked in at 2024-02-06 16:35:19
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/restic (Old)
 and      /work/SRC/openSUSE:Factory/.restic.new.1815 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "restic"

Tue Feb  6 16:35:19 2024 rev:24 rq:1144499 version:0.16.4

Changes:
--------
--- /work/SRC/openSUSE:Factory/restic/restic.changes	2024-01-22 20:39:01.315731064 +0100
+++ /work/SRC/openSUSE:Factory/.restic.new.1815/restic.changes	2024-02-06 16:36:11.678621351 +0100
@@ -1,0 +2,12 @@
+Mon Feb  5 03:29:55 UTC 2024 - Marcus Rueckert <mrueck...@suse.de>
+
+- Update to version 0.16.4
+  This release works around and improves detection of a bug in the
+  compression library used by restic. The resulting issue only
+  happens when using restic 0.16.3 and the max compression level
+  (the default auto and off compression levels are not affected),
+  and when the source files being backed up have specific data in
+  them to trigger the bug. If you use max compression, you can use
+  restic check --read-data to make sure you're not affected.
+
+-------------------------------------------------------------------
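For anyone who ran backups with restic 0.16.3 at max compression, the verification step named in the release note above is a single command. A usage sketch, assuming the repository location and password come from the usual RESTIC_REPOSITORY and RESTIC_PASSWORD environment variables (those are not part of the release note):

    # Re-read and verify all pack data in the repository; thorough but slow.
    restic check --read-data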

Old:
----
  restic-0.16.3.tar.gz
  restic-0.16.3.tar.gz.asc

New:
----
  restic-0.16.4.tar.gz
  restic-0.16.4.tar.gz.asc

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ restic.spec ++++++
--- /var/tmp/diff_new_pack.xpJYDp/_old  2024-02-06 16:36:12.922666356 +0100
+++ /var/tmp/diff_new_pack.xpJYDp/_new  2024-02-06 16:36:12.922666356 +0100
@@ -20,7 +20,7 @@
 %define import_path github.com/restic/restic
 
 Name:           restic
-Version:        0.16.3
+Version:        0.16.4
 Release:        0
 Summary:        Backup program with deduplication and encryption
 License:        BSD-2-Clause

++++++ restic-0.16.3.tar.gz -> restic-0.16.4.tar.gz ++++++
/work/SRC/openSUSE:Factory/restic/restic-0.16.3.tar.gz /work/SRC/openSUSE:Factory/.restic.new.1815/restic-0.16.4.tar.gz differ: char 13, line 1

++++++ vendor.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/klauspost/compress/README.md new/vendor/github.com/klauspost/compress/README.md
--- old/vendor/github.com/klauspost/compress/README.md	2024-01-17 14:05:58.000000000 +0100
+++ new/vendor/github.com/klauspost/compress/README.md	2024-02-05 04:30:38.000000000 +0100
@@ -16,14 +16,6 @@
 
 # changelog
 
-* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)
-	* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
-
-* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
-	* s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
-	* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
-	* s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
-
 * Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
 	* Add experimental dictionary builder  https://github.com/klauspost/compress/pull/853
 	* Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/klauspost/compress/fse/compress.go new/vendor/github.com/klauspost/compress/fse/compress.go
--- old/vendor/github.com/klauspost/compress/fse/compress.go	2024-01-17 14:05:58.000000000 +0100
+++ new/vendor/github.com/klauspost/compress/fse/compress.go	2024-02-05 04:30:38.000000000 +0100
@@ -212,7 +212,7 @@
                previous0 bool
                charnum   uint16
 
-               maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3
+               maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
 
                // Write Table Size
                bitStream = uint32(tableLog - minTablelog)
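This hunk swaps the worst-case FSE header bound: the removed line (the v1.17.4 side, "old" in this diff) pads the per-symbol bit count with 4 + 2 extra bits before rounding down to bytes, while the restored v1.17.2 line does not (this whole vendor update pins compress back to v1.17.2; see modules.txt at the end). A minimal standalone Go sketch of the two formulas; the symbolLen and tableLog values are illustrative assumptions, not taken from the diff:

    package main

    import "fmt"

    func main() {
            // Illustrative values; in the vendored code these come from Scratch state.
            symbolLen, tableLog := 255, 9
            withSlack := ((symbolLen*tableLog + 4 + 2) >> 3) + 3 // removed v1.17.4 bound
            noSlack := ((symbolLen * tableLog) >> 3) + 3         // restored v1.17.2 bound
            fmt.Println(withSlack, noSlack)                      // 290 289: one byte apart
    }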
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/klauspost/compress/huff0/bytereader.go new/vendor/github.com/klauspost/compress/huff0/bytereader.go
--- old/vendor/github.com/klauspost/compress/huff0/bytereader.go	1970-01-01 01:00:00.000000000 +0100
+++ new/vendor/github.com/klauspost/compress/huff0/bytereader.go	2024-02-05 04:30:38.000000000 +0100
@@ -0,0 +1,44 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+       b   []byte
+       off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+       b.b = in
+       b.off = 0
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+       v3 := int32(b.b[b.off+3])
+       v2 := int32(b.b[b.off+2])
+       v1 := int32(b.b[b.off+1])
+       v0 := int32(b.b[b.off])
+       return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+       v3 := uint32(b.b[b.off+3])
+       v2 := uint32(b.b[b.off+2])
+       v1 := uint32(b.b[b.off+1])
+       v0 := uint32(b.b[b.off])
+       return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+       return len(b.b) - b.off
+}
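Because bytereader.go is restored wholesale above, its behaviour is easy to pin down. The sketch below mirrors the unexported type outside the package purely for illustration (the method bodies are reshuffled but compute the same little-endian values):

    package main

    import "fmt"

    // Mirror of the byteReader restored above: manually advanced offset, no bounds checks.
    type byteReader struct {
            b   []byte
            off int
    }

    func (b *byteReader) init(in []byte) { b.b, b.off = in, 0 }

    // Little endian: the byte at the lowest offset is the least significant.
    func (b byteReader) Uint32() uint32 {
            return uint32(b.b[b.off]) | uint32(b.b[b.off+1])<<8 |
                    uint32(b.b[b.off+2])<<16 | uint32(b.b[b.off+3])<<24
    }

    func (b byteReader) remain() int { return len(b.b) - b.off }

    func main() {
            var br byteReader
            br.init([]byte{0x01, 0x02, 0x03, 0x04})
            fmt.Printf("%#x %d\n", br.Uint32(), br.remain()) // 0x4030201 4
    }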
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/klauspost/compress/huff0/compress.go new/vendor/github.com/klauspost/compress/huff0/compress.go
--- old/vendor/github.com/klauspost/compress/huff0/compress.go	2024-01-17 14:05:58.000000000 +0100
+++ new/vendor/github.com/klauspost/compress/huff0/compress.go	2024-02-05 04:30:38.000000000 +0100
@@ -350,7 +350,6 @@
 // Does not update s.clearCount.
 func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
        reuse = true
-       _ = s.count // Assert that s != nil to speed up the following loop.
        for _, v := range in {
                s.count[v]++
        }
@@ -416,7 +415,7 @@
 
 // minTableLog provides the minimum logSize to safely represent a distribution.
 func (s *Scratch) minTableLog() uint8 {
-       minBitsSrc := highBit32(uint32(s.srcLen)) + 1
+       minBitsSrc := highBit32(uint32(s.br.remain())) + 1
        minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
        if minBitsSrc < minBitsSymbols {
                return uint8(minBitsSrc)
@@ -428,7 +427,7 @@
 func (s *Scratch) optimalTableLog() {
        tableLog := s.TableLog
        minBits := s.minTableLog()
-       maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1
+       maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
        if maxBitsSrc < tableLog {
                // Accuracy can be reduced
                tableLog = maxBitsSrc
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/klauspost/compress/huff0/huff0.go new/vendor/github.com/klauspost/compress/huff0/huff0.go
--- old/vendor/github.com/klauspost/compress/huff0/huff0.go	2024-01-17 14:05:58.000000000 +0100
+++ new/vendor/github.com/klauspost/compress/huff0/huff0.go	2024-02-05 04:30:38.000000000 +0100
@@ -88,7 +88,7 @@
        // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded.
        MaxDecodedSize int
 
-       srcLen int
+       br byteReader
 
        // MaxSymbolValue will override the maximum symbol value of the next block.
        MaxSymbolValue uint8
@@ -170,7 +170,7 @@
        if s.fse == nil {
                s.fse = &fse.Scratch{}
        }
-       s.srcLen = len(in)
+       s.br.init(in)
 
        return s, nil
 }
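Taken together with the huff0/compress.go hunks above, this change is symmetric: v1.17.4 stored the input length in s.srcLen, while the restored v1.17.2 code stores a byteReader and asks it for s.br.remain(), which equals len(in) right after s.br.init(in), so both revisions feed the same number into the table-log sizing. A simplified stand-in for that sizing math (the final return is filled in from the obvious structure, since the hunk cuts off after the first return):

    package main

    import (
            "fmt"
            "math/bits"
    )

    // highBit32 as used by the vendored helpers: index of the highest set bit.
    func highBit32(v uint32) uint32 { return uint32(bits.Len32(v) - 1) }

    // Sketch of minTableLog: the table log must cover both the remaining
    // source size and the symbol alphabet.
    func minTableLog(srcLen, symbolLen int) uint8 {
            minBitsSrc := highBit32(uint32(srcLen)) + 1
            minBitsSymbols := highBit32(uint32(symbolLen-1)) + 2
            if minBitsSrc < minBitsSymbols {
                    return uint8(minBitsSrc)
            }
            return uint8(minBitsSymbols) // assumed tail, not shown in the hunk
    }

    func main() {
            // v1.17.4 passed s.srcLen here; v1.17.2 passes s.br.remain().
            fmt.Println(minTableLog(4096, 256)) // 9
    }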
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/klauspost/compress/zstd/README.md new/vendor/github.com/klauspost/compress/zstd/README.md
--- old/vendor/github.com/klauspost/compress/zstd/README.md	2024-01-17 14:05:58.000000000 +0100
+++ new/vendor/github.com/klauspost/compress/zstd/README.md	2024-02-05 04:30:38.000000000 +0100
@@ -259,7 +259,7 @@
 
 ## Decompressor
 
-Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
 
This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
 kindly supplied by [fuzzit.dev](https://fuzzit.dev/). 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/klauspost/compress/zstd/enc_best.go new/vendor/github.com/klauspost/compress/zstd/enc_best.go
--- old/vendor/github.com/klauspost/compress/zstd/enc_best.go	2024-01-17 14:05:58.000000000 +0100
+++ new/vendor/github.com/klauspost/compress/zstd/enc_best.go	2024-02-05 04:30:38.000000000 +0100
@@ -43,7 +43,7 @@
        if m.rep < 0 {
                ofc = ofCode(uint32(m.s-m.offset) + 3)
        } else {
-               ofc = ofCode(uint32(m.rep) & 3)
+               ofc = ofCode(uint32(m.rep))
        }
        // Cost, excluding
        ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
@@ -227,7 +227,7 @@
                                }
                        }
                        l := 4 + e.matchlen(s+4, offset+4, src)
-                       if true {
+                       if rep < 0 {
                                // Extend candidate match backwards as far as possible.
                                tMin := s - e.maxMatchOff
                                if tMin < 0 {
@@ -282,7 +282,6 @@
                // Load next and check...
                e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
                e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
-               index0 := s + 1
 
                // Look far ahead, unless we have a really long match already...
                if best.length < goodEnough {
@@ -358,16 +357,19 @@
                        blk.sequences = append(blk.sequences, seq)
 
                        // Index old s + 1 -> s - 1
+                       index0 := s + 1
                        s = best.s + best.length
-                       nextEmit = s
 
-                       // Index skipped...
-                       end := s
-                       if s > sLimit+4 {
-                               end = sLimit + 4
+                       nextEmit = s
+                       if s >= sLimit {
+                               if debugEncoder {
+                                       println("repeat ended", s, best.length)
+                               }
+                               break encodeLoop
                        }
+                       // Index skipped...
                        off := index0 + e.cur
-                       for index0 < end {
+                       for index0 < s {
                                cv0 := load6432(src, index0)
                                h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
                                h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@@ -376,7 +378,6 @@
                                off++
                                index0++
                        }
-
                        switch best.rep {
                        case 2, 4 | 1:
                                offset1, offset2 = offset2, offset1
@@ -385,17 +386,12 @@
                        case 4 | 3:
                        offset1, offset2, offset3 = offset1-1, offset1, offset2
                        }
-                       if s >= sLimit {
-                               if debugEncoder {
-                                       println("repeat ended", s, best.length)
-                               }
-                               break encodeLoop
-                       }
                        continue
                }
 
                // A 4-byte match has been found. Update recent offsets.
                // We'll later see if more than 4 bytes.
+               index0 := s + 1
                s = best.s
                t := best.offset
                offset1, offset2, offset3 = s-t, offset1, offset2
@@ -423,25 +419,19 @@
                }
                blk.sequences = append(blk.sequences, seq)
                nextEmit = s
-
-               // Index old s + 1 -> s - 1 or sLimit
-               end := s
-               if s > sLimit-4 {
-                       end = sLimit - 4
+               if s >= sLimit {
+                       break encodeLoop
                }
 
-               off := index0 + e.cur
-               for index0 < end {
+               // Index old s + 1 -> s - 1
+               for index0 < s {
                        cv0 := load6432(src, index0)
                        h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
                        h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
+                       off := index0 + e.cur
                        e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
                        e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
                        index0++
-                       off++
-               }
-               if s >= sLimit {
-                       break encodeLoop
                }
        }
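The enc_best.go hunks are three related reversions: the offset code for repeat matches drops the & 3 mask, backward match extension runs only for non-repeat candidates (if rep < 0 instead of if true), and the loops that index skipped positions now walk all the way to s, checking sLimit before indexing rather than clamping the range at sLimit±4. The sketch below shows only the restored indexing-loop shape; load6432 and hashLen are toy stand-ins for the vendored helpers, and the table sizes are arbitrary:

    package main

    import "fmt"

    type prevEntry struct{ offset, prev int32 }

    // Toy 64-bit load: little-endian read of up to 8 bytes at position i.
    func load6432(src []byte, i int32) uint64 {
            var v uint64
            for k := int32(0); k < 8 && i+k < int32(len(src)); k++ {
                    v |= uint64(src[i+k]) << (8 * k)
            }
            return v
    }

    // Toy multiplicative hash into tableBits bits.
    func hashLen(v uint64, tableBits uint) uint32 {
            return uint32((v * 0x9E3779B185EBCA87) >> (64 - tableBits))
    }

    func main() {
            longTable := make([]prevEntry, 1<<16)
            shortTable := make([]prevEntry, 1<<14)
            src := []byte("example data between two matches")
            index0, s, cur := int32(1), int32(9), int32(0)

            // Restored v1.17.2 pattern: visit every skipped position up to s and
            // derive off from index0 instead of keeping a second running counter.
            for index0 < s {
                    cv0 := load6432(src, index0)
                    h0 := hashLen(cv0, 16)
                    h1 := hashLen(cv0, 14)
                    off := index0 + cur
                    longTable[h0] = prevEntry{offset: off, prev: longTable[h0].offset}
                    shortTable[h1] = prevEntry{offset: off, prev: shortTable[h1].offset}
                    index0++
            }
            fmt.Println("indexed positions 1 through", s-1)
    }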
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/klauspost/compress/zstd/enc_better.go new/vendor/github.com/klauspost/compress/zstd/enc_better.go
--- old/vendor/github.com/klauspost/compress/zstd/enc_better.go	2024-01-17 14:05:58.000000000 +0100
+++ new/vendor/github.com/klauspost/compress/zstd/enc_better.go	2024-02-05 04:30:38.000000000 +0100
@@ -145,7 +145,7 @@
                var t int32
                // We allow the encoder to optionally turn off repeat offsets across blocks
                canRepeat := len(blk.sequences) > 2
-               var matched, index0 int32
+               var matched int32
 
                for {
                        if debugAsserts && canRepeat && offset1 == 0 {
@@ -162,7 +162,6 @@
                        off := s + e.cur
                        e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
                        e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
-                       index0 = s + 1
 
                        if canRepeat {
                                if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@@ -259,6 +258,7 @@
                                        }
                                        blk.sequences = append(blk.sequences, seq)
 
+                                       index0 := s + repOff2
                                        s += lenght + repOff2
                                        nextEmit = s
                                        if s >= sLimit {
@@ -498,15 +498,15 @@
                }
 
                // Index match start+1 (long) -> s - 1
-               off := index0 + e.cur
+               index0 := s - l + 1
                for index0 < s-1 {
                        cv0 := load6432(src, index0)
                        cv1 := cv0 >> 8
                        h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+                       off := index0 + e.cur
                        e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
                        e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
                        index0 += 2
-                       off += 2
                }
 
                cv = load6432(src, s)
@@ -672,7 +672,7 @@
                var t int32
                // We allow the encoder to optionally turn off repeat offsets across blocks
                canRepeat := len(blk.sequences) > 2
-               var matched, index0 int32
+               var matched int32
 
                for {
                        if debugAsserts && canRepeat && offset1 == 0 {
@@ -691,7 +691,6 @@
                        e.markLongShardDirty(nextHashL)
                        e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
                        e.markShortShardDirty(nextHashS)
-                       index0 = s + 1
 
                        if canRepeat {
                                if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@@ -727,6 +726,7 @@
                                        blk.sequences = append(blk.sequences, seq)
 
                                        // Index match start+1 (long) -> s - 1
+                                       index0 := s + repOff
                                        s += lenght + repOff
 
                                        nextEmit = s
@@ -790,6 +790,7 @@
                                        }
                                        blk.sequences = append(blk.sequences, seq)
 
+                                       index0 := s + repOff2
                                        s += lenght + repOff2
                                        nextEmit = s
                                        if s >= sLimit {
@@ -1023,18 +1024,18 @@
                }
 
                // Index match start+1 (long) -> s - 1
-               off := index0 + e.cur
+               index0 := s - l + 1
                for index0 < s-1 {
                        cv0 := load6432(src, index0)
                        cv1 := cv0 >> 8
                        h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+                       off := index0 + e.cur
                        e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
                        e.markLongShardDirty(h0)
                        h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
                        e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
                        e.markShortShardDirty(h1)
                        index0 += 2
-                       off += 2
                }
 
                cv = load6432(src, s)
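In enc_better.go the downgrade narrows index0 from a long-lived loop variable (v1.17.4 declared it next to matched and assigned index0 = s + 1 on every iteration) back to short-lived locals: each repeat branch declares its own index0, and before the long-match indexing loop it is recomputed as index0 := s - l + 1. The arithmetic: once s has been advanced past a match of length l, s-l is the match start, so indexing resumes at "match start + 1", stepping by two, exactly as the restored comment says. A toy check of that bookkeeping:

    package main

    import "fmt"

    func main() {
            // After a match of length l, s points one past its end, so the match
            // occupied [s-l, s) and re-indexing starts at s-l+1.
            s, l := int32(100), int32(20) // match occupied [80, 100)
            index0 := s - l + 1           // 81 = match start + 1
            n := 0
            for ; index0 < s-1; index0 += 2 { // step 2, as in the hunks above
                    n++ // would hash positions 81, 83, ..., 97
            }
            fmt.Println(n, "table insertions") // 9
    }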
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/modules.txt new/vendor/modules.txt
--- old/vendor/modules.txt      2024-01-17 14:05:58.000000000 +0100
+++ new/vendor/modules.txt      2024-02-05 04:30:38.000000000 +0100
@@ -195,8 +195,8 @@
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.4
-## explicit; go 1.19
+# github.com/klauspost/compress v1.17.4 => github.com/klauspost/compress v1.17.2
+## explicit; go 1.18
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
@@ -536,3 +536,4 @@
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
+# github.com/klauspost/compress => github.com/klauspost/compress v1.17.2
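The rewritten module line and the trailing "=>" entry are how go mod vendor records a replace directive: restic still requires github.com/klauspost/compress at v1.17.4 but resolves it to v1.17.2, sidestepping the corruption bug noted in the changelog. The corresponding go.mod stanza would look roughly like this (go.mod itself is not part of this diff, so this is an inferred sketch):

    require github.com/klauspost/compress v1.17.4

    replace github.com/klauspost/compress => github.com/klauspost/compress v1.17.2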
