Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package dnscrypt-proxy for openSUSE:Factory 
checked in at 2024-04-23 18:56:28
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/dnscrypt-proxy (Old)
 and      /work/SRC/openSUSE:Factory/.dnscrypt-proxy.new.27645 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "dnscrypt-proxy"

Tue Apr 23 18:56:28 2024 rev:19 rq:1169724 version:2.1.5

Changes:
--------
--- /work/SRC/openSUSE:Factory/dnscrypt-proxy/dnscrypt-proxy.changes   2024-02-06 16:34:41.827373833 +0100
+++ /work/SRC/openSUSE:Factory/.dnscrypt-proxy.new.27645/dnscrypt-proxy.changes    2024-04-23 18:56:54.328263563 +0200
@@ -1,0 +2,5 @@
+Sun Apr 21 12:00:00 UTC 2024 - [email protected]
+
+- added patch quic-go.patch (boo#1222473)
+
+-------------------------------------------------------------------

New:
----
  quic-go.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ dnscrypt-proxy.spec ++++++
--- /var/tmp/diff_new_pack.YgPkxI/_old  2024-04-23 18:56:55.276297817 +0200
+++ /var/tmp/diff_new_pack.YgPkxI/_new  2024-04-23 18:56:55.280297963 +0200
@@ -43,6 +43,8 @@
 Source6:        %{name}.socket.conf
 # dnscrypt user configuration
 Source7:        %{user_group}-user.conf
+# can be dropped in next release with quic-go v0.42 included (boo#1222473)
+Patch0:         quic-go.patch
 BuildRequires:  golang-packaging
 BuildRequires:  pkgconfig
 BuildRequires:  systemd-rpm-macros
@@ -67,7 +69,7 @@
 and ODoH (Oblivious DoH).
 
 %prep
-%setup -q -n %{name}-%{version}
+%autosetup -p1 -n %{name}-%{version}
 
 # replace with home directory from spec
 sed -i "s/home_dir_placeholder/%{home_dir_escaped}/" %{SOURCE7}

++++++ quic-go.patch ++++++
From: [email protected]
Date: 2024-04-21 12:00:00
Subject: Memory Exhaustion Attack against QUIC's Connection ID Mechanism
References: https://github.com/quic-go/quic-go/commit/4a99b816ae3ab03ae5449d15aac45147c85ed47a
  https://github.com/quic-go/quic-go/security/advisories/GHSA-c33x-xqrf-c478
  https://bugzilla.suse.com/show_bug.cgi?id=1222473

This tries to backport commit
https://github.com/quic-go/quic-go/commit/4a99b816ae3ab03ae5449d15aac45147c85ed47a.patch
from Marten Seemann <[email protected]>
to the older vendored version of quic-go.

dnscrypt-proxy upstream already vendors quic-go v0.42, which includes this
fix, but that version of dnscrypt-proxy has not been released yet.

The patch should be dropped with the next release of dnscrypt-proxy.
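
For orientation, here is a standalone sketch of the idea behind the fix. It is
not the vendored code: the boundedFramer type, its single mutex, and the main
function are simplified stand-ins, and frame is a placeholder for quic-go's
wire.Frame. The point it illustrates is that control frames are queued only up
to a fixed cap; past the cap, queueing merely sets a flag, and the connection's
run loop polls that flag and closes the connection with an internal error.

package main

import (
        "fmt"
        "sync"
)

// maxControlFrames mirrors the cap used in the patch (16 << 10 = 16384 frames).
const maxControlFrames = 16 << 10

// frame stands in for quic-go's wire.Frame interface; it only exists so this
// sketch is self-contained.
type frame interface{}

// boundedFramer is a simplified, hypothetical illustration of the mitigation:
// instead of letting a peer grow the control frame queue without bound (the
// memory exhaustion described in GHSA-c33x-xqrf-c478), queueing stops at
// maxControlFrames and an overflow flag is recorded.
type boundedFramer struct {
        mu            sync.Mutex
        controlFrames []frame
        queuedTooMany bool
}

// QueueControlFrame drops the frame and records the overflow once the cap is
// reached. The real backport spreads this over two mutexes in framer.go.
func (f *boundedFramer) QueueControlFrame(fr frame) {
        f.mu.Lock()
        defer f.mu.Unlock()
        if len(f.controlFrames) >= maxControlFrames {
                f.queuedTooMany = true
                return
        }
        f.controlFrames = append(f.controlFrames, fr)
}

// QueuedTooManyControlFrames reports whether the cap was ever exceeded; the
// connection's run loop polls it and closes the connection with an internal
// transport error.
func (f *boundedFramer) QueuedTooManyControlFrames() bool {
        f.mu.Lock()
        defer f.mu.Unlock()
        return f.queuedTooMany
}

func main() {
        f := &boundedFramer{}
        for i := 0; i < maxControlFrames+1; i++ {
                f.QueueControlFrame(struct{}{})
        }
        // Prints "true": at this point the run loop would close the connection
        // instead of buffering ever more control frames.
        fmt.Println("too many control frames:", f.QueuedTooManyControlFrames())
}

Upstream calls the flag-based approach a hack because it avoids changing the
QueueControlFrame signature to return an error; the actual hunks below follow
the same pattern.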

---

diff -r -U 5 a/vendor/github.com/quic-go/quic-go/connection.go b/vendor/github.com/quic-go/quic-go/connection.go
--- a/vendor/github.com/quic-go/quic-go/connection.go
+++ b/vendor/github.com/quic-go/quic-go/connection.go
@@ -516,11 +516,14 @@
 
        var sendQueueAvailable <-chan struct{}
 
 runLoop:
        for {
-               // Close immediately if requested
+               if s.framer.QueuedTooManyControlFrames() {
+                       s.closeLocal(&qerr.TransportError{ErrorCode: InternalError})
+               }
+               // Close immediately if requested
                select {
                case closeErr = <-s.closeChan:
                        break runLoop
                default:
                }
diff -r -U 5 a/vendor/github.com/quic-go/quic-go/framer.go b/vendor/github.com/quic-go/quic-go/framer.go
--- a/vendor/github.com/quic-go/quic-go/framer.go
+++ b/vendor/github.com/quic-go/quic-go/framer.go
@@ -19,22 +19,32 @@
 
        AddActiveStream(protocol.StreamID)
        AppendStreamFrames([]ackhandler.StreamFrame, protocol.ByteCount, protocol.VersionNumber) ([]ackhandler.StreamFrame, protocol.ByteCount)
 
        Handle0RTTRejection() error
+
+       // QueuedTooManyControlFrames says if the control frame queue exceeded its maximum queue length.
+       // This is a hack.
+       // It is easier to implement than propagating an error return value in QueueControlFrame.
+       // The correct solution would be to queue frames with their respective structs.
+       // See https://github.com/quic-go/quic-go/issues/4271 for the queueing of stream-related control frames.
+       QueuedTooManyControlFrames() bool
 }
 
+const maxControlFrames = 16 << 10
+
 type framerI struct {
        mutex sync.Mutex
 
        streamGetter streamGetter
 
        activeStreams map[protocol.StreamID]struct{}
        streamQueue   ringbuffer.RingBuffer[protocol.StreamID]
 
        controlFrameMutex sync.Mutex
        controlFrames     []wire.Frame
+       queuedTooManyControlFrames bool
 }
 
 var _ framer = &framerI{}
 
 func newFramer(streamGetter streamGetter) framer {
@@ -56,11 +66,24 @@
        f.controlFrameMutex.Unlock()
        return hasData
 }
 
 func (f *framerI) QueueControlFrame(frame wire.Frame) {
+       var returnEarly bool
        f.controlFrameMutex.Lock()
+       // This is a hack.
+       if len(f.controlFrames) >= maxControlFrames {
+               returnEarly = true
+       }
+       f.controlFrameMutex.Unlock()
+       if returnEarly {
+               f.mutex.Lock()
+               f.queuedTooManyControlFrames = true
+               f.mutex.Unlock()
+               return
+       }
+       f.controlFrameMutex.Lock()
        f.controlFrames = append(f.controlFrames, frame)
        f.controlFrameMutex.Unlock()
 }
 
 func (f *framerI) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount, v protocol.VersionNumber) ([]ackhandler.Frame, protocol.ByteCount) {
@@ -78,10 +101,17 @@
        }
        f.controlFrameMutex.Unlock()
        return frames, length
 }
 
+func (f *framerI) QueuedTooManyControlFrames() bool {
+       f.mutex.Lock()
+       tooMany := f.queuedTooManyControlFrames
+       f.mutex.Unlock()
+       return tooMany
+}
+
 func (f *framerI) AddActiveStream(id protocol.StreamID) {
        f.mutex.Lock()
        if _, ok := f.activeStreams[id]; !ok {
                f.streamQueue.PushBack(id)
                f.activeStreams[id] = struct{}{}
