[arch-commits] Commit in libxcb/trunk (2 files)

2014-07-12 Thread Laurent Carlier
Date: Saturday, July 12, 2014 @ 19:34:52
  Author: lcarlier
Revision: 216838

upgpkg: libxcb 1.10-3

Fix Present support (FS#41172)

Added:
  libxcb/trunk/Force-XCB-event-structures-with-64-bit-extended-fiel.patch
Modified:
  libxcb/trunk/PKGBUILD

+
 Force-XCB-event-structures-with-64-bit-extended-fiel.patch |  102 +++
 PKGBUILD   |   12 -
 2 files changed, 110 insertions(+), 4 deletions(-)

Added: Force-XCB-event-structures-with-64-bit-extended-fiel.patch
===
--- Force-XCB-event-structures-with-64-bit-extended-fiel.patch  
(rev 0)
+++ Force-XCB-event-structures-with-64-bit-extended-fiel.patch  2014-07-12 
17:34:52 UTC (rev 216838)
@@ -0,0 +1,102 @@
+From 3b72a2c9d1d656c74c691a45689e1d637f669e3a Mon Sep 17 00:00:00 2001
+From: Kenneth Graunke kenn...@whitecape.org
+Date: Fri, 03 Jan 2014 23:08:33 +
+Subject: Force XCB event structures with 64-bit extended fields to be packed.
+
+With the advent of the Present extension, some events (such as
+PresentCompleteNotify) now use native 64-bit types on the wire.
+
+For XGE events, we insert an extra uint32_t full_sequence field
+immediately after the first 32 bytes of data.  Normally, this causes
+the subsequent fields to be shifted over by 4 bytes, and the structure
+to grow in size by 4 bytes.  Everything works fine.
+
+However, if event contains 64-bit extended fields, this may result in
+the compiler adding an extra 4 bytes of padding so that those fields
+remain aligned on 64-bit boundaries.  This causes the structure to grow
+by 8 bytes, not 4.  Unfortunately, XCB doesn't realize this, and
+always believes that the length only increased by 4.  read_packet()
+then fails to malloc enough memory to hold the event, and the event
+processing code uses the wrong offsets.
+
+To fix this, mark any event structures containing 64-bit extended
+fields with __attribute__((__packed__)).
+
+v2: Use any(...) instead of True in (...), as suggested by
+Daniel Martin.
+
+v3 (Alan Coopersmith): Fix build with Solaris Studio 12.3 by moving the
+attribute to after the structure definition.
+
+Signed-off-by: Kenneth Graunke kenn...@whitecape.org
+Reviewed-by: Keith Packard kei...@keithp.com [v1]
+Reviewed-by: Josh Triplett j...@joshtriplett.org [v1]
+Reviewed-by: Daniel Martin consume.no...@gmail.com
+Signed-off-by: Alan Coopersmith alan.coopersm...@oracle.com
+---
+diff --git a/src/c_client.py b/src/c_client.py
+index 99fd307..45de544 100644
+--- a/src/c_client.py
++++ b/src/c_client.py
+@@ -1762,7 +1762,7 @@ def c_simple(self, name):
+ # Iterator
+ _c_iterator(self, name)
+ 
+-def _c_complex(self):
++def _c_complex(self, force_packed = False):
+ '''
+ Helper function for handling all structure types.
+ Called for all structs, requests, replies, events, errors.
+@@ -1817,7 +1817,7 @@ def _c_complex(self):
+ if b.type.has_name:
+ _h('} %s;', b.c_field_name)
+ 
+-_h('} %s;', self.c_type)
++_h('} %s%s;', 'XCB_PACKED ' if force_packed else '', self.c_type)
+ 
+ def c_struct(self, name):
+ '''
+@@ -2902,6 +2902,7 @@ def c_event(self, name):
+ # events while generating the structure for them. Otherwise we would read
+ # garbage (the internal full_sequence) when accessing normal event fields
+ # there.
++    force_packed = False
+     if hasattr(self, 'is_ge_event') and self.is_ge_event and self.name == name:
+         event_size = 0
+         for field in self.fields:
+@@ -2911,6 +2912,11 @@ def c_event(self, name):
+                 full_sequence = Field(tcard32, tcard32.name, 'full_sequence', False, True, True)
+                 idx = self.fields.index(field)
+                 self.fields.insert(idx + 1, full_sequence)
++
++                # If the event contains any 64-bit extended fields, they need
++                # to remain aligned on a 64-bit boundary.  Adding full_sequence
++                # would normally break that; force the struct to be packed.
++                force_packed = any(f.type.size == 8 and f.type.is_simple for f in self.fields[(idx+1):])
+                 break
+ 
+ _c_type_setup(self, name, ('event',))
+@@ -2920,7 +2926,7 @@ def c_event(self, name):
+ 
+ if self.name == name:
+ # Structure definition
+-_c_complex(self)
++_c_complex(self, force_packed)
+ else:
+ # Typedef
+ _h('')
+diff --git a/src/xcb.h b/src/xcb.h
+index e62c985..73c77a3 100644
+--- a/src/xcb.h
++++ b/src/xcb.h
+@@ -51,6 +51,8 @@ extern "C" {
+  * @file xcb.h
+  */
+ 
++#define XCB_PACKED __attribute__((__packed__))
++
+ /**
+  * @defgroup XCB_Core_API XCB Core API
+  * @brief Core API of the XCB library.
+--
+cgit v0.9.0.2-2-gbebe

Modified: PKGBUILD
===
--- PKGBUILD

[arch-commits] Commit in libxcb/trunk (2 files)

2014-05-11 Thread Laurent Carlier
Date: Sunday, May 11, 2014 @ 10:15:06
  Author: lcarlier
Revision: 212220

upgpkg: libxcb 1.10-2

fix FS#40289

Added:
  libxcb/trunk/Ensure-xcb-owns-socket-and-no-other-threads-are-writ.patch
Modified:
  libxcb/trunk/PKGBUILD

+
 Ensure-xcb-owns-socket-and-no-other-threads-are-writ.patch |  121 +++
 PKGBUILD   |   11 -
 2 files changed, 129 insertions(+), 3 deletions(-)

Added: Ensure-xcb-owns-socket-and-no-other-threads-are-writ.patch
===
--- Ensure-xcb-owns-socket-and-no-other-threads-are-writ.patch  
(rev 0)
+++ Ensure-xcb-owns-socket-and-no-other-threads-are-writ.patch  2014-05-11 
08:15:06 UTC (rev 212220)
@@ -0,0 +1,121 @@
+From be0fe56c3bcad5124dcc6c47a2fad01acd16f71a Mon Sep 17 00:00:00 2001
+From: Keith Packard kei...@keithp.com
+Date: Mon, 23 Dec 2013 21:15:20 -0800
+Subject: [PATCH] Ensure xcb owns socket and no other threads are writing
+ before send_request
+
+send_request may only write to out.queue if no other thread is busy
+writing to the network (as that thread may be writing from out.queue).
+
+send_request may only allocate request sequence numbers if XCB owns
+the socket.
+
+Therefore, send_request must make sure that both conditions are true
+when it holds iolock, which can only be done by looping until both
+conditions are true without having dropped the lock waiting for the
+second condition.
+
+We choose to get the socket back from Xlib first as get_socket_back
+has a complicated test and checking for other threads writing is a
+simple in-lined check.
+
+This also changes the sequence number checks (64k requests with no
+reply, 4M request wrapping) to ensure that both conditions are true
+before queueing the request.
+
+Signed-off-by: Keith Packard kei...@keithp.com
+Reviewed-by: Uli Schlachter psyc...@znc.in
+---
+ src/xcb_out.c | 57 -
+ 1 file changed, 40 insertions(+), 17 deletions(-)
+
+diff --git a/src/xcb_out.c b/src/xcb_out.c
+index 18bb5f9..dc42954 100644
+--- a/src/xcb_out.c
++++ b/src/xcb_out.c
+@@ -103,6 +103,33 @@ static void get_socket_back(xcb_connection_t *c)
+ _xcb_in_replies_done(c);
+ }
+ 
++static void prepare_socket_request(xcb_connection_t *c)
++{
++/* We're about to append data to out.queue, so we need to
++ * atomically test for an external socket owner *and* some other
++ * thread currently writing.
++ *
++ * If we have an external socket owner, we have to get the socket back
++ * before we can use it again.
++ *
++ * If some other thread is writing to the socket, we assume it's
++ * writing from out.queue, and so we can't stick data there.
++ *
++ * We satisfy this condition by first calling get_socket_back
++ * (which may drop the lock, but will return when XCB owns the
++ * socket again) and then checking for another writing thread and
++ * escaping the loop if we're ready to go.
++ */
++    for (;;) {
++        if(c->has_error)
++            return;
++        get_socket_back(c);
++        if (!c->out.writing)
++            break;
++        pthread_cond_wait(&c->out.cond, &c->iolock);
++    }
++}
++
+ /* Public interface */
+ 
+ void xcb_prefetch_maximum_request_length(xcb_connection_t *c)
+@@ -236,24 +263,23 @@ unsigned int xcb_send_request(xcb_connection_t *c, int flags, struct iovec *vect
+ 
+     /* get a sequence number and arrange for delivery. */
+     pthread_mutex_lock(&c->iolock);
+-    /* wait for other writing threads to get out of my way. */
+-    while(c->out.writing)
+-        pthread_cond_wait(&c->out.cond, &c->iolock);
+-    get_socket_back(c);
++
++    prepare_socket_request(c);
+ 
+     /* send GetInputFocus (sync_req) when 64k-2 requests have been sent without
+-     * a reply. */
+-    if(req->isvoid && c->out.request == c->in.request_expected + (1 << 16) - 2)
+-        send_sync(c);
+-    /* Also send sync_req (could use NoOp) at 32-bit wrap to avoid having
++     * a reply.
++     * Also send sync_req (could use NoOp) at 32-bit wrap to avoid having
+      * applications see sequence 0 as that is used to indicate
+-     * an error in sending the request */
+-    if((unsigned int) (c->out.request + 1) == 0)
++     * an error in sending the request
++     */
++
++    while ((req->isvoid && c->out.request == c->in.request_expected + (1 << 16) - 2) ||
++           (unsigned int) (c->out.request + 1) == 0)
++    {
+         send_sync(c);
++        prepare_socket_request(c);
++    }
+ 
+-    /* The above send_sync calls could drop the I/O lock, but this
+-     * thread will still exclude any other thread that tries to write,
+-     * so the sequence number postconditions still hold. */
+     send_request(c, req->isvoid, workaround, flags, vector, veclen);
+     request = c->has_error ? 0 : c->out.request;
+