Re: struct apr_pollset_private_t

2009-06-17 Thread Jack Andrews
On Wed, Jun 17, 2009 at 2:54 AM, Bojan Smojver <bo...@rexursive.com> wrote:
 On Tue, 2009-06-16 at 00:41 +0200, Jack Andrews wrote:
 feedback welcome - it can probably be improved

 Did you really want to use functions for casting?

removed the casting functions:

Index: pushtest/exapr/include/arch/unix/apr_arch_poll_private.h
===================================================================
--- pushtest.orig/exapr/include/arch/unix/apr_arch_poll_private.h	2009-03-06 02:38:58.0 +0100
+++ pushtest/exapr/include/arch/unix/apr_arch_poll_private.h	2009-06-16 00:04:17.17300 +0200
@@ -104,7 +104,6 @@

 #endif

-typedef struct apr_pollset_private_t apr_pollset_private_t;
 typedef struct apr_pollset_provider_t apr_pollset_provider_t;
 typedef struct apr_pollcb_provider_t apr_pollcb_provider_t;
 struct apr_pollset_t
@@ -115,7 +114,7 @@
     apr_uint32_t flags;
     /* Pipe descriptors used for wakeup */
     apr_file_t *wakeup_pipe[2];
-    apr_pollset_private_t *p;
+    void *p;
     apr_pollset_provider_t *provider;
 };

Index: pushtest/exapr/poll/unix/poll.c
===================================================================
--- pushtest.orig/exapr/poll/unix/poll.c	2009-03-03 01:29:22.0 +0100
+++ pushtest/exapr/poll/unix/poll.c 2009-06-17 11:07:08.32200 +0200
@@ -147,7 +147,7 @@

 #endif /* POLL_USES_POLL */

-struct apr_pollset_private_t
+struct apr_poll_pollset_t
 {
     struct pollfd *pollset;
     apr_pollfd_t *query_set;
@@ -159,6 +159,8 @@
                                         apr_pool_t *p,
                                         apr_uint32_t flags)
 {
+    struct apr_poll_pollset_t **pollset_p =
+      (struct apr_poll_pollset_t **)&pollset->p;
     if (flags & APR_POLLSET_THREADSAFE) {
         return APR_ENOTIMPL;
     }
@@ -167,10 +169,10 @@
         return APR_ENOTIMPL;
     }
 #endif
-    pollset->p = apr_palloc(p, sizeof(apr_pollset_private_t));
-    pollset->p->pollset = apr_palloc(p, size * sizeof(struct pollfd));
-    pollset->p->query_set = apr_palloc(p, size * sizeof(apr_pollfd_t));
-    pollset->p->result_set = apr_palloc(p, size * sizeof(apr_pollfd_t));
+    *pollset_p = apr_palloc(p, sizeof(struct apr_poll_pollset_t));
+    (*pollset_p)->pollset = apr_palloc(p, size * sizeof(struct pollfd));
+    (*pollset_p)->query_set = apr_palloc(p, size * sizeof(apr_pollfd_t));
+    (*pollset_p)->result_set = apr_palloc(p, size * sizeof(apr_pollfd_t));

     return APR_SUCCESS;
 }
@@ -178,14 +180,16 @@
 static apr_status_t impl_pollset_add(apr_pollset_t *pollset,
                                      const apr_pollfd_t *descriptor)
 {
+    struct apr_poll_pollset_t *pollset_p =
+      (struct apr_poll_pollset_t *)pollset->p;
     if (pollset->nelts == pollset->nalloc) {
         return APR_ENOMEM;
     }

-    pollset->p->query_set[pollset->nelts] = *descriptor;
+    pollset_p->query_set[pollset->nelts] = *descriptor;

     if (descriptor->desc_type == APR_POLL_SOCKET) {
-        pollset->p->pollset[pollset->nelts].fd = descriptor->desc.s->socketdes;
+        pollset_p->pollset[pollset->nelts].fd = descriptor->desc.s->socketdes;
     }
     else {
 #if APR_FILES_AS_SOCKETS
@@ -193,12 +197,12 @@
 #else
         if ((pollset->flags & APR_POLLSET_WAKEABLE) &&
             descriptor->desc.f == pollset->wakeup_pipe[0])
-            pollset->p->pollset[pollset->nelts].fd = (SOCKET)descriptor->desc.f->filedes;
+            pollset_p->pollset[pollset->nelts].fd = (SOCKET)descriptor->desc.f->filedes;
         else
             return APR_EBADF;
 #endif
     }
-    pollset->p->pollset[pollset->nelts].events =
+    pollset_p->pollset[pollset->nelts].events =
         get_event(descriptor->reqevents);
     pollset->nelts++;

@@ -209,20 +213,22 @@
                                     const apr_pollfd_t *descriptor)
 {
     apr_uint32_t i;
+    struct apr_poll_pollset_t *pollset_p =
+      (struct apr_poll_pollset_t *)pollset->p;

     for (i = 0; i < pollset->nelts; i++) {
-        if (descriptor->desc.s == pollset->p->query_set[i].desc.s) {
+        if (descriptor->desc.s == pollset_p->query_set[i].desc.s) {
             /* Found an instance of the fd: remove this and any other copies */
             apr_uint32_t dst = i;
             apr_uint32_t old_nelts = pollset->nelts;
             pollset->nelts--;
             for (i++; i < old_nelts; i++) {
-                if (descriptor->desc.s == pollset->p->query_set[i].desc.s) {
+                if (descriptor->desc.s == pollset_p->query_set[i].desc.s) {
                     pollset->nelts--;
                 }
                 else {
-                    pollset->p->pollset[dst] = pollset->p->pollset[i];
-                    pollset->p->query_set[dst] = pollset->p->query_set[i];
+                    pollset_p->pollset[dst] = pollset_p->pollset[i];
+                    pollset_p->query_set[dst] = pollset_p->query_set[i];
                     dst++;
                 }
             }
@@ -241,14 +247,16 @@
     int ret;
     apr_status_t rv = APR_SUCCESS;
     apr_uint32_t i, j;
+    struct 

Re: poll/select process's IO on windows

2009-06-17 Thread Erik Huelsmann
Hi Jack,

Regardless of the portability library that you're using, Windows
doesn't allow select() operations on anything other than socket
handles.

I don't have personal experience working around that issue with
APR though, I'm sorry.


With kind regards,

Erik.

On Tue, Jun 16, 2009 at 8:42 PM, Jack Andrews <effb...@gmail.com> wrote:
 hi,

 i want to detect IO on a child process stdio as well
 as a socket from elsewhere.  ie. i want a select()
 on /windows/.

 is it possible with APR?  or do i have to hack around?

 my first attempt is here:

 #include <assert.h>
 #include <apr_general.h>
 #include <apr_poll.h>
 #include <apr_thread_proc.h>

 /* AS() wasn't shown in the post; presumably something like: */
 #define AS(x) assert(APR_SUCCESS==(x))

 int main(int c,char** argv)
 { apr_procattr_t *attr;
   const char*args[]={"cmd.exe",0};
   apr_sockaddr_t *sa;
   apr_socket_t *sock;
   apr_file_t *fds[4];
   apr_pollfd_t pfd[4];
   const apr_pollfd_t *pfdout;
   apr_pollset_t *pollset;
   apr_proc_t newproc;   /* missing from the original post, presumably */
   apr_pool_t *pool;     /* likewise */
   apr_int32_t i,n;

   apr_initialize();
   AS(apr_pool_create(&pool,NULL));
   fds[0]=(apr_file_t*)sock;   /* note: sock is the socket "from elsewhere"; it is never created here */
   AS(apr_procattr_create(&attr, pool));
   AS(apr_procattr_io_set(attr, APR_FULL_BLOCK,APR_FULL_BLOCK,APR_FULL_BLOCK));
   AS(apr_procattr_cmdtype_set(attr, APR_PROGRAM));
   AS(apr_proc_create(&newproc, "c:\\Windows\\system32\\cmd.exe", args,
  0, attr, pool));
   fds[1]=newproc.in;fds[2]=newproc.out;fds[3]=newproc.err;
   AS(apr_pollset_create(&pollset, 4, pool, APR_POLLSET_WAKEABLE));

 #define setpfd(x,a,b,c,d) \
  pfd[x].desc_type=(a),pfd[x].reqevents=(b),pfd[x].desc.s=(c),pfd[x].client_data=(d)
   for(i=0;i<4;i++)
   { setpfd(i,i?APR_POLL_FILE:APR_POLL_SOCKET,APR_POLLIN|APR_POLLOUT|APR_POLLHUP,(apr_socket_t*)fds[i],NULL);
     AS(apr_pollset_add(pollset,pfd+i));
   }
   for(;APR_TIMEUP==apr_pollset_poll(pollset,0,&n,&pfdout);)
   { printf("hoo\n");
   }
   return 0;
 }



 ta, jack.



Re: struct apr_pollset_private_t

2009-06-17 Thread Jack Andrews
just realized that i no longer need the additional redirection here:

[i'll send another patch including any other feedback i might get]

 @@ -159,6 +159,8 @@
                                         apr_pool_t *p,
                                         apr_uint32_t flags)
  {
 +    struct apr_poll_pollset_t **pollset_p =
 +      (struct apr_poll_pollset_t **)&pollset->p;
     if (flags & APR_POLLSET_THREADSAFE) {
         return APR_ENOTIMPL;
     }
 @@ -167,10 +169,10 @@
         return APR_ENOTIMPL;
     }
  #endif
 -    pollset->p = apr_palloc(p, sizeof(apr_pollset_private_t));
 -    pollset->p->pollset = apr_palloc(p, size * sizeof(struct pollfd));
 -    pollset->p->query_set = apr_palloc(p, size * sizeof(apr_pollfd_t));
 -    pollset->p->result_set = apr_palloc(p, size * sizeof(apr_pollfd_t));
 +    *pollset_p = apr_palloc(p, sizeof(struct apr_poll_pollset_t));
 +    (*pollset_p)->pollset = apr_palloc(p, size * sizeof(struct pollfd));
 +    (*pollset_p)->query_set = apr_palloc(p, size * sizeof(apr_pollfd_t));
 +    (*pollset_p)->result_set = apr_palloc(p, size * sizeof(apr_pollfd_t));

     return APR_SUCCESS;
  }
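
Concretely, the simplification presumably collapses to something like this (a
sketch based on the patch above, not a committed revision; the flag checks are
abbreviated). Since the member is now void *, the allocation can be assigned
directly through a single typed local, with no pointer-to-pointer cast:

static apr_status_t impl_pollset_create(apr_pollset_t *pollset,
                                        apr_uint32_t size,
                                        apr_pool_t *p,
                                        apr_uint32_t flags)
{
    struct apr_poll_pollset_t *priv;

    if (flags & APR_POLLSET_THREADSAFE) {
        return APR_ENOTIMPL;
    }

    priv = apr_palloc(p, sizeof(*priv));
    priv->pollset    = apr_palloc(p, size * sizeof(struct pollfd));
    priv->query_set  = apr_palloc(p, size * sizeof(apr_pollfd_t));
    priv->result_set = apr_palloc(p, size * sizeof(apr_pollfd_t));
    pollset->p = priv;   /* void * accepts any object pointer; no cast needed */

    return APR_SUCCESS;
}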


Re: poll/select process's IO on windows

2009-06-17 Thread Mladen Turk

Jack Andrews wrote:
 hi,

 i want to detect IO on a child process stdio as well
 as a socket from elsewhere.  ie. i want a select()
 on /windows/.

 is it possible with APR?  or do i have to hack around?


It's not possible with APR; however, you can make a hack
if you know the process's stdin (or stdout, stderr) and you need to know
whether there is data pending.

#if APR_FILES_AS_SOCKETS
static apr_status_t get_file_event(apr_file_t *f, apr_pool_t *pool)
{
    apr_int32_t  nd;
    apr_pollfd_t wait_pollfd;

    wait_pollfd.p         = pool;
    wait_pollfd.desc_type = APR_POLL_FILE;
    wait_pollfd.reqevents = APR_POLLIN;
    wait_pollfd.desc.f    = f;

    return apr_poll(&wait_pollfd, 1, &nd, 0);
}
#elif defined(WIN32)
static apr_status_t get_file_event(apr_file_t *f, apr_pool_t *pool)
{
    HANDLE h;
    char   c;
    DWORD  r;

    apr_os_file_get(&h, f);
    /* peek one byte without consuming it; r = bytes actually read */
    if (PeekNamedPipe(h, &c, 1, &r, NULL, NULL)) {
        if (r == 1)
            return APR_POLLOUT;
    }
    return APR_TIMEUP;
}
#endif

This will return APR_POLLOUT if there is data present to
be read, or APR_TIMEUP if not. Customize at will ;)
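
A hypothetical caller (not from the mail above) might loop on the helper like
so, assuming a child created with apr_proc_create() and APR_FULL_BLOCK pipes:

/* Hypothetical usage sketch: wait until the child's stdout has data. */
static apr_status_t wait_for_child_output(apr_proc_t *proc, apr_pool_t *pool)
{
    apr_status_t rv;

    while ((rv = get_file_event(proc->out, pool)) == APR_TIMEUP) {
        apr_sleep(50 * 1000);   /* apr_sleep() takes microseconds: 50 ms */
    }
    /* rv == APR_POLLOUT here means a byte is buffered, so a following
     * apr_file_read() on proc->out will not block. */
    return rv;
}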


Regards
--
^(TM)


Re: poll/select process's IO on windows

2009-06-17 Thread Jack Andrews
 i want to detect IO on a child process stdio as well
 as a socket from elsewhere.  ie. i want a select()
 on /windows/.

i wonder if it would be useful to have an
  apr_socket_from_files(files_in, files_out, socket)
that plugs a socket onto the end of pipes on windows.

  . only implement on windows

then we could [effectively] poll a process's stdio.

if you think it's worth pursuing, i'll have a crack at it.


ta, jack.
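
A rough sketch of the idea behind such a shim, with everything below
hypothetical (apr_socket_from_files() does not exist in APR, and pump_ctx is
invented for illustration): a helper thread copies bytes from the child's pipe
into one end of a local socket pair, so the other end becomes pollable like
any socket and can be handed to apr_pollset_add().

#include <apr_file_io.h>
#include <apr_network_io.h>
#include <apr_thread_proc.h>

typedef struct {
    apr_file_t   *in;    /* read side: the child's stdout or stderr pipe */
    apr_socket_t *sock;  /* write side: our end of a local socket pair */
} pump_ctx;              /* hypothetical helper type, not an APR type */

/* Copy pipe bytes into the socket until EOF, so the socket's peer
 * becomes a select()able stand-in for the pipe. */
static void * APR_THREAD_FUNC pipe_pump(apr_thread_t *thd, void *data)
{
    pump_ctx *ctx = data;
    char buf[4096];
    apr_size_t n, off, w;

    for (;;) {
        n = sizeof(buf);
        if (apr_file_read(ctx->in, buf, &n) != APR_SUCCESS || n == 0)
            break;                      /* EOF or pipe error: stop pumping */
        for (off = 0; off < n; off += w) {
            w = n - off;
            if (apr_socket_send(ctx->sock, buf + off, &w) != APR_SUCCESS)
                return NULL;            /* peer went away */
        }
    }
    return NULL;
}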


Segfault in testlockperf test on AIX

2009-06-17 Thread Gavin Sherry
Hi all,

I've hit a segfault trying to build APR 1.3.5 on AIX 5.3.

I configured APR with: --enable-shared=no --enable-static=yes
--enable-threads

The segfault is triggered by testlockperf (other tests fail similarly)
with the following backtrace:

#0  apr_proc_mutex_unix_setup_lock () at locks/unix/proc_mutex.c:497
#1  0x1000318c in apr_initialize () at misc/unix/start.c:51
#2  0x1ff8 in main (argc=1, argv=0x2ff22a24) at testlockperf.c:236

Here's some stepping through the code:

171 proc_mutex_op_on.sem_num = 0;
(gdb)
172 proc_mutex_op_on.sem_op = -1;
(gdb)
173 proc_mutex_op_on.sem_flg = SEM_UNDO;
(gdb)
176 proc_mutex_op_try.sem_flg = SEM_UNDO | IPC_NOWAIT;
(gdb)
178 proc_mutex_op_off.sem_op = 1;
(gdb)
175 proc_mutex_op_try.sem_op = -1;
(gdb)
179 proc_mutex_op_off.sem_flg = SEM_UNDO;
(gdb)
488 proc_mutex_lock_it.l_whence = SEEK_SET;   /* from current point */
(gdb)
178 proc_mutex_op_off.sem_op = 1;
(gdb)
497 proc_mutex_unlock_it.l_pid = 0;   /* pid not actually interesting */
(gdb)

Program received signal SIGSEGV, Segmentation fault.
apr_proc_mutex_unix_setup_lock () at locks/unix/proc_mutex.c:497
497 proc_mutex_unlock_it.l_pid = 0;   /* pid not actually interesting */

GCC version 4.2.4.

Any assistance would be appreciated.

Thanks,
Gavin


Re: Segfault in testlockperf test on AIX

2009-06-17 Thread Bojan Smojver
On Thu, 2009-06-18 at 00:51 +0200, Gavin Sherry wrote:
 497 proc_mutex_unlock_it.l_pid = 0;   /* pid not actually interesting */

What's in proc_mutex_unlock_it when this happens? What's its size?

Can you tell us what your fcntl.h holds for struct flock?

PS. I do not have access to AIX.

-- 
Bojan



Re: Segfault in testlockperf test on AIX

2009-06-17 Thread Gavin Sherry
2009/6/18 Bojan Smojver <bo...@rexursive.com>

 On Thu, 2009-06-18 at 00:51 +0200, Gavin Sherry wrote:
  497 proc_mutex_unlock_it.l_pid = 0;   /* pid not actually interesting */

 What's in proc_mutex_unlock_it when this happens? What's its size?


$1 = {l_type = 0, l_whence = 0, l_start = 0, l_len = 0, l_sysid = 0, l_pid = 0, l_vfs = 0}
(gdb) print sizeof(proc_mutex_unlock_it)
$2 = 24
(gdb) ptype proc_mutex_unlock_it
type = struct flock {
    short l_type;
    short l_whence;
    off_t l_start;
    off_t l_len;
    unsigned int l_sysid;
    pid_t l_pid;
    int l_vfs;
}

From flock.h:

struct  flock   {
short   l_type;
short   l_whence;
#ifndef _LARGE_FILES
#ifndef __64BIT__
off_t   l_start;
off_t   l_len;  /* len = 0 means until end of file */
#endif
#endif
unsigned intl_sysid;
#ifdef  _NONSTD_TYPES
ushort  l_pid_ext;
ushort  l_pid;
#else
pid_t   l_pid;
#endif
int l_vfs;
#if defined(_LARGE_FILES) || defined(__64BIT__)
/* If LARGE FILES or 64 BIT then off_t is 64 bits and struct flock
 * must be laid out as struct flock64
 */
off_t   l_start;
off_t   l_len;
#endif
};
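
The #ifdef dance above is exactly why the layout question matters: with
_LARGE_FILES or __64BIT__ the l_start/l_len fields move to the end of the
struct, so two objects compiled with different settings disagree on both the
size of struct flock and the offset of every field after l_whence. A quick
illustration (not from the thread), compiled once with and once without
-D_LARGE_FILES:

#include <stdio.h>
#include <stddef.h>
#include <fcntl.h>

int main(void)
{
    /* Compare the output of the two builds: differing sizes/offsets mean
     * they cannot safely share struct flock data or object code that was
     * compiled against the other layout. */
    printf("sizeof(struct flock)          = %u\n",
           (unsigned)sizeof(struct flock));
    printf("offsetof(struct flock, l_pid) = %u\n",
           (unsigned)offsetof(struct flock, l_pid));
    return 0;
}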

Thanks,
Gavin


Re: Segfault in testlockperf test on AIX

2009-06-17 Thread Bojan Smojver
On Thu, 2009-06-18 at 02:18 +0200, Gavin Sherry wrote:
 $2 = 24
 ptype proc_mutex_unlock_it
 type = struct flock {
     short l_type;
     short l_whence;
     off_t l_start;
     off_t l_len;
     unsigned int l_sysid;
     pid_t l_pid;
     int l_vfs;
 }

I'm guessing here: short 2 bytes, off_t 8 bytes, int 4 bytes, pid_t 4
bytes. So, we have 2 + 2 + 8 + 8 + 4 + 4 + 4 = 32. Is that right?

-- 
Bojan



Re: Segfault in testlockperf test on AIX

2009-06-17 Thread Gavin Sherry
2009/6/18 Bojan Smojver bo...@rexursive.com

 On Thu, 2009-06-18 at 02:18 +0200, Gavin Sherry wrote:
  $2 = 24
  ptype proc_mutex_unlock_it
  type = struct flock {
      short l_type;
      short l_whence;
      off_t l_start;
      off_t l_len;
      unsigned int l_sysid;
      pid_t l_pid;
      int l_vfs;
  }

 I'm guessing here: short 2 bytes, off_t 8 bytes, int 4 bytes, pid_t 4
 bytes. So, we have 2 + 2 + 8 + 8 + 4 + 4 + 4 = 32. Is that right?


No, it comes to 24 bytes: short = 2 + 2, off_t = 4 + 4, plus 4 + 4 + 4 = 24.
off_t is 4 for this build because I've forced it to 32-bit (gcc -m32) based
on some other requirements.

Thanks,
Gavin


Re: Segfault in testlockperf test on AIX

2009-06-17 Thread Bojan Smojver
On Thu, 2009-06-18 at 02:45 +0200, Gavin Sherry wrote:
 No, it comes to 24 bytes: short = 2 + 2, off_t = 4 + 4, plus 4 + 4 + 4 =
 24. off_t is 4 for this build because I've forced it to 32-bit (gcc
 -m32) based on some other requirements.

OK. Hmm, interesting. So, it segfaults on assigning l_pid.

Can you try this: break at that assignment and then try assigning to one
of the fields in the structure yourself. Do any of them work?

-- 
Bojan



Re: Segfault in testlockperf test on AIX

2009-06-17 Thread Gavin Sherry
2009/6/18 Bojan Smojver bo...@rexursive.com

 On Thu, 2009-06-18 at 02:45 +0200, Gavin Sherry wrote:
  No, it comes to 24 bytes: short = 2 + 2, off_t = 4 + 4, plus 4 + 4 + 4 =
  24. off_t is 4 for this build because I've forced it to 32-bit (gcc
  -m32) based on some other requirements.

 OK. Hmm, interesting. So, it segfaults on assigning l_pid.

 Can you try this: break at that assignment and then try assigning to one
 of the fields in the structure yourself. Do any of them work?


This being something of a heisenbug, the minute I tried to do this the
location of the segv changed. So I stepped through everything under
apr_proc_mutex_unix_setup_lock() and assigned to it before the code did:

(gdb) set proc_mutex_op_try.sem_flg = 0
(gdb) n

Program received signal SIGSEGV, Segmentation fault.
apr_proc_mutex_unix_setup_lock () at locks/unix/proc_mutex.c:176
176 proc_mutex_op_try.sem_flg = SEM_UNDO | IPC_NOWAIT;

Sigh. That's annoying.

Here's the disassembled function for those interested:

0x100074d8 <apr_proc_mutex_unix_setup_lock+0>:   stwu    r1,-24(r1)
0x100074dc <apr_proc_mutex_unix_setup_lock+4>:   lwz     r5,1092(r2)
0x100074e0 <apr_proc_mutex_unix_setup_lock+8>:   li      r0,6144
0x100074e4 <apr_proc_mutex_unix_setup_lock+12>:  lwz     r4,1096(r2)
0x100074e8 <apr_proc_mutex_unix_setup_lock+16>:  lwz     r8,1100(r2)
0x100074ec <apr_proc_mutex_unix_setup_lock+20>:  li      r9,0
0x100074f0 <apr_proc_mutex_unix_setup_lock+24>:  lwz     r7,1104(r2)
0x100074f4 <apr_proc_mutex_unix_setup_lock+28>:  li      r10,0
0x100074f8 <apr_proc_mutex_unix_setup_lock+32>:  lwz     r6,1088(r2)
0x100074fc <apr_proc_mutex_unix_setup_lock+36>:  li      r3,-1
0x10007500 <apr_proc_mutex_unix_setup_lock+40>:  li      r11,4096
0x10007504 <apr_proc_mutex_unix_setup_lock+44>:  sth     r0,4(r5)
0x10007508 <apr_proc_mutex_unix_setup_lock+48>:  li      r0,1
0x1000750c <apr_proc_mutex_unix_setup_lock+52>:  sth     r3,2(r5)
0x10007510 <apr_proc_mutex_unix_setup_lock+56>:  sth     r11,4(r4)
0x10007514 <apr_proc_mutex_unix_setup_lock+60>:  sth     r10,2(r8)
0x10007518 <apr_proc_mutex_unix_setup_lock+64>:  sth     r0,2(r4)
0x1000751c <apr_proc_mutex_unix_setup_lock+68>:  stw     r9,16(r7)
0x10007520 <apr_proc_mutex_unix_setup_lock+72>:  sth     r11,4(r6)
0x10007524 <apr_proc_mutex_unix_setup_lock+76>:  stw     r9,16(r8)
0x10007528 <apr_proc_mutex_unix_setup_lock+80>:  sth     r10,0(r6)
0x1000752c <apr_proc_mutex_unix_setup_lock+84>:  sth     r3,2(r6)
0x10007530 <apr_proc_mutex_unix_setup_lock+88>:  sth     r10,0(r5)
0x10007534 <apr_proc_mutex_unix_setup_lock+92>:  sth     r10,0(r4)
0x10007538 <apr_proc_mutex_unix_setup_lock+96>:  stw     r9,4(r8)
0x1000753c <apr_proc_mutex_unix_setup_lock+100>: stw     r9,8(r8)
0x10007540 <apr_proc_mutex_unix_setup_lock+104>: sth     r9,2(r7)
0x10007544 <apr_proc_mutex_unix_setup_lock+108>: stw     r9,4(r7)
0x10007548 <apr_proc_mutex_unix_setup_lock+112>: stw     r9,8(r7)
0x1000754c <apr_proc_mutex_unix_setup_lock+116>: li      r0,2
0x10007550 <apr_proc_mutex_unix_setup_lock+120>: addi    r1,r1,24
0x10007554 <apr_proc_mutex_unix_setup_lock+124>: sth     r0,0(r8)
0x10007558 <apr_proc_mutex_unix_setup_lock+128>: li      r0,3
0x1000755c <apr_proc_mutex_unix_setup_lock+132>: sth     r0,0(r7)
0x10007560 <apr_proc_mutex_unix_setup_lock+136>: blr
0x10007564 <apr_proc_mutex_unix_setup_lock+140>: .long 0x0
0x10007568 <apr_proc_mutex_unix_setup_lock+144>: .long 0x2040
0x1000756c <apr_proc_mutex_unix_setup_lock+148>: lwz     r0,0(r0)
0x10007570 <apr_proc_mutex_unix_setup_lock+152>: .long 0x8c
0x10007574 <apr_proc_mutex_unix_setup_lock+156>: .long 0x1e6170
0x10007578 <apr_proc_mutex_unix_setup_lock+160>: andi.   r31,r18,28786
0x1000757c <apr_proc_mutex_unix_setup_lock+164>: xoris   r3,r27,24429
0x10007580 <apr_proc_mutex_unix_setup_lock+168>: andis.  r20,r11,25976
0x10007584 <apr_proc_mutex_unix_setup_lock+172>: rlwnm.  r21,r27,r13,25,20
0x10007588 <apr_proc_mutex_unix_setup_lock+176>: rldicr. r31,r2,14,45
0x1000758c <apr_proc_mutex_unix_setup_lock+180>: andis.  r21,r3,28767
0x10007590 <apr_proc_mutex_unix_setup_lock+184>: xoris   r15,r3,25451

It's a little late in my timezone to decipher this.

Thanks,
Gavin


Re: Segfault in testlockperf test on AIX

2009-06-17 Thread Bojan Smojver
On Thu, 2009-06-18 at 03:12 +0200, Gavin Sherry wrote:
 It's a little late my time to decipher this.

I'll bet on one of the load/store thingies. Try with si/ni to determine
where exactly it dies. Yeah, I know - not much help :-(
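
For reference, that instruction-level stepping would look roughly like this
(si steps one machine instruction, ni steps over calls; repeat until the
SIGSEGV fires, then x/i $pc shows the faulting store and info registers shows
the TOC base and the pointers being stored through):

(gdb) break apr_proc_mutex_unix_setup_lock
(gdb) run
(gdb) si
(gdb) x/i $pc
(gdb) info registers r2 r5 r7 r8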

-- 
Bojan