Mladen Turk wrote:
> Hi,
>
> Since WIN32 imposes a pretty nasty limit of 64 on FD_SETSIZE, which is
> way too low for any serious usage, I developed an alternative
> implementation.
> Also, the code supports the APR_POLLSET_THREADSAFE flag.
Coolness!
> A simple patch to apr_arch_poll_private.h allows multiple
> implementations to be compiled.
>
> Any comments?
Comments below...
....
> #if POLL_USES_WEVENT
>
> struct apr_pollset_t
> {
>     apr_pool_t *pool;
>     apr_interval_time_t timeout;
>     apr_uint32_t nelts;
>     apr_uint32_t nalloc;
>     HANDLE *hevents;
>     SOCKET *sockets;
>     int maxfd;
>     apr_pollfd_t *query_set;
>     apr_pollfd_t *result_set;
>     LPCRITICAL_SECTION mutex;
Why can't this be an apr_thread_mutex?
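Something along these lines would keep it portable, I think (untested
sketch using only the public apr_thread_mutex API; the mutex member would
become an apr_thread_mutex_t *, and the THREADSAFE check is just my guess
at how the patch gates it):

    #include "apr_thread_mutex.h"

    /* in apr_pollset_create(), roughly: */
    (*pollset)->mutex = NULL;
    if (flags & APR_POLLSET_THREADSAFE) {
        apr_status_t rv = apr_thread_mutex_create(&(*pollset)->mutex,
                                                  APR_THREAD_MUTEX_DEFAULT, p);
        if (rv != APR_SUCCESS)
            return rv;
    }

    /* ... and wherever the critical section is entered today: */
    if (pollset->mutex)
        apr_thread_mutex_lock(pollset->mutex);
    /* touch nelts / hevents / sockets */
    if (pollset->mutex)
        apr_thread_mutex_unlock(pollset->mutex);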
....
> APR_DECLARE(apr_status_t) apr_pollset_create(apr_pollset_t **pollset,
>                                              apr_uint32_t size,
>                                              apr_pool_t *p,
>                                              apr_uint32_t flags)
> {
>     *pollset = apr_palloc(p, sizeof(**pollset));
>     (*pollset)->nelts = 0;
>     (*pollset)->nalloc = size;
>     (*pollset)->pool = p;
>     (*pollset)->maxfd = 0;
>     (*pollset)->timeout = 0;
>     (*pollset)->query_set = apr_palloc(p, size * sizeof(apr_pollfd_t));
>     (*pollset)->result_set = apr_palloc(p, size * sizeof(apr_pollfd_t));
>     (*pollset)->hevents = apr_palloc(p, size * sizeof(HANDLE));
>     (*pollset)->sockets = apr_palloc(p, size * sizeof(SOCKET));
Okay, unlike the KQueue, Epoll and Ports pollsets, this one will not
dynamically grow if more than 'size' sockets are added. This is okay,
but is there a reason not to use the RINGs like the others?
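For reference, the unix backends do it roughly like this (rough sketch
only; pfd_elem_t mirrors the one in apr_arch_poll_private.h, but the
free_ring/query_ring members here are my own invention for illustration):

    #include "apr_ring.h"
    #include "apr_poll.h"

    /* one ring element per descriptor, recycled between rings */
    typedef struct pfd_elem_t pfd_elem_t;
    struct pfd_elem_t {
        APR_RING_ENTRY(pfd_elem_t) link;
        apr_pollfd_t pfd;
    };

    /* inside apr_pollset_add(), after taking the lock: */
    pfd_elem_t *elem;

    if (!APR_RING_EMPTY(&pollset->free_ring, pfd_elem_t, link)) {
        /* recycle an element released by apr_pollset_remove() */
        elem = APR_RING_FIRST(&pollset->free_ring);
        APR_RING_REMOVE(elem, link);
    }
    else {
        /* no spare element -- grow from the pool instead of failing */
        elem = apr_palloc(pollset->pool, sizeof(*elem));
        APR_RING_ELEM_INIT(elem, link);
    }
    elem->pfd = *descriptor;
    APR_RING_INSERT_TAIL(&pollset->query_ring, elem, pfd_elem_t, link);

That is what lets the other backends keep accepting descriptors past the
initial size instead of returning APR_ENOMEM.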
....
> APR_DECLARE(apr_status_t) apr_pollset_add(apr_pollset_t *pollset,
>                                           const apr_pollfd_t *descriptor)
> {
>     apr_os_sock_t fd;
>     DWORD reqevents = FD_CLOSE;
>     if (pollset->nelts == pollset->nalloc) {
>         return APR_ENOMEM;
>     }
I believe you have to lock with the mutex before this check?
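Something like this, I'd think (just a sketch, keeping the
CRITICAL_SECTION as in the patch):

    if (pollset->mutex)
        EnterCriticalSection(pollset->mutex);
    if (pollset->nelts == pollset->nalloc) {
        if (pollset->mutex)
            LeaveCriticalSection(pollset->mutex);
        return APR_ENOMEM;
    }
    /* ... fill in hevents[nelts] / sockets[nelts] and bump nelts,
     * then LeaveCriticalSection() on the way out ... */

Otherwise two threads adding at the same time can both pass the check and
race on nelts.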
....
> APR_DECLARE(apr_status_t) apr_pollset_poll(apr_pollset_t *pollset,
>                                            apr_interval_time_t timeout,
>                                            apr_int32_t *num,
>                                            const apr_pollfd_t **descriptors)
> {
>     apr_uint32_t i, j = 0;
>     apr_time_t stop_time;
>     DWORD dw = WAIT_TIMEOUT;
>     DWORD sleep_time = 0;
>     if (timeout < 0)
>         timeout = 0;
>     stop_time = apr_time_now() + timeout;
>     do {
>         j = 0;
>         if (sleep_time) {
>             Sleep(sleep_time);
>             /* Progressively increase sleep timeout
>              * by 10%.
>              */
>             if (sleep_time < 1000)
>                 sleep_time += 10;
>         }
>         else
>             sleep_time = 10;
>         if (pollset->mutex)
>             EnterCriticalSection(pollset->mutex);
>         for (i = 0; i < pollset->nelts; i++) {
>             WSANETWORKEVENTS ne;
>             apr_int16_t rtnevents = 0;
>             /* remove might invalidate the handle */
>             if (pollset->hevents[i] == NULL ||
>                 (WSAEnumNetworkEvents(pollset->sockets[i],
>                                       pollset->hevents[i],
>                                       &ne) == SOCKET_ERROR)) {
>                 rtnevents = APR_POLLERR;
>                 ne.lNetworkEvents = 0;
>             }
Hmm. I am not really happy with this loop. I don't think this will be
very fast with thousands of sockets. I am far from an expert on Win32,
but why can't WSAWaitForMultipleEvents be used instead of iterating over
every socket?
>             if (ne.lNetworkEvents & FD_ACCEPT)
>                 rtnevents |= APR_POLLIN;
>             if (ne.lNetworkEvents & FD_READ)
>                 rtnevents |= APR_POLLIN;
>             if (ne.lNetworkEvents & FD_WRITE)
>                 rtnevents |= APR_POLLOUT;
>             if (ne.lNetworkEvents & FD_QOS)
>                 rtnevents |= APR_POLLPRI;
>             if (ne.lNetworkEvents & FD_CLOSE)
>                 rtnevents |= APR_POLLHUP;
>             if (rtnevents) {
>                 pollset->result_set[j] = pollset->query_set[i];
>                 pollset->result_set[j].rtnevents = rtnevents;
>                 j++;
>             }
>         }
>         if (pollset->mutex)
>             LeaveCriticalSection(pollset->mutex);
>         if (j > 0)
>             break;
>     } while ((apr_time_now() < stop_time));
Ack. If your sockets are idle, this will just spin the CPU -- I wish
this could be pushed down to a single system call, ala
WSAWaitForMultipleEvents, instead of us having to fake the timeout.
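Something in this direction, maybe (very rough sketch; it reuses the
hevents[] array from the patch, and note that WSAWaitForMultipleEvents is
itself capped at WSA_MAXIMUM_WAIT_EVENTS (64) handles per call, so larger
sets would still have to be waited on in chunks):

    DWORD rv;
    apr_uint32_t signaled;

    rv = WSAWaitForMultipleEvents((DWORD)pollset->nelts, pollset->hevents,
                                  FALSE,    /* wake on any single event */
                                  timeout >= 0
                                      ? (DWORD)apr_time_as_msec(timeout)
                                      : WSA_INFINITE,
                                  FALSE);   /* not alertable */
    if (rv == WSA_WAIT_TIMEOUT)
        return APR_TIMEUP;
    if (rv == WSA_WAIT_FAILED)
        return apr_get_netos_error();

    /* index of the first signaled event; WSAEnumNetworkEvents() could
     * then start at that socket instead of sweeping all of them. */
    signaled = rv - WSA_WAIT_EVENT_0;

At least that would block in the kernel instead of spinning in the
Sleep() loop.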
-Paul