cvs commit: jakarta-tomcat-connectors/jk/java/org/apache/jk/common ChannelNioSocket.java ChannelSocket.java

2005-09-24 Thread billbarker
billbarker    2005/09/24 16:53:23

  Modified: jk/java/org/apache/jk/common ChannelNioSocket.java
ChannelSocket.java
  Log:
  Gracefully handle the case where some Socket options are disabled at the OS 
level.
  
  For reasons known only to Sun, Socket.setSoLinger actually throws an 
exception on some Solaris systems.  Since I'm betting that virtually nobody 
ever sets this option explicitly, just log the error at DEBUG level and continue 
on.
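
  Outside the diff, a minimal self-contained sketch of the same pattern (this is
  not the connector's code: the class name, field defaults, and the plain
  System.out stand-in for the commons-logging logger are illustrative only).
  The idea is simply to group the option calls in one helper and downgrade a
  SocketException to a DEBUG-level message so the connection is still serviced:

      import java.net.Socket;
      import java.net.SocketException;

      public class SocketOptionsSketch {
          private int socketTimeout = 0;     // 0 means leave the JVM default
          private boolean tcpNoDelay = true;
          private int linger = 100;          // <= 0 means do not touch SO_LINGER

          void configure(Socket s) {
              try {
                  setSocketOptions(s);
              } catch (SocketException sex) {
                  // The OS rejected one of the options (e.g. SO_LINGER on some
                  // Solaris builds); log it and keep serving the connection.
                  System.out.println("DEBUG: Error initializing Socket Options: " + sex);
              }
          }

          private void setSocketOptions(Socket s) throws SocketException {
              if (socketTimeout > 0)
                  s.setSoTimeout(socketTimeout);
              s.setTcpNoDelay(tcpNoDelay);
              if (linger > 0)
                  s.setSoLinger(true, linger);
          }
      }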
  
  Revision  Changes    Path
  1.7   +17 -6 
jakarta-tomcat-connectors/jk/java/org/apache/jk/common/ChannelNioSocket.java
  
  Index: ChannelNioSocket.java
  ===
  RCS file: 
/home/cvs/jakarta-tomcat-connectors/jk/java/org/apache/jk/common/ChannelNioSocket.java,v
  retrieving revision 1.6
  retrieving revision 1.7
  diff -u -r1.6 -r1.7
  --- ChannelNioSocket.java 27 Jul 2005 15:12:01 -  1.6
  +++ ChannelNioSocket.java 24 Sep 2005 23:53:22 -  1.7
  @@ -34,6 +34,7 @@
   import java.net.InetSocketAddress;
   import java.net.ServerSocket;
   import java.net.Socket;
  +import java.net.SocketException;
   
   import javax.management.ListenerNotFoundException;
   import javax.management.MBeanNotificationInfo;
  @@ -320,12 +321,12 @@
   ep.setNote( socketNote, s );
   if(log.isDebugEnabled() )
    log.debug("Accepted socket " + s + " channel " + sc.isBlocking());
   -if( linger > 0 )
   -s.setSoLinger( true, linger);
   -if( socketTimeout > 0 ) 
   -s.setSoTimeout( socketTimeout );
   -
   -s.setTcpNoDelay( tcpNoDelay ); // set socket tcpnodelay state
   +
   +try {
   +setSocketOptions(s);
   +} catch(SocketException sex) {
   +log.debug("Error initializing Socket Options", sex);
   +}
   
   requestCount++;
   
  @@ -337,6 +338,16 @@
   ep.setControl( tp );
   }
   
  +private void setSocketOptions(Socket s) throws SocketException {
   +if( socketTimeout > 0 ) 
   +s.setSoTimeout( socketTimeout );
   +
   +s.setTcpNoDelay( tcpNoDelay ); // set socket tcpnodelay state
   +
   +if( linger > 0 )
  +s.setSoLinger( true, linger);
  +}
  +
   public void resetCounters() {
   requestCount=0;
   }
  
  
  
  1.58  +16 -6 
jakarta-tomcat-connectors/jk/java/org/apache/jk/common/ChannelSocket.java
  
  Index: ChannelSocket.java
  ===
  RCS file: 
/home/cvs/jakarta-tomcat-connectors/jk/java/org/apache/jk/common/ChannelSocket.java,v
  retrieving revision 1.57
  retrieving revision 1.58
  diff -u -r1.57 -r1.58
  --- ChannelSocket.java    27 Jul 2005 15:12:01 -  1.57
  +++ ChannelSocket.java    24 Sep 2005 23:53:22 -  1.58
  @@ -294,12 +294,12 @@
   ep.setNote( socketNote, s );
   if(log.isDebugEnabled() )
    log.debug("Accepted socket " + s );
   -if( linger > 0 )
   -s.setSoLinger( true, linger);
   -if( socketTimeout > 0 ) 
   -s.setSoTimeout( socketTimeout );
   -
   -s.setTcpNoDelay( tcpNoDelay ); // set socket tcpnodelay state
   +
   +try {
   +setSocketOptions(s);
   +} catch(SocketException sex) {
   +log.debug("Error initializing Socket Options", sex);
  +}
   
   requestCount++;
   
  @@ -314,6 +314,16 @@
   ep.setControl( tp );
   }
   
  +private void setSocketOptions(Socket s) throws SocketException {
   +if( socketTimeout > 0 ) 
   +s.setSoTimeout( socketTimeout );
   +
   +s.setTcpNoDelay( tcpNoDelay ); // set socket tcpnodelay state
   +
   +if( linger > 0 )
  +s.setSoLinger( true, linger);
  +}
  +
   public void resetCounters() {
   requestCount=0;
   }
  
  
  




cvs commit: jakarta-tomcat-connectors/jk/java/org/apache/jk/common ChannelNioSocket.java

2005-05-13 Thread billbarker
billbarker    2005/05/13 20:27:19

  Modified: jk/java/org/apache/jk/common ChannelNioSocket.java
  Log:
  Checking in some stuff I've had here, before I start to fix Jk-Coyote so that 
it has a hope of working with Mark's patch.
  
  Now this uses direct ByteBuffers (makes a big difference :).
  
  Added a nioIsBroken flag, which if true seems to work around the NIO bugs in 
the Windows implementation.
  
  Solaris performance is actually pretty close to ChannelSocket now.  Windows 
is really slow.  Of course, it would need a lot more testing on more platforms 
before I'm willing to lift the 'experimental' label (and I still haven't found 
a case where it's better than ChannelSocket :).
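
  For readers skimming the diff, a rough illustrative sketch of what the
  direct-ByteBuffer change buys (this is not the committed code; the class and
  method names are made up): the channel reads into native memory without an
  extra intermediate copy, and the 8K capacity matches the AJP packet limit
  discussed later in this thread.

      import java.io.IOException;
      import java.nio.ByteBuffer;
      import java.nio.channels.SocketChannel;

      public class DirectBufferReadSketch {
          static final int AJP_PACKET_SIZE = 8 * 1024;

          // Read one chunk from a (possibly non-blocking) channel through a
          // direct buffer, then copy it out for byte[]-based message code.
          static int readPacket(SocketChannel sc, byte[] dest) throws IOException {
              ByteBuffer bb = ByteBuffer.allocateDirect(AJP_PACKET_SIZE);
              int n = sc.read(bb);   // 0 or -1 are possible on a non-blocking channel
              if (n > 0) {
                  bb.flip();
                  bb.get(dest, 0, n);
              }
              return n;
          }
      }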
  
  Revision  Changes    Path
  1.3   +164 -91   
jakarta-tomcat-connectors/jk/java/org/apache/jk/common/ChannelNioSocket.java
  
  Index: ChannelNioSocket.java
  ===
  RCS file: 
/home/cvs/jakarta-tomcat-connectors/jk/java/org/apache/jk/common/ChannelNioSocket.java,v
  retrieving revision 1.2
  retrieving revision 1.3
  diff -u -r1.2 -r1.3
  --- ChannelNioSocket.java 24 Apr 2005 04:27:42 -  1.2
  +++ ChannelNioSocket.java 14 May 2005 03:27:19 -  1.3
  @@ -99,6 +99,7 @@
   boolean tcpNoDelay=true; // nodelay to true by default
   int linger=100;
   int socketTimeout = 0;
  +boolean nioIsBroken = false;
   private Selector selector = null;
   
   long requestCount=0;
  @@ -241,15 +242,15 @@
   tp.setMaxThreads(i);
   }
   
  -public void setMinSpareThreads( int i ) {
  +public void setMinSpareThreads( int i ) {
    if( log.isDebugEnabled()) log.debug("Setting minSpareThreads " + i);
  -tp.setMinSpareThreads(i);
  -}
  +tp.setMinSpareThreads(i);
  +}
   
  -public void setMaxSpareThreads( int i ) {
  +public void setMaxSpareThreads( int i ) {
    if( log.isDebugEnabled()) log.debug("Setting maxSpareThreads " + i);
  -tp.setMaxSpareThreads(i);
  -}
  +tp.setMaxSpareThreads(i);
  +}
   
   public int getMaxThreads() {
   return tp.getMaxThreads();   
  @@ -266,6 +267,13 @@
   public void setBacklog(int i) {
   }
   
  +public void setNioIsBroken(boolean nib) {
  +nioIsBroken = nib;
  +}
  +
  +public boolean getNioIsBroken() {
  +return nioIsBroken;
  +}
   
   /*   */
   ServerSocket sSocket;
  @@ -278,7 +286,6 @@
   public void pause() throws Exception {
   synchronized(this) {
   paused = true;
  -//unLockSocket();
   }
   }
   
  @@ -501,17 +508,6 @@
   
   if(log.isTraceEnabled() )
    log.trace("send() " + len + " " + buf[4] );
  -if(buf[4] == HandlerRequest.JK_AJP13_END_RESPONSE ) {
  -// After this goes out, the client may send a new request
  -// before the thread finishes, so tell the Poller that the
  -// next read is new
  -Socket s = (Socket)ep.getNote(socketNote);
  -SelectionKey key = s.getChannel().keyFor(selector);
  -if(key != null) {
  -SocketConnection sc = (SocketConnection)key.attachment();
  -sc.setFinished();
  -}
  -}
   
   OutputStream os=(OutputStream)ep.getNote( osNote );
   os.write( buf, 0, len );
  @@ -528,8 +524,8 @@
   public int receive( Msg msg, MsgContext ep )
   throws IOException
   {
  -if (log.isDebugEnabled()) {
   -log.debug("receive()" );
   +if (log.isTraceEnabled()) {
   +log.trace("receive()" );
   }
   
   byte buf[]=msg.getBuffer();
  @@ -636,8 +632,6 @@
   /** Accept incoming connections, dispatch to the thread pool
*/
   void acceptConnections() {
  -if( log.isDebugEnabled() )
   -log.debug("Accepting ajp connections on " + port);
   if( running ) {
   try{
   MsgContext ep=new MsgContext();
  @@ -674,8 +668,8 @@
   return flush( msg, ep );
   }
   
  -if( log.isDebugEnabled() )
   -log.debug("Call next " + type + " " + next);
   +if( log.isTraceEnabled() )
   +log.trace("Call next " + type + " " + next);
   
   // Send notification
   if( nSupport!=null ) {
  @@ -788,7 +782,6 @@
   this.ep=ep;
   }
   
  -
   public Object[] getInitData() {
   return null;
   }
  @@ -797,14 +790,13 @@
   if(!processConnection(ep)) {
   unregister(ep);
   }
  -setFinished();
   }
   
   public boolean isRunning() {
   return inProgress;
   }
   
  -public synchronized void setFinished() {
  +public 

Re: cvs commit: jakarta-tomcat-connectors/jk/java/org/apache/jk/common ChannelNioSocket.java

2005-04-24 Thread Remy Maucherat
[EMAIL PROTECTED] wrote:
> billbarker    2005/04/23 21:27:42
>
>   Modified: jk/java/org/apache/jk/common ChannelNioSocket.java
>   Log:
>   Give up on switching between blocking/non-blocking Sockets, also move
>   the Accept into the Poller instead of its own thread.
>
>   This is still very much experimental, and nobody should even dream of
>   using it in production.
>
>   Testing on Windows, it's very flakey.  On Solaris, it's stable
>   enough, but ChannelSocket is about 25% faster.

Does it behave about the same as the APR endpoint? (I didn't 
try it - I run Windoze for my dev, BTW ;) )

I would like AJP and HTTP to share more code. Do you think that NIO is a 
better choice, or should it remain an experiment? To me, it seems less 
mature, robust and portable (the most annoying problem probably being 
that to get bugfixes and feature updates, you need to upgrade the JVM), 
but I don't have a whole lot of experience.

Rémy


Re: cvs commit: jakarta-tomcat-connectors/jk/java/org/apache/jk/common ChannelNioSocket.java

2005-04-24 Thread Bill Barker
----- Original Message ----- 
From: Remy Maucherat [EMAIL PROTECTED]
To: Tomcat Developers List tomcat-dev@jakarta.apache.org
Sent: Sunday, April 24, 2005 1:49 AM
Subject: Re: cvs commit: 
jakarta-tomcat-connectors/jk/java/org/apache/jk/common ChannelNioSocket.java


> [EMAIL PROTECTED] wrote:
>> billbarker    2005/04/23 21:27:42
>>
>>   Modified: jk/java/org/apache/jk/common ChannelNioSocket.java
>>   Log:
>>   Give up on switching between blocking/non-blocking Sockets, also move
>>   the Accept into the Poller instead of its own thread.
>>
>>   This is still very much experimental, and nobody should even dream of
>>   using it in production.
>>
>>   Testing on Windows, it's very flakey.  On Solaris, it's stable
>>   enough, but ChannelSocket is about 25% faster.
>
> Does it behave about the same as the APR endpoint? (I didn't try 
> it - I run Windoze for my dev, BTW ;) )

ChannelNioSocket works ok on Windows at low concurrency, but what's the 
point? ;-).  After that, NIO starts throwing weird NPEs.

I haven't tried the APR endpoint on Solaris yet.  If I get some time, I'll 
try compiling it and give it a spin.  However the NIO tests suggest that 
PoolTcpEndpoint will win. There is just not much downside to simply 
increasing maxThreads on Solaris, but the additional syncs and 
context-switching do cost.

> I would like AJP and HTTP to share more code. Do you think that NIO is a 
> better choice, or should it remain an experiment? To me, it seems less 
> mature, robust and portable (the most annoying problem probably being that 
> to get bugfixes and feature updates, you need to upgrade the JVM), but I 
> don't have a whole lot of experience.

Most of the split was because Costin designed Jk-Coyote around the JNI 
stuff. Also, at the time PoolTcpEndpoint didn't support the master-slave 
model he settled on.

Actually, I'm thinking of leaving ChannelNioSocket in mostly to have 
something to point to when people show up on the list asking "why doesn't 
Tomcat use this great NIO stuff?". ;-)  The non-blocking IO actually works 
well for AJP (since all of the reads and writes are at most 8K).  Using 
ByteBuffer vs byte[] seems mostly a matter of taste :).  Otherwise, I agree 
that NIO should probably remain an experiment for now.
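
[Editorial aside for the archive: a tiny hedged sketch of that last point, not 
code from the patch; the class and method names are illustrative. An existing 
byte[]-style message buffer can simply be wrapped so the channel reads straight 
into it, and since an AJP packet is at most 8K a single non-blocking read 
normally returns the whole packet.]

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.SocketChannel;

    public class MsgBufferSketch {
        // Wrap the existing byte[] so a non-blocking read lands directly in it.
        static int readInto(SocketChannel sc, byte[] msgBuf) throws IOException {
            ByteBuffer view = ByteBuffer.wrap(msgBuf, 0, Math.min(msgBuf.length, 8 * 1024));
            return sc.read(view);   // at most one 8K AJP packet per readiness event
        }
    }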





Re: cvs commit: jakarta-tomcat-connectors/jk/java/org/apache/jk/common ChannelNioSocket.java

2005-04-24 Thread Remy Maucherat
Bill Barker wrote:
>> Does it behave about the same as the APR endpoint? (I didn't 
>> try it - I run Windoze for my dev, BTW ;) )
>
> ChannelNioSocket works ok on Windows at low concurrency, but what's the 
> point? ;-).  After that, NIO starts throwing weird NPEs.
>
> I haven't tried the APR endpoint on Solaris yet.  If I get some time, 
> I'll try compiling it and give it a spin.  However the NIO tests suggest 
> that PoolTcpEndpoint will win. There is just not much downside to simply 
> increasing maxThreads on Solaris, but the additional syncs and 
> context-switching do cost.
That's a good question. On Windows, raw throughput (ab on localhost, I 
suck ...) is slower, but any kind of real test seems more or less 
equivalent, with sendfile bringing in gains. I don't know if this counts 
as real testing (probably not).
Linux is faster according to Mladen.
Solaris is untested, but would be interesting indeed.

>> I would like AJP and HTTP to share more code. Do you think that NIO is 
>> a better choice, or should it remain an experiment? To me, it seems 
>> less mature, robust and portable (the most annoying problem probably 
>> being that to get bugfixes and feature updates, you need to upgrade 
>> the JVM), but I don't have a whole lot of experience.
>
> Most of the split was because Costin designed Jk-Coyote around the JNI 
> stuff. Also, at the time PoolTcpEndpoint didn't support the master-slave 
> model he settled on.
>
> Actually, I'm thinking of leaving ChannelNioSocket in mostly to have 
> something to point to when people show up on the list asking "why 
> doesn't Tomcat use this great NIO stuff?". ;-)  The non-blocking IO 
> actually works well for AJP (since all of the reads and writes are at 
> most 8K).  Using ByteBuffer vs byte[] seems mostly a matter of taste 
> :).  Otherwise, I agree that NIO should probably remain an experiment 
> for now.
Of course, it's a good comparison, especially since it was done quickly.
Rémy


cvs commit: jakarta-tomcat-connectors/jk/java/org/apache/jk/common ChannelNioSocket.java

2005-04-23 Thread billbarker
billbarker    2005/04/23 21:27:42

  Modified: jk/java/org/apache/jk/common ChannelNioSocket.java
  Log:
  Give up on switching between blocking/non-blocking Sockets, also move the 
Accept into the Poller instead of its own thread.
  
  This is still very much experimental, and nobody should even dream of using 
it in production.
  
  Testing on Windows, it's very flakey.  On Solaris, it's stable enough, but 
ChannelSocket is about 25% faster.
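
  For context, a self-contained sketch of what "move the Accept into the Poller" 
means (this is not the ChannelNioSocket code; the port and all names are 
illustrative): the ServerSocketChannel is registered with the same Selector for 
OP_ACCEPT, so one poller loop handles both new connections and read readiness 
instead of running accept() on its own thread.

      import java.io.IOException;
      import java.net.InetSocketAddress;
      import java.nio.ByteBuffer;
      import java.nio.channels.SelectionKey;
      import java.nio.channels.Selector;
      import java.nio.channels.ServerSocketChannel;
      import java.nio.channels.SocketChannel;
      import java.util.Iterator;

      public class AcceptInPollerSketch {
          public static void main(String[] args) throws IOException {
              Selector selector = Selector.open();
              ServerSocketChannel ssc = ServerSocketChannel.open();
              ssc.socket().bind(new InetSocketAddress(8009)); // illustrative port
              ssc.configureBlocking(false);
              ssc.register(selector, SelectionKey.OP_ACCEPT); // accept handled by the poller

              ByteBuffer buf = ByteBuffer.allocateDirect(8 * 1024); // AJP packet limit

              while (true) {
                  selector.select();
                  Iterator<SelectionKey> it = selector.selectedKeys().iterator();
                  while (it.hasNext()) {
                      SelectionKey key = it.next();
                      it.remove();
                      if (key.isAcceptable()) {
                          SocketChannel sc = ((ServerSocketChannel) key.channel()).accept();
                          if (sc != null) {
                              sc.configureBlocking(false);
                              sc.register(selector, SelectionKey.OP_READ);
                          }
                      } else if (key.isReadable()) {
                          SocketChannel sc = (SocketChannel) key.channel();
                          buf.clear();
                          if (sc.read(buf) < 0) {   // connection closed by the peer
                              key.cancel();
                              sc.close();
                          }
                          // ... otherwise hand the data off to a worker thread ...
                      }
                  }
              }
          }
      }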
  
  Revision  Changes    Path
  1.2   +231 -80   
jakarta-tomcat-connectors/jk/java/org/apache/jk/common/ChannelNioSocket.java
  
  Index: ChannelNioSocket.java
  ===
  RCS file: 
/home/cvs/jakarta-tomcat-connectors/jk/java/org/apache/jk/common/ChannelNioSocket.java,v
  retrieving revision 1.1
  retrieving revision 1.2
  diff -u -r1.1 -r1.2
  --- ChannelNioSocket.java 17 Apr 2005 03:41:08 -  1.1
  +++ ChannelNioSocket.java 24 Apr 2005 04:27:42 -  1.2
  @@ -18,19 +18,22 @@
   
   import java.util.Set;
   import java.util.Iterator;
  -import java.io.BufferedInputStream;
  -import java.io.BufferedOutputStream;
   import java.io.IOException;
   import java.io.InputStream;
   import java.io.OutputStream;
  +import java.nio.ByteBuffer;
   import java.nio.channels.Selector;
   import java.nio.channels.SelectionKey;
  +import java.nio.channels.SocketChannel;
   import java.nio.channels.ClosedSelectorException;
  +import java.nio.channels.ServerSocketChannel;
  +import java.nio.channels.CancelledKeyException;
  +import java.nio.channels.ClosedChannelException;
   import java.net.URLEncoder;
   import java.net.InetAddress;
  +import java.net.InetSocketAddress;
   import java.net.ServerSocket;
   import java.net.Socket;
  -import java.net.SocketException;
   
   import javax.management.ListenerNotFoundException;
   import javax.management.MBeanNotificationInfo;
  @@ -92,10 +95,10 @@
   int maxPort=8019; // 0 for backward compat.
   int port=startPort;
   InetAddress inet;
  -int serverTimeout;
  +int serverTimeout = 0;
   boolean tcpNoDelay=true; // nodelay to true by default
   int linger=100;
  -int socketTimeout;
  +int socketTimeout = 0;
   private Selector selector = null;
   
   long requestCount=0;
  @@ -105,7 +108,6 @@
  flush() is honored ( on my test, I got 367-433 RPS and
  52-35ms average time with a simple servlet )
   */
  -static final boolean BUFFER_WRITE=false;
   
   ThreadPool tp=ThreadPool.createThreadPool(true);
   
  @@ -271,12 +273,12 @@
   final int isNote=2;
   final int osNote=3;
   final int notifNote=4;
  -boolean paused = true;
  +boolean paused = false;
   
   public void pause() throws Exception {
   synchronized(this) {
   paused = true;
  -unLockSocket();
  +//unLockSocket();
   }
   }
   
  @@ -299,10 +301,11 @@
   }
   }
   }
  -Socket s=sSocket.accept();
  +SocketChannel sc=sSocket.getChannel().accept();
  +Socket s = sc.socket();
   ep.setNote( socketNote, s );
   if(log.isDebugEnabled() )
   -log.debug("Accepted socket " + s );
   +log.debug("Accepted socket " + s + " channel " + sc.isBlocking());
    if( linger > 0 )
    s.setSoLinger( true, linger);
    if( socketTimeout > 0 ) 
  @@ -312,12 +315,9 @@
   
   requestCount++;
   
  -InputStream is=new BufferedInputStream(s.getInputStream());
  -OutputStream os;
  -if( BUFFER_WRITE )
  -os = new BufferedOutputStream( s.getOutputStream());
  -else
  -os = s.getOutputStream();
  +sc.configureBlocking(false);
  +InputStream is=new SocketInputStream(sc);
  +OutputStream os = new SocketOutputStream(sc);
   ep.setNote( isNote, is );
   ep.setNote( osNote, os );
   ep.setControl( tp );
  @@ -349,19 +349,24 @@
   }
    if (maxPort < startPort)
   maxPort = startPort;
  +ServerSocketChannel ssc = ServerSocketChannel.open();
  +ssc.configureBlocking(false);
    for( int i=startPort; i<=maxPort; i++ ) {
   try {
  +InetSocketAddress iddr = null;
   if( inet == null ) {
  -sSocket = new ServerSocket( i, 0 );
  +iddr = new InetSocketAddress( i);
   } else {
  -sSocket=new ServerSocket( i, 0, inet );
  +iddr=new InetSocketAddress( inet, i);
   }
  +sSocket = ssc.socket();
  +sSocket.bind(iddr);
   port=i;
   break;
   } catch( IOException ex ) {
   if(log.isInfoEnabled())
log.info("Port busy " + i + " " +