Posted to users@activemq.apache.org by Stefan Gmeiner <st...@pup.ch> on 2008/08/26 11:18:29 UTC

Slow performance of NMS api compared to Java

We are evaluating the NMS API to connect a C# app to our ActiveMQ 
broker. For this we wrote a simple client which sends a request and 
waits for a reply (Client --> Broker --> Server --> Broker --> Client). 
The client/server C# app runs in a single process with two separate 
connections to the broker, which resides on a different PC on the network.

This scenario takes about 200ms for each message transferred via the 
C# API and less than 20ms via the Java API, although both do the same thing.

Does anybody have an idea what is going wrong, or why there is such a big 
time difference?

Thank you for helping
Stefan


Code for the C# test app follows:
==========================================
using System;
using Apache.NMS;

namespace Test
{
	class SimpleTest
	{
		private static readonly String URI = "tcp://broker:61616";
		private static readonly String REQUEST_QUEUE = "test.request";

		private static DateTime startOffset;

		public static void Main()
		{
			IConnectionFactory factory = new NMSConnectionFactory(URI);
			
			SetUpReceiver(factory);
			SetUpSender(factory);

			Console.WriteLine("Press any key to quit.");
			Console.ReadKey();
		}


		private static void SetUpReceiver(IConnectionFactory factory)
		{
			// set up receiver
			IConnection rConnection = factory.CreateConnection();
			ISession rSession = rConnection.CreateSession();
			IMessageConsumer rConsumer = rSession.CreateConsumer(rSession.GetQueue(REQUEST_QUEUE));
			IMessageProducer rProducer = rSession.CreateProducer();
			rConsumer.Listener += delegate(IMessage message)
			{
				OnMessage(rSession, rProducer, message);
			};
			rConnection.Start();
		}

		private static void SetUpSender(IConnectionFactory factory)
		{
			IConnection sConnection = factory.CreateConnection();
			ISession sSession = sConnection.CreateSession();
			IMessageProducer sProducer = sSession.CreateProducer(sSession.GetQueue(REQUEST_QUEUE));
			IDestination replyDestination = sSession.CreateTemporaryQueue();
			IMessageConsumer sConsumer = sSession.CreateConsumer(replyDestination);
			sConnection.Start();

			for (int i = 0; i < 5; i++)
			{
				Console.WriteLine("Test " + i);

				// send message and wait for reply
				IMessage requestMsg = sSession.CreateTextMessage("Request" + i);
				requestMsg.NMSReplyTo = replyDestination;

				startOffset = DateTime.Now;

				sProducer.Send(requestMsg, false, NMSConstants.defaultPriority, NMSConstants.defaultTimeToLive);

				WriteTimedMessage("Request message sent");

				IMessage replyMsg = sConsumer.Receive();

				WriteTimedMessage("Reply message received");
			}
		}

		private static void OnMessage(ISession session, IMessageProducer producer, IMessage message)
		{
			WriteTimedMessage("Request message received");

			IMessage replyMsg = session.CreateTextMessage("Reply");
			producer.Send(message.NMSReplyTo, replyMsg, false, NMSConstants.defaultPriority, NMSConstants.defaultTimeToLive);

			WriteTimedMessage("Reply message sent");
		}


		private static void WriteTimedMessage(String message)
		{
			lock (typeof(SimpleTest))
			{
				TimeSpan diff = DateTime.Now - startOffset;
				Console.WriteLine("{0} ms: {1}", diff.TotalMilliseconds, message);
			}
		}
	}
}




Re: Slow performance of NMS api compared to Java

Posted by user939393 <mi...@tideworks.com>.
I had similar results to yours when performance testing NMS. You may want
to evaluate IKVM for C# integration. Using IKVM, I got four times the message
throughput of NMS. Also, the converted jar -> dll gives you access to the
full JMS API for your C# producers and consumers.



Stefan Gmeiner wrote:
> [original message and test code quoted in full; snipped]

-- 
View this message in context: http://www.nabble.com/Slow-performance-of-NMS-api-compared-to-Java-tp19158553p19184345.html
Sent from the ActiveMQ - User mailing list archive at Nabble.com.


Re: Slow performance of NMS api compared to Java

Posted by semog <e....@gmail.com>.
I reworked the sample test code that Stefan posted so I could use it to test
my changes.  I'm not sure how to turn this into a unit test, since it's
simply an implementation evaluation tool, and not really a validation test.
Anyway, I had to switch to high-resolution timers, because DateTime.Now is
only accurate to within about 10 milliseconds; that margin of error is too
great for this type of timing test.  Here is the modified source file:
http://www.nabble.com/file/p19208717/nmsspeedtest.cs
(Caveat: this code will only work with my dev build, due to patches I have
made to it but not yet checked in.  If the changes look good, then this code
will work after I commit them.)
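
For reference, here is a minimal sketch of the timing change, assuming
System.Diagnostics.Stopwatch is used as the high-resolution timer (the linked
nmsspeedtest.cs may differ in detail):

using System;
using System.Diagnostics;

class TimingSketch
{
	// Stopwatch wraps the OS high-resolution performance counter, so the
	// elapsed time is good to well under a millisecond, unlike DateTime.Now.
	private static readonly Stopwatch timer = new Stopwatch();

	public static void StartTest()
	{
		timer.Reset();
		timer.Start();
	}

	public static void WriteTimedMessage(string message)
	{
		// Replaces (DateTime.Now - startOffset).TotalMilliseconds in the original test.
		Console.WriteLine("{0}\t{1,10:F3}", message, timer.Elapsed.TotalMilliseconds);
	}
}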

Here are the results from running the same test scenario multiple times with
different connection URIs (all times are in milliseconds).  I am sure this
could be formatted into a spreadsheet to make the comparisons easier.

Scenario: tcp://localhost:61616
Test 0
Request message sent	  7.416
Request message recv	 23.886
Reply message sent	189.227
Reply message recv	203.935
Test 1
Request message sent	 93.864
Request message recv	108.159
Reply message sent	312.878
Reply message recv	313.595
Test 2
Request message sent	217.790
Request message recv	218.346
Reply message sent	436.549
Reply message recv	437.026
Test 3
Request message sent	327.617
Request message recv	328.142
Reply message sent	546.322
Reply message recv	546.811
Test 4
Request message sent	218.215
Request message recv	218.910
Reply message sent	436.943
Reply message recv	437.724
Scenario: tcp://localhost:61616?wireformat.TcpNoDelayEnabled=true
Test 0
Request message sent	  0.917
Request message recv	  1.538
Reply message sent	193.286
Reply message recv	194.667
Test 1
Request message sent	217.301
Request message recv	217.859
Reply message sent	326.716
Reply message recv	327.204
Test 2
Request message sent	218.146
Request message recv	218.704
Reply message sent	436.883
Reply message recv	437.368
Test 3
Request message sent	218.257
Request message recv	218.806
Reply message sent	437.070
Reply message recv	437.551
Test 4
Request message sent	218.213
Request message recv	218.759
Reply message sent	436.799
Reply message recv	437.330
Scenario:
tcp://localhost:61616?wireformat.TcpNoDelayEnabled=true&transport.TcpNoDelayEnabled=true
Test 0
Request message sent	  0.927
Request message recv	  1.664
Reply message sent	  2.787
Reply message recv	  4.380
Test 1
Request message sent	  2.893
Request message recv	  3.137
Reply message sent	  3.998
Reply message recv	  4.451
Test 2
Request message sent	  0.740
Request message recv	  1.505
Reply message sent	  2.392
Reply message recv	  3.129
Test 3
Request message sent	  0.892
Request message recv	  1.560
Reply message sent	  3.860
Reply message recv	  4.113
Test 4
Request message sent	  1.944
Request message recv	  2.286
Reply message sent	  3.296
Reply message recv	  3.973
Scenario: tcp://localhost:61616?transport.TcpNoDelayEnabled=true
Test 0
Request message sent	  0.872
Request message recv	  1.488
Reply message sent	  2.360
Reply message recv	316.090
Test 1
Request message sent	  1.167
Request message recv	  1.879
Reply message sent	  2.810
Reply message recv	327.778
Test 2
Request message sent	  1.000
Request message recv	  1.714
Reply message sent	  2.591
Reply message recv	328.174
Test 3
Request message sent	  1.012
Request message recv	  1.614
Reply message sent	  2.534
Reply message recv	328.014
Test 4
Request message sent	  1.184
Request message recv	  2.145
Reply message sent	  3.108
Reply message recv	327.948
Scenario: tcp://localhost:61616?transport.FlushOnSend=false
Test 0
Request message sent	  0.929
Request message recv	  1.482
Reply message sent	186.404
Reply message recv	310.162
Test 1
Request message sent	208.501
Request message recv	209.045
Reply message sent	422.967
Reply message recv	437.409
Test 2
Request message sent	204.270
Request message recv	218.588
Reply message sent	423.038
Reply message recv	437.158
Test 3
Request message sent	204.557
Request message recv	218.779
Reply message sent	423.333
Reply message recv	423.817
Test 4
Request message sent	218.128
Request message recv	232.447
Reply message sent	546.299
Reply message recv	546.842
Scenario:
tcp://localhost:61616?transport.FlushOnSend=false&wireformat.TcpNoDelayEnabled=true
Test 0
Request message sent	  0.923
Request message recv	  1.492
Reply message sent	197.586
Reply message recv	198.975
Test 1
Request message sent	107.992
Request message recv	108.498
Reply message sent	326.686
Reply message recv	327.163
Test 2
Request message sent	218.160
Request message recv	218.718
Reply message sent	436.868
Reply message recv	437.356
Test 3
Request message sent	218.434
Request message recv	219.241
Reply message sent	436.955
Reply message recv	437.511
Test 4
Request message sent	108.951
Request message recv	109.811
Reply message sent	327.641
Reply message recv	328.263
Scenario:
tcp://localhost:61616?transport.FlushOnSend=false&wireformat.TcpNoDelayEnabled=true&transport.TcpNoDelayEnabled=true
Test 0
Request message sent	  1.541
Request message recv	  1.772
Reply message sent	  2.780
Reply message recv	  4.172
Test 1
Request message sent	  2.715
Request message recv	  2.944
Reply message sent	  3.763
Reply message recv	  4.260
Test 2
Request message sent	  3.308
Request message recv	  3.549
Reply message sent	  4.516
Reply message recv	  5.018
Test 3
Request message sent	  3.625
Request message recv	  3.847
Reply message sent	  4.795
Reply message recv	  5.251
Test 4
Request message sent	  2.847
Request message recv	  3.063
Reply message sent	  3.894
Reply message recv	  4.393
Scenario:
tcp://localhost:61616?transport.FlushOnSend=false&transport.TcpNoDelayEnabled=true
Test 0
Request message sent	  0.881
Request message recv	  2.312
Reply message sent	  3.592
Reply message recv	207.325
Test 1
Request message sent	  1.180
Request message recv	  4.225
Reply message sent	  5.124
Reply message recv	218.459
Test 2
Request message sent	  1.010
Request message recv	  1.670
Reply message sent	  2.613
Reply message recv	219.084
Test 3
Request message sent	  3.590
Request message recv	  3.829
Reply message sent	  4.762
Reply message recv	218.286
Test 4
Request message sent	  1.022
Request message recv	  1.743
Reply message sent	  2.655
Reply message recv	218.643

-- 
View this message in context: http://www.nabble.com/Slow-performance-of-NMS-api-compared-to-Java-tp19158553p19208717.html
Sent from the ActiveMQ - User mailing list archive at Nabble.com.


Re: Slow performance of NMS api compared to Java

Posted by Vadim Chekan <ko...@gmail.com>.
Jim,
> "transport.TcpNoDelayEnabled=true"
seems perfect to me.

Vadim.


On Thu, Aug 28, 2008 at 11:15 AM, semog <e....@gmail.com> wrote:
> [Jim Gomes's message, with the nested exchange, quoted in full; snipped -- see the standalone copies later in the thread]



-- 
From RFC 2631: In ASN.1, EXPLICIT tagging is implicit unless IMPLICIT
is explicitly specified

Re: Slow performance of NMS api compared to Java

Posted by Stefan_ <st...@pup.ch>.
Jim,

I checked out the code and ran some quick tests; the results were as expected.

Thanks
Stefan



semog wrote:
> 
> I made the change in the code to turn this on by default based on your
> contribution.  I made a few minor tweaks for .NET 1.1, but that's about
> it.
>  Thanks for catching this and contributing the code patch!
> 
> Let me know if I didn't merge in your changes correctly.  They should be
> included in the 1.0.0 tag branch as well as in the trunk branch.
> 

-- 
View this message in context: http://www.nabble.com/Slow-performance-of-NMS-api-compared-to-Java-tp19158553p19453523.html
Sent from the ActiveMQ - User mailing list archive at Nabble.com.


Re: Slow performance of NMS api compared to Java

Posted by Jim Gomes <e....@gmail.com>.
Hi Stefan,
The point that Java has this setting on by default is a strong one.  Also,
the point that both sides need to be turned on in order for it to be
effective makes a lot of sense, and it agrees with my observed test
results.

I made the change in the code to turn this on by default based on your
contribution.  I made a few minor tweaks for .NET 1.1, but that's about it.
 Thanks for catching this and contributing the code patch!

Let me know if I didn't merge in your changes correctly.  They should be
included in the 1.0.0 tag branch as well as in the trunk branch.

Best,
Jim

On Fri, Aug 29, 2008 at 12:22 AM, Stefan_ <st...@pup.ch> wrote:

> [Stefan's reply quoted in full; snipped -- see the standalone copy below]

Re: Slow performance of NMS api compared to Java

Posted by Stefan_ <st...@pup.ch>.
Hi Jim,


semog wrote:
> 
> First, I would like to keep the default TcpNoDelayEnabled setting as
> false, rather than changing it to true.  Without knowing how it will
> impact other users, I think this is safer, and for those who want to tune
> their connection, they can do so.  If you can clarify how changing the
> default will help everyone in the general case, then we can change the
> default setting.  I just don't know what the full ramifications might be.
> 

I understand that changing a default is not always appropriate, but the
Java API also uses the TcpNoDelayEnabled setting with a default value of
true, and I think that users of the API might not understand why the two
have different default settings. BTW, the NoDelay flag is a socket option of
the underlying OS, so setting it through Java or .NET should make no
difference and hence should have no other impact than it has for the Java
implementation.
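
For illustration, the flag in question maps to the standard .NET socket
option; a minimal sketch (not the actual NMS transport code) would be:

using System.Net.Sockets;

class NoDelaySketch
{
	static TcpClient Connect(string host, int port, bool tcpNoDelayEnabled)
	{
		TcpClient client = new TcpClient(host, port);
		// TcpClient.NoDelay sets the TCP_NODELAY option on the underlying OS
		// socket, i.e. true disables the Nagle algorithm for this connection.
		client.NoDelay = tcpNoDelayEnabled;
		return client;
	}
}

The same option exists on java.net.Socket (setTcpNoDelay), which is why the
behaviour should be the same regardless of which client sets it.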




> Second, there are two sides to the communication -- the client socket and
> the broker socket.  The WireFormatInfo sets the broker's socket settings,
> and your changes wait for the renegotiateWireFormat to come back from the
> broker in order to set the TcpNoDelayEnabled setting on the client's
> socket.  This 
> 

I understand that the NoDelay flag should be set either on both sides or on
neither. For this reason the WireFormat received from the broker is compared
to the client's default setting, and only if both parties enable the flag
and are capable of it will the option be activated on the connection. AFAIK
there is no point in enabling this option on one side if the other side is
not capable or willing.
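
In rough C# terms, the negotiation described above amounts to the following
(a sketch of the idea only; the names are illustrative, not the actual NMS
source):

using System.Net.Sockets;

class NoDelayNegotiationSketch
{
	// clientWantsNoDelay - the client's own TcpNoDelayEnabled preference
	// brokerWantsNoDelay - the TcpNoDelayEnabled flag in the WireFormatInfo
	//                      returned by the broker during OpenWire negotiation
	static void ApplyNoDelay(Socket socket, bool clientWantsNoDelay, bool brokerWantsNoDelay)
	{
		// The socket option is turned on only if both sides asked for it.
		socket.NoDelay = clientWantsNoDelay && brokerWantsNoDelay;
	}
}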




> will only work for OpenWire format, and not for STOMP.  Because of this,
> and other reasons, we need to have two separate setting flags.  I suggest
> the flag for setting the client's socket setting to be 
> "transport.TcpNoDelayEnabled".  As you already know, the existing broker's
> setting is "wireformat.TcpNoDelayEnabled".  Since this is wireformat
> specific, it is better to have a separate setting that can directly
> control the client's socket settings.
> 

AFAIK the Java implementation doesn't use the NoDelay flag for the STOMP
protocol (at least I haven't found it). In my understanding it would only
be useful if both sockets support the option, so it doesn't seem necessary
yet to make it available for the STOMP protocol (perhaps in the future).

Stefan
-- 
View this message in context: http://www.nabble.com/Slow-performance-of-NMS-api-compared-to-Java-tp19158553p19214979.html
Sent from the ActiveMQ - User mailing list archive at Nabble.com.


Re: Slow performance of NMS api compared to Java

Posted by semog <e....@gmail.com>.
I took a look at your changes, and I think they are very good.  However, I
want to discuss some changes I made to them to accommodate some of the
constraint requirements that NMS has, as well as to safely introduce these
changes.

First, I would like to keep the default TcpNoDelayEnabled setting as false,
rather than changing it to true.  Without knowing how it will impact other
users, I think this is safer, and for those who want to tune their
connection, they can do so.  If you can clarify how changing the default
will help everyone in the general case, then we can change the default
setting.  I just don't know what the full ramifications might be.

Second, there are two sides to the communication -- the client socket and
the broker socket.  The WireFormatInfo sets the broker's socket settings,
and your changes wait for the renegotiateWireFormat to come back from the
broker in order to set the TcpNoDelayEnabled setting on the client's socket. 
This will only work for OpenWire format, and not for STOMP.  Because of
this, and other reasons, we need to have two separate setting flags.  I
suggest that the flag for the client's socket setting be
"transport.TcpNoDelayEnabled".  As you already know, the existing broker
setting is "wireformat.TcpNoDelayEnabled".  Since that setting is wireformat
specific, it is better to have a separate setting that can directly control
the client's socket settings.

Also, for Vadim, my previous post that showed "connection.NoDelay=true" was
incorrect.  It should have been "transport.NoDelay=true".  However, that has
been changed to "transport.TcpNoDelayEnabled=true" to be consistent with the
existing "wireformat.TcpNoDelayEnabled" parameter.

This may be a bit confusing, so let me throw out some sample connection
URIs.  I propose that nothing change to the existing defaults.  If a user
wants to override the settings, here is how it would look:

activemq:tcp://localhost:61616?transport.TcpNoDelayEnabled=true

This would set the client-side socket to turn on the NoDelay flag.

activemq:tcp://localhost:61616?transport.TcpNoDelayEnabled=true&wireformat.TcpNoDelayEnabled=true

This would set both the client side and broker side sockets to turn on the
NoDelay flag.
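
As a point of reference, here is a minimal sketch of how a client would use
such a URI (the factory and connection calls are the same ones used in the
test program at the top of the thread; the URI parameters are the proposed,
not-yet-committed ones):

using Apache.NMS;

class ConnectSketch
{
	static void Main()
	{
		// Proposed settings: enable NoDelay on both the client-side and
		// broker-side sockets.
		string uri = "activemq:tcp://localhost:61616"
			+ "?transport.TcpNoDelayEnabled=true"
			+ "&wireformat.TcpNoDelayEnabled=true";

		IConnectionFactory factory = new NMSConnectionFactory(uri);
		IConnection connection = factory.CreateConnection();
		connection.Start();
		// ... create sessions, producers and consumers as in the test program ...
	}
}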

I haven't committed these changes yet.  I'd like to get your feedback on
this approach.

Best,
Jim



semog wrote:
> [earlier exchange quoted in full; snipped -- see the standalone copies later in the thread]

-- 
View this message in context: http://www.nabble.com/Slow-performance-of-NMS-api-compared-to-Java-tp19158553p19205566.html
Sent from the ActiveMQ - User mailing list archive at Nabble.com.


Re: Slow performance of NMS api compared to Java

Posted by Jim Gomes <e....@gmail.com>.
Hi Stefan,

Thanks for fixing the license.  I'll take a look at the code changes.  This
is one of those areas where I think we have the potential for speeding up
the code, but also the potential for creating subtle problems.  I'd like to
make sure that these changes get tested very closely.

The changes I put in change the start-up negotiation, at least that is the
intention. :)  I'll send a message when the code is in there.  Thanks again
for contributing!

- Jim

On Thu, Aug 28, 2008 at 12:59 AM, Stefan_ <st...@pup.ch> wrote:

> [Stefan's message quoted in full; snipped -- see the standalone copy below]

Re: Slow performance of NMS api compared to Java

Posted by Stefan_ <st...@pup.ch>.
Hi Jim, 

Sorry, but I simply overlooked the license agreement. I uploaded the patch
again with the ASF license.

I think adding the option to the URI is a good thing, but OpenWire already
negotiates the NoDelay option at connection start-up. Unfortunately NMS
doesn't support this option yet, so both the broker and the client use the
Nagle algorithm, which results in delays when sending messages.

BTW, I also removed the flush call after each write to the socket, as I
noticed that this call also slows down the throughput.
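
To illustrate the flush point (a generic .NET sketch, not the actual NMS
transport code): flushing a buffered stream after every write pushes each
small marshalled command onto the wire on its own, while omitting the
per-write flush lets the buffer coalesce writes.

using System.IO;

class FlushSketch
{
	static void SendCommand(BufferedStream transportStream, byte[] marshalledCommand)
	{
		transportStream.Write(marshalledCommand, 0, marshalledCommand.Length);
		// transportStream.Flush();  // a flush here after every command forces
		//                           // many tiny writes through to the socket;
		//                           // removing it lets writes be batched.
	}
}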

I would be very happy if the patch could be integrated into the source.

Stefan



semog wrote:
> 
> Since I couldn't look at your code because of the license grant issue,
> I looked in to what you had mentioned about the NoDelay option.  I
> took a stab at adding support for turning this (and several other
> socket transport options) on and off from the connection URI.  Once
> you fix the license grant, I can look at your patch and integrate it
> in with my changes.
> 
> The solution I am playing with would look like this:
> 
> activemq:tcp://localhost:61616?connection.NoDelay=true
> 
> This would turn off the Nagle algorithm on the socket connection.
> 
> Thanks!
> -Jim
> 
> 

-- 
View this message in context: http://www.nabble.com/Slow-performance-of-NMS-api-compared-to-Java-tp19158553p19195937.html
Sent from the ActiveMQ - User mailing list archive at Nabble.com.


Re: Slow performance of NMS api compared to Java

Posted by James Strachan <ja...@gmail.com>.
2008/8/28 Vadim Chekan <ko...@gmail.com>:
> Would it be property of connection or transport?
> I thought connection means JMS connection and it has nothing to do with TCP.

FWIW a JMS Connection typically has a TCP connection underneath. We
often use the connection URL to configure things like transport, TCP
settings and so forth.
-- 
James
-------
http://macstrac.blogspot.com/

Open Source Integration
http://open.iona.com

Re: Slow performance of NMS api compared to Java

Posted by Vadim Chekan <ko...@gmail.com>.
Would it be a property of the connection or of the transport?
I thought connection means the JMS connection, which has nothing to do with TCP.

Vadim.

On Wed, Aug 27, 2008 at 10:33 PM, Jim Gomes <e....@gmail.com> wrote:
> [Jim Gomes's message, with the nested original post and test code, quoted in full; snipped -- see the standalone copies elsewhere in the thread]



-- 
From RFC 2631: In ASN.1, EXPLICIT tagging is implicit unless IMPLICIT
is explicitly specified

Re: Slow performance of NMS api compared to Java

Posted by Jim Gomes <e....@gmail.com>.
Since I couldn't look at your code because of the license grant issue,
I looked into what you had mentioned about the NoDelay option.  I
took a stab at adding support for turning this (and several other
socket transport options) on and off from the connection URI.  Once
you fix the license grant, I can look at your patch and integrate it
in with my changes.

The solution I am playing with would look like this:

activemq:tcp://localhost:61616?connection.NoDelay=true

This would turn off the Nagle algorithm on the socket connection.

Thanks!
-Jim



On 8/27/08, Jim Gomes <e....@gmail.com> wrote:
> [earlier message, with the nested original post and test code, quoted in full; snipped -- see the standalone copy below]

Re: Slow performance of NMS api compared to Java

Posted by Jim Gomes <e....@gmail.com>.
Hi Stefan,

Thanks for creating Jira AMQNET-109 and attaching the patch.  However,
the Grant ASF License option was not checked.  Would you re-attach the
patch and check that option?  I can then look at integrating it into
the codebase.

Thanks!
-Jim



On 8/26/08, Stefan Gmeiner <st...@pup.ch> wrote:
> [original message and test code quoted in full; snipped]