Hi,

I have been using the attached patch to look at how LWLocks relate to each other in various types of runs.


The patch adds the following fields to a LWLOCK_STATS build:

 sh_acquire_max (int):

   The maximum shared locks in series for the lock

 ex_acquire_max (int):

   The maximum exclusive locks in series for the lock

 max_waiters (int):

   The maximum number of waiters

Also attached is a sample report using FlameGraphs from a pgbench run using

 -c 80 -j 80 -T 300

See README for additional details.

If there is interest, I'll add the patch to the next CommitFest.

Thanks for considering, and any feedback is most welcome.

Best regards,
 Jesper
*** /tmp/kex1qF_lwlock.c	2015-09-15 08:53:54.457279180 -0400
--- src/backend/storage/lmgr/lwlock.c	2015-09-15 08:52:09.645283891 -0400
***************
*** 163,172 ****
--- 163,175 ----
  {
  	lwlock_stats_key key;
  	int			sh_acquire_count;
+ 	int			sh_acquire_max;
  	int			ex_acquire_count;
+ 	int			ex_acquire_max;
  	int			block_count;
  	int			dequeue_self_count;
  	int			spin_delay_count;
+ 	int			max_waiters;
  }	lwlock_stats;
  
  static HTAB *lwlock_stats_htab;
***************
*** 297,308 ****
  
  	while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
  	{
! 		fprintf(stderr,
! 				"PID %d lwlock %s %d: shacq %u exacq %u blk %u spindelay %u dequeue self %u\n",
! 				MyProcPid, LWLockTrancheArray[lwstats->key.tranche]->name,
! 				lwstats->key.instance, lwstats->sh_acquire_count,
! 				lwstats->ex_acquire_count, lwstats->block_count,
! 				lwstats->spin_delay_count, lwstats->dequeue_self_count);
  	}
  
  	LWLockRelease(&MainLWLockArray[0].lock);
--- 300,321 ----
  
  	while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
  	{
! 		if (lwstats->key.tranche == 0 && lwstats->key.instance < NUM_INDIVIDUAL_LWLOCKS)
! 			fprintf(stderr,
! 					"PID %d lwlock %s: shacq %u shmax %u exacq %u exmax %u blk %u spindelay %u dequeue self %u maxw %u\n",
! 					MyProcPid, MainLWLockNames[lwstats->key.instance],
! 					lwstats->sh_acquire_count, lwstats->sh_acquire_max,
! 					lwstats->ex_acquire_count, lwstats->ex_acquire_max,
! 					lwstats->block_count, lwstats->spin_delay_count, lwstats->dequeue_self_count,
! 					lwstats->max_waiters);
! 		else
! 			fprintf(stderr,
! 					"PID %d lwlock %s %d: shacq %u shmax %u exacq %u exmax %u blk %u spindelay %u dequeue self %u maxw %u\n",
! 					MyProcPid, LWLockTrancheArray[lwstats->key.tranche]->name,
! 					lwstats->key.instance, lwstats->sh_acquire_count, lwstats->sh_acquire_max,
! 					lwstats->ex_acquire_count, lwstats->ex_acquire_max,
! 					lwstats->block_count, lwstats->spin_delay_count, lwstats->dequeue_self_count,
! 					lwstats->max_waiters);
  	}
  
  	LWLockRelease(&MainLWLockArray[0].lock);
***************
*** 330,339 ****
--- 343,355 ----
  	if (!found)
  	{
  		lwstats->sh_acquire_count = 0;
+ 		lwstats->sh_acquire_max = 0;
  		lwstats->ex_acquire_count = 0;
+ 		lwstats->ex_acquire_max = 0;
  		lwstats->block_count = 0;
  		lwstats->dequeue_self_count = 0;
  		lwstats->spin_delay_count = 0;
+ 		lwstats->max_waiters = 0;
  	}
  	return lwstats;
  }
***************
*** 774,779 ****
--- 790,798 ----
  LWLockQueueSelf(LWLock *lock, LWLockMode mode)
  {
  #ifdef LWLOCK_STATS
+ 	bool include;
+ 	int counter, size;
+ 	dlist_iter iter;
  	lwlock_stats *lwstats;
  
  	lwstats = get_lwlock_stats_entry(lock);
***************
*** 792,797 ****
--- 811,856 ----
  
  #ifdef LWLOCK_STATS
  	lwstats->spin_delay_count += SpinLockAcquire(&lock->mutex);
+ 
+ 	/*
+ 	 * We scan the list of waiters from the back in order to find
+ 	 * out how many of the same lock type are waiting for a lock.
+ 	 * Similar types have the potential to be grouped together.
+ 	 *
+ 	 * We also count the number of waiters, including ourselves.
+ 	 */
+ 	include = true;
+ 	size = 1;
+ 	counter = 1;
+ 
+ 	dlist_reverse_foreach(iter, &lock->waiters)
+ 	{
+ 		if (include)
+ 		{
+ 			PGPROC	   *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
+ 
+ 			if (waiter->lwWaitMode == mode)
+ 				counter += 1;
+ 			else
+ 				include = false;
+ 		}
+ 		
+ 		size += 1;
+ 	}
+ 
+ 	if (mode == LW_EXCLUSIVE || mode == LW_WAIT_UNTIL_FREE)
+ 	{
+ 		if (counter > lwstats->ex_acquire_max)
+ 			lwstats->ex_acquire_max = counter;
+ 	}
+ 	else if (mode == LW_SHARED)
+ 	{
+ 		if (counter > lwstats->sh_acquire_max)
+ 			lwstats->sh_acquire_max = counter;
+ 	}
+ 
+ 	if (size > lwstats->max_waiters)
+ 		lwstats->max_waiters = size;
  #else
  	SpinLockAcquire(&lock->mutex);
  #endif
***************
*** 943,951 ****
--- 1002,1018 ----
  #ifdef LWLOCK_STATS
  	/* Count lock acquisition attempts */
  	if (mode == LW_EXCLUSIVE)
+ 	{
  		lwstats->ex_acquire_count++;
+ 		if (lwstats->ex_acquire_max == 0)
+ 			lwstats->ex_acquire_max = 1;
+ 	}
  	else
+ 	{
  		lwstats->sh_acquire_count++;
+ 		if (lwstats->sh_acquire_max == 0)
+ 			lwstats->sh_acquire_max = 1;
+ 	}
  #endif   /* LWLOCK_STATS */
  
  	/*

Attachment: sample_report.tar.gz
Description: application/gzip

-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers

Reply via email to