Re: [HACKERS] Memory Accounting v11

2015-07-10 Thread Jeff Davis
On Thu, 2015-07-09 at 14:41 +1200, David Rowley wrote:

> Are you going to implement this? Or are you happy that single-level
> context tracking is the right thing?
> I'm going to mark the patch as waiting on author for now.

Attached is a version of the patch that does multi-level tracking, v12. It
does this in a simpler way, just like an earlier version of the patch:
it simply traverses up the chain and adds to each parent in a
"total_allocated" field.

The idea you mentioned is a possible optimization of this idea, but it
only makes sense if I'm able to detect a difference between v11
(single-level) and v12 (multi-level). I tried Robert's test[1] again and
I didn't see a difference on my workstation (technically, v12 came out
the fastest, which means I'm just seeing noise anyway), so I can't
evaluate whether your idea will improve things.

From talking with a few people at PGCon, I gather that small noisy
differences in CPU timings can appear for almost any tweak to the code, and
aren't necessarily cause for major concern.

Regards,
Jeff Davis

[1] pgbench -i -s 300, then do the following 3 times each for master,
v11, and v12, and take the median of logged traces:

start server; set trace_sort=on; reindex index pgbench_accounts_pkey;
stop server

*** a/src/backend/utils/mmgr/aset.c
--- b/src/backend/utils/mmgr/aset.c
***
*** 242,247  typedef struct AllocChunkData
--- 242,249 
  #define AllocChunkGetPointer(chk)	\
  	((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
  
+ static void update_alloc(MemoryContext context, int64 size);
+ 
  /*
   * These functions implement the MemoryContext API for AllocSet contexts.
   */
***
*** 500,505  AllocSetContextCreate(MemoryContext parent,
--- 502,510 
  	 errdetail("Failed while creating memory context \"%s\".",
  			   name)));
  		}
+ 
+ 		update_alloc((MemoryContext) set, blksize);
+ 
  		block->aset = set;
  		block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
  		block->endptr = ((char *) block) + blksize;
***
*** 590,595  AllocSetReset(MemoryContext context)
--- 595,602 
  		else
  		{
  			/* Normal case, release the block */
+ 			update_alloc(context, -(block->endptr - ((char*) block)));
+ 
  #ifdef CLOBBER_FREED_MEMORY
  			wipe_mem(block, block->freeptr - ((char *) block));
  #endif
***
*** 635,640  AllocSetDelete(MemoryContext context)
--- 642,654 
  #ifdef CLOBBER_FREED_MEMORY
  		wipe_mem(block, block->freeptr - ((char *) block));
  #endif
+ 
+ 		/*
+ 		 * Parent is already unlinked from this context, so we can't use
+ 		 * update_alloc(). The caller should have already subtracted from the
+ 		 * parents' total_allocated.
+ 		 */
+ 
  		free(block);
  		block = next;
  	}
***
*** 672,677  AllocSetAlloc(MemoryContext context, Size size)
--- 686,694 
  		block = (AllocBlock) malloc(blksize);
  		if (block == NULL)
  			return NULL;
+ 
+ 		update_alloc(context, blksize);
+ 
  		block->aset = set;
  		block->freeptr = block->endptr = ((char *) block) + blksize;
  
***
*** 861,866  AllocSetAlloc(MemoryContext context, Size size)
--- 878,885 
  		if (block == NULL)
  			return NULL;
  
+ 		update_alloc(context, blksize);
+ 
  		block->aset = set;
  		block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
  		block->endptr = ((char *) block) + blksize;
***
*** 964,969  AllocSetFree(MemoryContext context, void *pointer)
--- 983,991 
  			set->blocks = block->next;
  		else
  			prevblock->next = block->next;
+ 
+ 		update_alloc(context, -(block->endptr - ((char*) block)));
+ 
  #ifdef CLOBBER_FREED_MEMORY
  		wipe_mem(block, block->freeptr - ((char *) block));
  #endif
***
*** 1077,1082  AllocSetRealloc(MemoryContext context, void *pointer, Size size)
--- 1099,1105 
  		AllocBlock	prevblock = NULL;
  		Size		chksize;
  		Size		blksize;
+ 		Size		oldblksize;
  
  		while (block != NULL)
  		{
***
*** 1094,1102  AllocSetRealloc(MemoryContext context, void *pointer, Size size)
--- 1117,1130 
  		/* Do the realloc */
  		chksize = MAXALIGN(size);
  		blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
+ 		oldblksize = block->endptr - ((char *)block);
+ 
  		block = (AllocBlock) realloc(block, blksize);
  		if (block == NULL)
  			return NULL;
+ 
+ 		update_alloc(context, blksize - oldblksize);
+ 
  		block->freeptr = block->endptr = ((char *) block) + blksize;
  
  		/* Update pointers since block has likely been moved */
***
*** 1361,1363  AllocSetCheck(MemoryContext context)
--- 1389,1407 
  }
  
  #endif   /* MEMORY_CONTEXT_CHECKING */
+ 
+ /*
+  * Update self_allocated and total_allocated for the context. Size can be
+  * positive or negative.
+  */
+ static void
+ update_alloc(MemoryContext context, int64 size)
+ {
+ 	MemoryContext	parent;
+ 
+ 	context->self_allocated += size;
+ 
+ 	/* update total_allocated for self and all parents */
+ 	for (parent
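
A plausible sketch of the remainder of the truncated function above, based
on its comment and the description of v12 (the loop itself is an assumption,
not patch text; total_allocated and self_allocated are the fields the patch
adds to MemoryContextData):

static void
update_alloc(MemoryContext context, int64 size)
{
	MemoryContext	parent;

	context->self_allocated += size;

	/* update total_allocated for self and all parents */
	for (parent = context; parent != NULL; parent = parent->parent)
		parent->total_allocated += size;
}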

Re: [HACKERS] polymorphic types - enforce casting to most common type automatically

2015-07-10 Thread Pavel Stehule
2015-07-10 23:19 GMT+02:00 Pavel Stehule :

> Hi
>
> 2015-07-10 18:43 GMT+02:00 Tom Lane :
>
>> Pavel Stehule  writes:
>> > currently, functions with more than one polymorphic argument are relatively
>> > fragile due to the missing cast to the most common type. Some of our
>> > "functions", like "coalesce", can do it, so it is surprising for our users.
>>
>> > our custom polymorphic function foo(anyelement, anyelement) works well
>> > for foo(10,20) or foo(10.1, 20.1), but not for foo(10, 20.1)
>>
>> > I am thinking we can add a most-common-type search stage without
>> > breaking backward compatibility.
>>
>> > What do you think about it?
>>
>> I see nobody's replied to this, still, so ...
>>
>> I think this is simply a bad idea, for a couple of reasons:
>>
>> 1. It will reduce predictability of type resolution.
>>
>
> I don't think so - it uses the same mechanism we already have - it doesn't
> introduce anything new.
>
>
>>
>> 2. It will greatly increase the risk of getting "ambiguous function call"
>> failures, because of adding more possible ways to match the same call.
>> (The argument that we'd not break backwards compatibility is thus bogus.)
>>
>
> Maybe I didn't describe my idea well.
>
> This can generate new conflicts only where the new behavior differs from
> the old behavior, and a different old behavior is not possible - it fails
> with an error now. So it is possible that, with this patch, some queries
> will fail with an ambiguity conflict where the same code fails with
> "function does not exist" now. So if there is some possibility of breaking
> compatibility, it is that one error can be replaced by a different error.
> It is a known best practice not to mix polymorphic parameters and function
> overloading.
>
> Why I need it - the motivation for returning to this topic - is the issue
> https://github.com/orafce/orafce/issues/17 and some questions about the
> same topic on Stack Overflow.
>
> There is a workaround with the "any" type - but I have to repeat a lot of
> work that the core analyzer can do, and the code in the extension is
> longer. And I have to write the extension in C.
>

It's worse - the workaround with "any" isn't good enough for implementing the
NVL function, because I cannot change the result type inside the function.

I know you dislike a parser/analyzer hook, but is there any other
possibility? Some hint in pg_class?

Regards

Pavel


>
>
>>
>> Worth noting for onlookers is that the submitted patch seems to be using
>> UNION-style rules to determine a common type for anyelement arguments,
>> not just counting the "most common" type among the arguments as you might
>> think from the subject.  But that doesn't make things any better.
>>
>
> it is related only to polymorphic types.
>
>>
>> An example of what would presumably happen if we adopted this sort of rule
>> (I've not checked whether the patch as written does this, but it would
>> logically follow) is that appending a float to an integer array would
>> cause the whole array to be silently promoted to float, with attendant
>> possible loss of precision for existing array elements.
>
>
> it is based on select_common_type() - so it uses only the available implicit
> casts.
>
>
>> That does not
>> seem to me to satisfy the principle of least astonishment.  Related,
>> even more astonishing behaviors could ensue from type promotion in
>> anyrange situations, eg range_contains_elem(anyrange,anyelement).
>> So I think it's just as well that we make people write a cast to show
>> what they mean in such cases.
>>
>
> Polymorphic parameters create a much bigger space - if somebody needs
> less variability, then he doesn't use polymorphic params.
>
> I understand that in some situations we prefer strict handling of
> polymorphic parameters - theoretically we could introduce a new option
> that enforces it.
>
>
>> In fact, if you discount cases involving anyarray and anyrange, we do not
>> have *any* built-in functions for which this patch would do anything,
>> except for the three-argument forms of lead() and lag(), where I think it
>> would be rather astonishing to let the default-value argument control the
>> result type, anyway.  This leaves me feeling dubious both about the actual
>> scope of the use-case for such a change, and about whether "use the UNION
>> rules" would be a sensible heuristic even if we wanted to do something.
>> There seem to be too many cases where it's not a great idea to put all the
>> arguments on exactly equal footing for deciding what common type to
>> choose.
>>
>
> A very common problem with polymorphic parameters is a mix of numeric and
> integer arguments. It is a known gotcha - and I am trying to solve
> it.
>
> Regards
>
> Pavel
>
>
>>
>> So I'm inclined to mark this patch as Rejected.
>>
>> regards, tom lane
>>
>
>


Re: [HACKERS] more RLS oversights

2015-07-10 Thread Joe Conway
On 07/03/2015 10:03 AM, Noah Misch wrote:
> (1) CreatePolicy() and AlterPolicy() omit to call assign_expr_collations() on
> the node trees.  Test case:
> 
> begin;
> set row_security = force;
> create table t (c) as values ('bar'::text);
> create policy p on t using (c < ('foo'::text COLLATE "C"));
> alter table t enable row level security;
> table pg_policy;  -- note ":inputcollid 0"
> select * from t;  -- ERROR:  could not determine which collation ...
> rollback;

The attached fixes this issue for me, but I am unsure whether we really
need/want the regression test. Given the recent push to increase test
coverage, maybe so. Any objections?

Joe


diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c
index 11efc9f..7232983 100644
*** a/src/backend/commands/policy.c
--- b/src/backend/commands/policy.c
*** CreatePolicy(CreatePolicyStmt *stmt)
*** 538,543 
--- 538,547 
  		   EXPR_KIND_WHERE,
  		   "POLICY");
  
+ 	/* Fix up collation information */
+ 	assign_expr_collations(qual_pstate, qual);
+ 	assign_expr_collations(with_check_pstate, with_check_qual);
+ 
  	/* Open pg_policy catalog */
  	pg_policy_rel = heap_open(PolicyRelationId, RowExclusiveLock);
  
*** AlterPolicy(AlterPolicyStmt *stmt)
*** 681,686 
--- 685,693 
  	EXPR_KIND_WHERE,
  	"POLICY");
  
+ 		/* Fix up collation information */
+ 		assign_expr_collations(qual_pstate, qual);
+ 
  		qual_parse_rtable = qual_pstate->p_rtable;
  		free_parsestate(qual_pstate);
  	}
*** AlterPolicy(AlterPolicyStmt *stmt)
*** 701,706 
--- 708,716 
  			   EXPR_KIND_WHERE,
  			   "POLICY");
  
+ 		/* Fix up collation information */
+ 		assign_expr_collations(with_check_pstate, with_check_qual);
+ 
  		with_check_parse_rtable = with_check_pstate->p_rtable;
  		free_parsestate(with_check_pstate);
  	}
diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out
index 4073c1b..eabfd93 100644
*** a/src/test/regress/expected/rowsecurity.out
--- b/src/test/regress/expected/rowsecurity.out
*** ERROR:  permission denied for relation c
*** 2730,2735 
--- 2730,2756 
  RESET SESSION AUTHORIZATION;
  DROP TABLE copy_t;
  --
+ -- Collation support
+ --
+ BEGIN;
+ SET row_security = force;
+ CREATE TABLE coll_t (c) AS VALUES ('bar'::text);
+ CREATE POLICY coll_p ON coll_t USING (c < ('foo'::text COLLATE "C"));
+ ALTER TABLE coll_t ENABLE ROW LEVEL SECURITY;
+ SELECT (string_to_array(polqual, ':'))[7] AS inputcollid FROM pg_policy WHERE polrelid = 'coll_t'::regclass;
+    inputcollid    
+ ------------------
+  inputcollid 950 
+ (1 row)
+ 
+ SELECT * FROM coll_t;
+   c  
+ -----
+  bar
+ (1 row)
+ 
+ ROLLBACK;
+ --
  -- Clean up objects
  --
  RESET SESSION AUTHORIZATION;
diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql
index fdd9b89..782824a 100644
*** a/src/test/regress/sql/rowsecurity.sql
--- b/src/test/regress/sql/rowsecurity.sql
*** RESET SESSION AUTHORIZATION;
*** 1088,1093 
--- 1088,1105 
  DROP TABLE copy_t;
  
  --
+ -- Collation support
+ --
+ BEGIN;
+ SET row_security = force;
+ CREATE TABLE coll_t (c) AS VALUES ('bar'::text);
+ CREATE POLICY coll_p ON coll_t USING (c < ('foo'::text COLLATE "C"));
+ ALTER TABLE coll_t ENABLE ROW LEVEL SECURITY;
+ SELECT (string_to_array(polqual, ':'))[7] AS inputcollid FROM pg_policy WHERE polrelid = 'coll_t'::regclass;
+ SELECT * FROM coll_t;
+ ROLLBACK;
+ 
+ --
  -- Clean up objects
  --
  RESET SESSION AUTHORIZATION;



Re: [HACKERS] more RLS oversights

2015-07-10 Thread Noah Misch
On Fri, Jul 10, 2015 at 03:08:53PM -0700, Joe Conway wrote:
> On 07/03/2015 10:03 AM, Noah Misch wrote:
> > (1) CreatePolicy() and AlterPolicy() omit to call
> > assign_expr_collations() on the node trees.

> The attached fixes this issue for me, but I am unsure whether we really
> need/want the regression test. Given the recent push to increase test
> coverage, maybe so.

I wouldn't remove the test from your patch.




Re: [HACKERS] Freeze avoidance of very large table.

2015-07-10 Thread Jim Nasby

On 7/10/15 4:46 AM, Simon Riggs wrote:

On 10 July 2015 at 09:49, Sawada Masahiko <sawada.m...@gmail.com> wrote:


> It is a minor thing, but if there is no other use for this fourth
> "bit-space", it seems a shame to waste it when there is some use for it.
> I haven't looked at the code around this area to know how hard it would
> be to implement the setting and clearing of the bit.

I think so too; we would be able to use the unused fourth bit status
efficiently.
Should I include this improvement in this patch?
I think this topic should be discussed on another thread after this feature
is committed.


The impossible state acts as a diagnostic check for us to ensure the
bitmap is not itself corrupt.

-1 for using it for another purpose.


AFAICS an empty page is only interesting for vacuum truncate, which is a 
very short-term thing. It would be better to find a way to handle that 
differently.


In any case, that should definitely be a separate discussion from this 
patch.

--
Jim Nasby, Data Architect, Blue Treble Consulting, Austin TX
Data in Trouble? Get it in Treble! http://BlueTreble.com




Re: [HACKERS] pg_upgrade + Extensions

2015-07-10 Thread Andrew Dunstan
On Fri, Jul 10, 2015 at 5:05 PM, David E. Wheeler 
wrote:

> On Jul 10, 2015, at 11:32 AM, Smitha Pamujula <
> smitha.pamuj...@iovation.com> wrote:
>
>
> > Your installation references loadable libraries that are missing from the
> > new installation.  You can add these libraries to the new installation,
> > or remove the functions using them from the old installation.  A list of
> > problem libraries is in the file:
> > loadable_libraries.txt
> >
> > Failure, exiting
> > [postgres@pdxdvrptsrd04 ~]$ cat loadable_libraries.txt
> > Could not load library "json_build"
> > ERROR:  could not access file "json_build": No such file or directory
>
> So you drop the json_build extension before upgrading, but pg_upgrade
> still complains that it’s missing? That seems odd.
>
>
>

Are you sure the extension was uninstalled from every database in the
cluster? This seems likely to occur if you forgot to uninstall it from
some database (e.g. template1).

cheers

andrew


[HACKERS] Reusing abbreviated keys during second pass of ordered [set] aggregates

2015-07-10 Thread Peter Geoghegan
Currently, there are certain aggregates that sort some tuples before
making a second pass over their memtuples array (through the tuplesort
"gettuple" interface), calling one or more attribute equality operator
functions as they go. For example, this occurs during the execution of
the following queries:

-- Salary is a numeric column:
SELECT count(DISTINCT salary) AS distinct_compensation_levels FROM employees;
-- Get most common distinct salary in organization:
SELECT mode() WITHIN GROUP (ORDER BY salary) AS
most_common_compensation FROM employees;

Since these examples involve numeric comparisons, the equality
operator calls involved in that second pass are expensive. There is no
equality fast-path for numeric equality as there is for text; I
imagine that in practice most calls to texteq() are resolved based on
raw datum size mismatch, which is dirt cheap (while the alternative, a
memcmp(), is still very cheap). Furthermore, numeric abbreviated keys
have a good chance of capturing the entropy of the original values
more effectively than we might expect with a text attribute. That
means that in the case of numeric, even sorted, adjacent pairs of
abbreviated keys have a pretty good chance of being distinct in the
real world (if their corresponding authoritative values are themselves
distinct).
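
That fast path is simple enough to sketch (simplified, detoasting ignored;
the real texteq() in varlena.c differs in detail):

#include "postgres.h"

/*
 * Datums of different raw lengths cannot be equal, so memcmp() only runs
 * for same-length values.
 */
static bool
text_equal_sketch(text *a, text *b)
{
	Size		len1 = VARSIZE_ANY_EXHDR(a);
	Size		len2 = VARSIZE_ANY_EXHDR(b);

	if (len1 != len2)
		return false;		/* resolved on raw size mismatch: dirt cheap */

	return memcmp(VARDATA_ANY(a), VARDATA_ANY(b), len1) == 0;
}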

I think we can avoid this expense much of the time.
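
The core trick, sketched with an illustrative helper (the actual patch
works inside tuplesort.c and its callers):

#include "postgres.h"
#include "fmgr.h"

/*
 * Binary inequality of two abbreviated keys proves the authoritative
 * values are distinct, so the expensive equality call (e.g. numeric_eq)
 * can be skipped.  Equal abbreviated keys prove nothing, so fall through
 * to the full check.
 */
static bool
values_equal_sketch(Datum abbrev1, Datum abbrev2,
					Datum full1, Datum full2, FmgrInfo *eqproc)
{
	if (abbrev1 != abbrev2)
		return false;		/* distinct for certain: no eqproc call */

	return DatumGetBool(FunctionCall2(eqproc, full1, full2));
}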

Patch, Performance
==================

Attached patch makes several cases like this try and get away with
only using the pre-existing abbreviated keys from the sort (to
determine inequality). On my laptop, it removes 15% - 25% of the run
time of cases similar to the above, with a high cardinality numeric
attribute of uniformly distributed values. As usual with my work on
sorting, multiple concurrent sorts by multiple backends are helped
most; that puts the improvements for realistic cases involving a text
attribute at about 5% (the texteq() memcmp() is cheap, but not free)
with 4 clients using pgbench.

The numeric cases above are now at about the same speed as similar
queries that use a double precision floating point number attribute.
9.5-era testing shows that sort-heavy numeric cases are often about as
fast as "equivalent" double cases, so it seems worthwhile to mostly
eliminate this additional numeric overhead that cases like the above
still incur.

I imagine citext would benefit much more here once it has abbreviation
support (which should be a TODO item).

Omissions
---------

I haven't attempted to accelerate AGG_SORTED strategy aggregates,
which looked like it would require more invasive changes. It seemed
like more trouble than it was worth, given that crossing group
boundaries is something that won't occur very often with typical
usage, and given the fact that text is naturally pretty fast there
anyway (who groups by a numeric attribute?). Similarly, I did not
teach setop_retrieve_direct() to consider the possibility that a set
operation (like a UNION) could reuse abbreviated keys if it happens to
be fed by a sort node. Finally, I did not accelerate the merging of
spools during B-Tree index builds, even though that actually seems
quite possible -- that case just seemed marginal. We currently have no
test coverage for that case [1], so I guess its performance can't be
too important.

SortSupport contract
====================

As explained in the first patch's commit message, I revised the
contract that SortSupport has with opclasses. Should this proposal be
accepted, we should backpatch the first patch to 9.5, to prevent
anyone from building abbreviation support that won't work on 9.6 (even
if that is very unlikely). In general, it seems pretty silly to me to
not make use of the abbreviated keys that the sort already went to the
trouble of building, and it seems like the revision to the SortSupport
contract is not likely to notably limit any interesting cases in the
future, so I think that this will be uncontroversial.

[1] 
http://www.postgresql.org/message-id/flat/cam3swztvetvdysxee7py-8ar+-+xrapkjcze-fh_v+zwxmu...@mail.gmail.com#cam3swztvetvdysxee7py-8ar+-+xrapkjcze-fh_v+zwxmu...@mail.gmail.com
-- 
Peter Geoghegan
From 7ae66ebcda9de24b4b148ad90630d89a401feb68 Mon Sep 17 00:00:00 2001
From: Peter Geoghegan 
Date: Mon, 6 Jul 2015 13:37:26 -0700
Subject: [PATCH 2/2] Reuse abbreviated keys in ordered [set] aggregates

When processing ordered aggregates following a sort that could make use
of the abbreviated key optimization, only call the equality operator to
compare successive pairs of tuples when their abbreviated keys were not
equal.  Only strict abbreviated key binary inequality is considered,
which is safe.
---
 src/backend/catalog/index.c|  2 +-
 src/backend/executor/nodeAgg.c | 20 
 src/backend/executor/nodeSort.c|  2 +-
 src/backend/utils/adt/orderedsetaggs.c | 33 ++
 src/backend/utils/sort/tuplesort.c | 42 --
 src/include/utils/tuplesort.h   

Re: [HACKERS] pg_upgrade + Ubuntu

2015-07-10 Thread Jim Nasby

On 7/10/15 2:20 PM, Mike Blackwell wrote:

> postgres@ly19:~$ pg_config
> You need to install postgresql-server-dev-X.Y for building a server-side
> extension or libpq-dev for building a client-side application.
>
> Which is worse: having to install yet another package, or having a command
> line option?

It seems to me that this is a Debian packaging issue, not an upstream
issue, isn't it?  If you want to fix the problem in this way, then
surely whatever package contains pg_upgrade should also contain
pg_config.


Indeed.  An interesting packaging choice.  I'd think it belongs to an admin
category along with pg_upgrade, pg_dump, etc., rather than a development
package.  Surely it could be useful for admin scripts?


What makes it far worse is that pg_config isn't wrapped like the rest of 
the tools are. So you can only have one "development" package installed 
at once. We've pushed to change this, to no effect. :/

--
Jim Nasby, Data Architect, Blue Treble Consulting, Austin TX
Data in Trouble? Get it in Treble! http://BlueTreble.com




Re: [HACKERS] polymorphic types - enforce casting to most common type automatically

2015-07-10 Thread Pavel Stehule
Hi

2015-07-10 18:43 GMT+02:00 Tom Lane :

> Pavel Stehule  writes:
> > currently, functions with more than one polymorphic argument are relatively
> > fragile due to the missing cast to the most common type. Some of our
> > "functions", like "coalesce", can do it, so it is surprising for our users.
>
> > our custom polymorphic function foo(anyelement, anyelement) works well for
> > foo(10,20) or foo(10.1, 20.1), but not for foo(10, 20.1)
>
> > I am thinking we can add a most-common-type search stage without
> > breaking backward compatibility.
>
> > What do you think about it?
>
> I see nobody's replied to this, still, so ...
>
> I think this is simply a bad idea, for a couple of reasons:
>
> 1. It will reduce predictability of type resolution.
>

I don't think so - it uses the same mechanism we already have - it doesn't
introduce anything new.


>
> 2. It will greatly increase the risk of getting "ambiguous function call"
> failures, because of adding more possible ways to match the same call.
> (The argument that we'd not break backwards compatibility is thus bogus.)
>

Maybe I didn't describe my idea well.

This can generate new conflicts only where the new behavior differs from
the old behavior, and a different old behavior is not possible - it fails
with an error now. So it is possible that, with this patch, some queries
will fail with an ambiguity conflict where the same code fails with
"function does not exist" now. So if there is some possibility of breaking
compatibility, it is that one error can be replaced by a different error.
It is a known best practice not to mix polymorphic parameters and function
overloading.

Why I need it - the motivation for returning to this topic - is the issue
https://github.com/orafce/orafce/issues/17 and some questions about the
same topic on Stack Overflow.

There is a workaround with the "any" type - but I have to repeat a lot of
work that the core analyzer can do, and the code in the extension is longer.
And I have to write the extension in C.


>
> Worth noting for onlookers is that the submitted patch seems to be using
> UNION-style rules to determine a common type for anyelement arguments,
> not just counting the "most common" type among the arguments as you might
> think from the subject.  But that doesn't make things any better.
>

it is related only to polymorphic types.

>
> An example of what would presumably happen if we adopted this sort of rule
> (I've not checked whether the patch as written does this, but it would
> logically follow) is that appending a float to an integer array would
> cause the whole array to be silently promoted to float, with attendant
> possible loss of precision for existing array elements.


it is based on select_common_type() - so it uses only the available implicit
casts.
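
A minimal sketch of that resolution step (illustrative only, not the
submitted patch):

#include "postgres.h"
#include "parser/parse_coerce.h"

/*
 * Resolve one common type for the expressions bound to anyelement
 * parameters with the same machinery used for UNION/CASE/COALESCE,
 * which considers only implicit casts.
 */
static Oid
common_type_for_anyelement(ParseState *pstate, List *args)
{
	/* e.g. picks numeric for (int4, numeric); raises an error if none */
	return select_common_type(pstate, args, "POLYMORPHIC", NULL);
}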


> That does not
> seem to me to satisfy the principle of least astonishment.  Related,
> even more astonishing behaviors could ensue from type promotion in
> anyrange situations, eg range_contains_elem(anyrange,anyelement).
> So I think it's just as well that we make people write a cast to show
> what they mean in such cases.
>

Polymorphic parameters create a much bigger space - if somebody needs less
variability, then he doesn't use polymorphic params.

I understand that in some situations we prefer strict handling of polymorphic
parameters - theoretically we could introduce a new option that enforces it.


> In fact, if you discount cases involving anyarray and anyrange, we do not
> have *any* built-in functions for which this patch would do anything,
> except for the three-argument forms of lead() and lag(), where I think it
> would be rather astonishing to let the default-value argument control the
> result type, anyway.  This leaves me feeling dubious both about the actual
> scope of the use-case for such a change, and about whether "use the UNION
> rules" would be a sensible heuristic even if we wanted to do something.
> There seem to be too many cases where it's not a great idea to put all the
> arguments on exactly equal footing for deciding what common type to
> choose.
>

A very common problem with polymorphic parameters is a mix of numeric and
integer arguments. It is a known gotcha - and I am trying to solve
it.

Regards

Pavel


>
> So I'm inclined to mark this patch as Rejected.
>
> regards, tom lane
>


Re: [HACKERS] pg_upgrade + Extensions

2015-07-10 Thread David E. Wheeler
On Jul 10, 2015, at 11:32 AM, Smitha Pamujula  
wrote:

> I just tested and yes that worked. Once we have the new library for the 
> hostname, pg_upgrade is not complaining about the hostname extension. 

Great, thank you Smitha -- and Tom for the pointer.

> Your installation references loadable libraries that are missing from the
> new installation.  You can add these libraries to the new installation,
> or remove the functions using them from the old installation.  A list of
> problem libraries is in the file:
> loadable_libraries.txt
> 
> Failure, exiting
> [postgres@pdxdvrptsrd04 ~]$ cat loadable_libraries.txt
> Could not load library "json_build"
> ERROR:  could not access file "json_build": No such file or directory

So you drop the json_build extension before upgrading, but pg_upgrade still 
complains that it’s missing? That seems odd.

Best,

David





Re: [HACKERS] Additional role attributes && superuser review

2015-07-10 Thread Heikki Linnakangas

On 05/08/2015 07:35 AM, Stephen Frost wrote:

Gavin,

* Gavin Flower (gavinflo...@archidevsys.co.nz) wrote:

What if I had a company with several subsidiaries using the same
database, and want to prefix roles and other things with the
subsidiary's initials? (I am not saying this would be a good
architecture!!!)


If you admit that it's not a good solution then I'm not quite sure how
much we really want to worry about it. :)


For example if one subsidiary was called 'Perfect Gentleman', so I
would want roles prefixed by 'pg_' and would be annoyed if I
couldn't!


You might try creating a schema for that user...  You'll hopefully find
it difficult to do. :)

In consideration of the fact that you can't create schemas which start
with "pg_" and therefore the default search_path wouldn't work for that
user, and that we also reserve "pg_" for tablespaces, I'm not inclined
to worry too much about this case.  Further, if we accept this argument,
then we simply can't ever provide additional default or system roles,
ever.  That'd be a pretty narrow corner to have painted ourselves into.


Well, you could still provide them through some other mechanism, like 
requiring you to type "SYSTEM ROLE pg_backup" any time you mean that magic 
role. But I agree, reserving pg_* is much better. I wish we had done it 
when we invented roles (6.5?), so there would be no risk that you would 
upgrade from a system that already has a "pg_foo" role. But I think it'd 
still be OK.


I agree with Robert's earlier point that this needs to be split into 
multiple patches, which can then be reviewed and discussed separately. 
Pending that, I'm going to mark this as "Waiting on author" in the 
commitfest.


- Heikki





Re: [HACKERS] PATCH: index-only scans with partial indexes

2015-07-10 Thread Tom Lane
Tomas Vondra  writes:
> currently partial indexes end up not using index only scans in most 
> cases, because check_index_only() is overly conservative, as explained 
> in this comment:
> ...

> I've done a bunch of tests, and I do see a small (hardly noticeable)
> increase in planning time with a long list of WHERE clauses, because all
> those need to be checked against the index predicate. Not sure if this
> is what's meant by 'quite expensive' in the comment. Moreover, this was
> more than compensated for by the IOS benefits (even with everything in RAM).

> But maybe it's possible to fix that somehow? For example, we're 
> certainly doing those checks elsewhere when deciding which clauses need 
> to be evaluated at run-time, so maybe we could cache that somehow?

The key problem here is that you're doing those proofs vastly earlier than
before, for indexes that might not get used at all in the final plan.
If you do some tests with multiple partial indexes you will probably see
a bigger planning-time penalty.

Perhaps we should bite the bullet and do it anyway, but I'm pretty
suspicious of any claim that the planning cost is minimal.

regards, tom lane




Re: [HACKERS] pg_upgrade + Ubuntu

2015-07-10 Thread Joshua D. Drake


On 07/10/2015 12:10 PM, Alvaro Herrera wrote:


It seems to me that this is a Debian packaging issue, not an upstream
issue, isn't it?  If you want to fix the problem in this way, then
surely whatever package contains pg_upgrade should also contain
pg_config.

Why are you not using pg_upgradecluster anyway?  There's a mode to use
pg_upgrade there.



Ignorance. I still remember pg_upgradecluster from the pg_dumpall days.

JD


--
Command Prompt, Inc. - http://www.commandprompt.com/  503-667-4564
PostgreSQL Centered full stack support, consulting and development.
Announcing "I'm offended" is basically telling the world you can't
control your own emotions, so everyone else should do it for you.




Re: [HACKERS] pg_upgrade + Ubuntu

2015-07-10 Thread Mike Blackwell
On Fri, Jul 10, 2015 at 2:10 PM, Alvaro Herrera 
 wrote:

> Joshua D. Drake wrote:
> >
> > On 07/10/2015 11:01 AM, Mike Blackwell wrote:
> > >Does pg_config show the correct location?
> > Good idea but:
> >
> > postgres@ly19:~$ pg_config
> > You need to install postgresql-server-dev-X.Y for building a server-side
> > extension or libpq-dev for building a client-side application.
> >
> > Which is worse: having to install yet another package, or having a command
> > line option?
>
> It seems to me that this is a Debian packaging issue, not an upstream
> issue, isn't it?  If you want to fix the problem in this way, then
> surely whatever package contains pg_upgrade should also contain
> pg_config.
>
>
Indeed.  An interesting packaging choice.  I'd think it belongs to an admin
category along with pg_upgrade, pg_dump, etc., rather than a development
package.  Surely it could be useful for admin scripts?

__
*Mike Blackwell | Technical Analyst, Distribution Services/Rollout
Management | RR Donnelley*
1750 Wallace Ave | St Charles, IL 60174-3401
Office: 630.313.7818
mike.blackw...@rrd.com
http://www.rrdonnelley.com



Re: [HACKERS] pg_upgrade + Ubuntu

2015-07-10 Thread Alvaro Herrera
Joshua D. Drake wrote:
> 
> On 07/10/2015 11:01 AM, Mike Blackwell wrote:
> >Does pg_config show the correct location?  If so, perhaps pg_upgrade
> >could get the .conf location the same way rather than requiring a
> >command line option.
> 
> Good idea but:
> 
> postgres@ly19:~$ pg_config
> You need to install postgresql-server-dev-X.Y for building a server-side
> extension or libpq-dev for building a client-side application.
> 
> Which is worse: having to install yet another package, or having a command
> line option?

It seems to me that this is a Debian packaging issue, not an upstream
issue, isn't it?  If you want to fix the problem in this way, then
surely whatever package contains pg_upgrade should also contain
pg_config.

Why are you not using pg_upgradecluster anyway?  There's a mode to use
pg_upgrade there.

-- 
Álvaro Herrera                http://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services




Re: [HACKERS] Minor issue with BRIN regression tests

2015-07-10 Thread Peter Geoghegan
On Tue, Jun 2, 2015 at 3:11 PM, Peter Geoghegan  wrote:
> Here is another patch, this time removing a useless ON CONFLICT DO UPDATE 
> test.

Can someone commit this, please?

-- 
Peter Geoghegan




Re: [HACKERS] pg_upgrade + Ubuntu

2015-07-10 Thread Joshua D. Drake


On 07/10/2015 11:01 AM, Mike Blackwell wrote:

Does pg_config show the correct location?  If so, perhaps pg_upgrade
could get the .conf location the same way rather than requiring a
command line option.


Good idea but:

postgres@ly19:~$ pg_config
You need to install postgresql-server-dev-X.Y for building a server-side 
extension or libpq-dev for building a client-side application.



Which is worse: having to install yet another package, or having a command 
line option?


Sincerely,

JD

--
Command Prompt, Inc. - http://www.commandprompt.com/  503-667-4564
PostgreSQL Centered full stack support, consulting and development.
Announcing "I'm offended" is basically telling the world you can't
control your own emotions, so everyone else should do it for you.




Re: [HACKERS] pg_upgrade + Extensions

2015-07-10 Thread Smitha Pamujula
Tom,

I just tested and yes that worked. Once we have the new library for the
hostname, pg_upgrade is not complaining about the hostname extension.

Another thing we found is this: we needed to drop the json_build extension
before the upgrade. However, the upgrade fails with the following error, and
the way to resolve it is to install the json_build94 library. Any ideas why
this might be?

/usr/pgsql-9.4/bin/pg_upgrade --check --link
...

Your installation references loadable libraries that are missing from the
new installation.  You can add these libraries to the new installation,
or remove the functions using them from the old installation.  A list of
problem libraries is in the file:
loadable_libraries.txt

Failure, exiting
[postgres@pdxdvrptsrd04 ~]$ cat loadable_libraries.txt
Could not load library "json_build"
ERROR:  could not access file "json_build": No such file or directory

Thanks
Smitha


On Fri, Jul 10, 2015 at 10:59 AM, Tom Lane  wrote:

> "David E. Wheeler"  writes:
> > My co-workers tell me that pg_upgrade told them to drop the colnames and
> > hostname extensions before upgrading from 9.3 to 9.4.
>
> Really?  I see nothing in the source code that would print any such
> advice.
>
> There *is* a check on whether .so libraries used by the source
> installation exist in the destination one.  But the preferred way to
> deal with that type of complaint is to install the needed libraries
> (in the destination's lib/ folder).  You shouldn't have to drop anything
> as long as you have a copy of the extension that works for the new PG
> version.
>
> regards, tom lane
>



-- 
Smitha Pamujula
Database Administrator // The Watch Woman

Direct: 503.943.6764
Mobile: 503.290.6214 // Twitter: iovation
www.iovation.com


[HACKERS] PATCH: index-only scans with partial indexes

2015-07-10 Thread Tomas Vondra

Hi,

currently partial indexes end up not using index only scans in most 
cases, because check_index_only() is overly conservative, as explained 
in this comment:


 * XXX this is overly conservative for partial indexes, since we will
 * consider attributes involved in the index predicate as required even
 * though the predicate won't need to be checked at runtime. (The same
 * is true for attributes used only in index quals, if we are certain
 * that the index is not lossy.)  However, it would be quite expensive
 * to determine that accurately at this point, so for now we take the
 * easy way out.

In other words, unless you include the columns from the index predicate in 
the index, the planner will decide index-only scans are not possible. 
That is a bit unfortunate, because those columns are not needed at 
runtime, and will only increase the index size (and the main benefit of 
partial indexes is size reduction).


The attached patch fixes this by only considering clauses that are not 
implied by the index predicate. The effect is simple:


create table t as select i as a, i as b from
  generate_series(1,1000000) s(i);

create index tidx_partial on t(b) where a > 1000 and a < 2000;

vacuum freeze t;
analyze t;

explain analyze select count(b) from t where a > 1000 and a < 2000;

QUERY PLAN
----------------------------------------------------------------------
 Aggregate  (cost=39.44..39.45 rows=1 width=4)
(actual time=8.350..8.354 rows=1 loops=1)
   ->  Index Scan using tidx_partial on t
(cost=0.28..37.98 rows=585 width=4)
(actual time=0.034..4.368 rows=999 loops=1)
 Planning time: 0.197 ms
 Execution time: 8.441 ms
(4 rows)

explain analyze select count(b) from t where a > 1000 and a < 2000;

QUERY PLAN
----------------------------------------------------------------------
 Aggregate  (cost=33.44..33.45 rows=1 width=4)
(actual time=8.019..8.023 rows=1 loops=1)
   ->  Index Only Scan using tidx_partial on t
(cost=0.28..31.98 rows=585 width=4)
(actual time=0.036..4.165 rows=999 loops=1)
 Heap Fetches: 0
 Planning time: 0.188 ms
 Execution time: 8.106 ms
(5 rows)


I've done a bunch of tests, and I do see a small (hardly noticeable) 
increase in planning time with a long list of WHERE clauses, because all 
those need to be checked against the index predicate. Not sure if this 
is what's meant by 'quite expensive' in the comment. Moreover, this was 
more than compensated for by the IOS benefits (even with everything in RAM).


But maybe it's possible to fix that somehow? For example, we're 
certainly doing those checks elsewhere when deciding which clauses need 
to be evaluated at run-time, so maybe we could cache that somehow?


regards

--
Tomas Vondra  http://www.2ndQuadrant.com
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 9da5444..113747c 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -1798,13 +1798,13 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
 	 * Check that all needed attributes of the relation are available from the
 	 * index.
 	 *
-	 * XXX this is overly conservative for partial indexes, since we will
-	 * consider attributes involved in the index predicate as required even
-	 * though the predicate won't need to be checked at runtime.  (The same is
-	 * true for attributes used only in index quals, if we are certain that
-	 * the index is not lossy.)  However, it would be quite expensive to
-	 * determine that accurately at this point, so for now we take the easy
-	 * way out.
+	 * For partial indexes we won't consider attributes involved in clauses
+	 * implied by the index predicate, as those won't be needed at runtime.
+	 *
+	 * XXX The same is true for attributes used only in index quals, if we
+	 * are certain that the index is not lossy. However, it would be quite
+	 * expensive to determine that accurately at this point, so for now we
+	 * take the easy way out.
 	 */
 
 	/*
@@ -1819,6 +1819,28 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
 	{
 		RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
 
+		/*
+		 * If the index is partial, we won't consider the clauses that are
+		 * implied by the index predicate (those are not needed at runtime).
+		 */
+		if (index->indpred != NIL)
+		{
+			bool	implied = false;
+			List   *clauses  = NIL;
+
+			/* need a list for the 'implied_by' call */
+			clauses = lappend(clauses, (Node *) rinfo->clause);
+
+			implied = predicate_implied_by(clauses, index->indpred);
+
+			/* we generally don't free memory, but well ... */
+			list_free(clauses);
+
+			/* if the clause is implied by index predicate, skip it */
+			if (implied)
+continue;
+		}
+
 		pull_varattnos((No

Re: [HACKERS] pg_upgrade + Ubuntu

2015-07-10 Thread Mike Blackwell
Does pg_config show the correct location?  If so, perhaps pg_upgrade could
get the .conf location the same way rather than requiring a command line
option.

__
*Mike Blackwell | Technical Analyst, Distribution Services/Rollout
Management | RR Donnelley*
1750 Wallace Ave | St Charles, IL 60174-3401
Office: 630.313.7818
mike.blackw...@rrd.com
http://www.rrdonnelley.com




On Fri, Jul 10, 2015 at 12:40 PM, Joshua D. Drake 
wrote:

>
> Hackers,
>
> Simple problem (I think):
>
> 9.4 version of pg_upgrade said:
>
>  "/usr/lib/postgresql/9.4/bin/pg_ctl" -w -l "pg_upgrade_server.log" -D
> "/var/lib/postgresql/9.4/main" -o "-p 9400 -b -c synchronous_commit=off -c
> fsync=off -c full_page_writes=off  -c listen_addresses='' -c
> unix_socket_permissions=0700 -c
> unix_socket_directories='/var/lib/postgresql'" start
>
> postgres cannot access the server configuration file
> "/var/lib/postgresql/9.4/main/postgresql.conf": No such file or directory
>
> The issue is Debian/Ubuntu etc... don't have a postgresql.conf in the
> cluster. They keep it separately in /etc/postgresql.
>
> Could we get a flag that allows us to specifically point to where the conf
> files are?
>
> JD
> --
> Command Prompt, Inc. - http://www.commandprompt.com/  503-667-4564
> PostgreSQL Centered full stack support, consulting and development.
> Announcing "I'm offended" is basically telling the world you can't
> control your own emotions, so everyone else should do it for you.
>
>


Re: [HACKERS] pg_upgrade + Extensions

2015-07-10 Thread Tom Lane
"David E. Wheeler"  writes:
> My co-workers tell me that pg_upgrade told them to drop the colnames and
> hostname extensions before upgrading from 9.3 to 9.4.

Really?  I see nothing in the source code that would print any such
advice.

There *is* a check on whether .so libraries used by the source
installation exist in the destination one.  But the preferred way to
deal with that type of complaint is to install the needed libraries
(in the destination's lib/ folder).  You shouldn't have to drop anything
as long as you have a copy of the extension that works for the new PG
version.

regards, tom lane




[HACKERS] pg_upgrade + Ubuntu

2015-07-10 Thread Joshua D. Drake


Hackers,

Simple problem (I think):

9.4 version of pg_upgrade said:

 "/usr/lib/postgresql/9.4/bin/pg_ctl" -w -l "pg_upgrade_server.log" -D 
"/var/lib/postgresql/9.4/main" -o "-p 9400 -b -c synchronous_commit=off 
-c fsync=off -c full_page_writes=off  -c listen_addresses='' -c 
unix_socket_permissions=0700 -c 
unix_socket_directories='/var/lib/postgresql'" start


postgres cannot access the server configuration file 
"/var/lib/postgresql/9.4/main/postgresql.conf": No such file or directory


The issue is Debian/Ubuntu etc... don't have a postgresql.conf in the 
cluster. They keep it separately in /etc/postgresql.


Could we get a flag that allows us to specifically point to where the 
conf files are?


JD
--
Command Prompt, Inc. - http://www.commandprompt.com/  503-667-4564
PostgreSQL Centered full stack support, consulting and development.
Announcing "I'm offended" is basically telling the world you can't
control your own emotions, so everyone else should do it for you.




[HACKERS] pg_upgrade + Extensions

2015-07-10 Thread David E. Wheeler
Hackers,

My co-workers tell me that pg_upgrade told them to drop the colnames and 
hostname extensions before upgrading from 9.3 to 9.4. Fortunately, Postgres had 
 not recorded any dependencies on functions from these extensions (not sure why 
not, since we do use them, but for the moment I'm grateful), so it wasn’t a big 
deal to drop them and then add them back after finishing the upgrade. But 
frankly I don’t understand why this was necessary. It’s true that they’re C 
extensions with shared libraries, but there are separate .so files for the 9.3 
and 9.4 installs.

Would there be a way to convince pg_upgrade that extensions don’t need to be 
dropped before upgrading?

Thanks,

David





Re: [HACKERS] polymorphic types - enforce casting to most common type automatically

2015-07-10 Thread Tom Lane
Pavel Stehule  writes:
> currently, functions with more than one polymorphic argument are relatively
> fragile due to the missing cast to the most common type. Some of our
> "functions", like "coalesce", can do it, so it is surprising for our users.

> our custom polymorphic function foo(anyelement, anyelement) works well for
> foo(10,20) or foo(10.1, 20.1), but not for foo(10, 20.1)

> I am thinking we can add a most-common-type search stage without
> breaking backward compatibility.

> What do you think about it?

I see nobody's replied to this, still, so ...

I think this is simply a bad idea, for a couple of reasons:

1. It will reduce predictability of type resolution.

2. It will greatly increase the risk of getting "ambiguous function call"
failures, because of adding more possible ways to match the same call.
(The argument that we'd not break backwards compatibility is thus bogus.)

Worth noting for onlookers is that the submitted patch seems to be using
UNION-style rules to determine a common type for anyelement arguments,
not just counting the "most common" type among the arguments as you might
think from the subject.  But that doesn't make things any better.

An example of what would presumably happen if we adopted this sort of rule
(I've not checked whether the patch as written does this, but it would
logically follow) is that appending a float to an integer array would
cause the whole array to be silently promoted to float, with attendant
possible loss of precision for existing array elements.  That does not
seem to me to satisfy the principle of least astonishment.  Related,
even more astonishing behaviors could ensue from type promotion in
anyrange situations, eg range_contains_elem(anyrange,anyelement).
So I think it's just as well that we make people write a cast to show
what they mean in such cases.

In fact, if you discount cases involving anyarray and anyrange, we do not
have *any* built-in functions for which this patch would do anything,
except for the three-argument forms of lead() and lag(), where I think it
would be rather astonishing to let the default-value argument control the
result type, anyway.  This leaves me feeling dubious both about the actual
scope of the use-case for such a change, and about whether "use the UNION
rules" would be a sensible heuristic even if we wanted to do something.
There seem to be too many cases where it's not a great idea to put all the
arguments on exactly equal footing for deciding what common type to
choose.

So I'm inclined to mark this patch as Rejected.

regards, tom lane




Re: [HACKERS] RFC: replace pg_stat_activity.waiting with something more descriptive

2015-07-10 Thread Alexander Korotkov
On Fri, Jun 26, 2015 at 6:39 AM, Robert Haas  wrote:

> On Thu, Jun 25, 2015 at 9:23 AM, Peter Eisentraut  wrote:
> > On 6/22/15 1:37 PM, Robert Haas wrote:
> >> Currently, the only time we report a process as waiting is when it is
> >> waiting for a heavyweight lock.  I'd like to make that somewhat more
> >> fine-grained, by reporting the type of heavyweight lock it's awaiting
> >> (relation, relation extension, transaction, etc.).  Also, I'd like to
> >> report when we're waiting for a lwlock, and report either the specific
> >> fixed lwlock for which we are waiting, or else the type of lock (lock
> >> manager lock, buffer content lock, etc.) for locks of which there is
> >> more than one.  I'm less sure about this next part, but I think we
> >> might also want to report ourselves as waiting when we are doing an OS
> >> read or an OS write, because it's pretty common for people to think
> >> that a PostgreSQL bug is to blame when in fact it's the operating
> >> system that isn't servicing our I/O requests very quickly.
> >
> > Could that also cover waiting on network?
>
> Possibly.  My approach requires that the number of wait states be kept
> relatively small, ideally fitting in a single byte.  And it also
> requires that we insert pgstat_report_waiting() calls around the thing
> that is notionally blocking.  So, if there are a small number of
> places in the code where we do network I/O, we could stick those calls
> around those places, and this would work just fine.  But if a foreign
> data wrapper, or any other piece of code, does network I/O - or any
> other blocking operation - without calling pgstat_report_waiting(), we
> just won't know about it.
>
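
For reference, the reporting pattern being described, sketched with
illustrative names (an assumption, not the actual proposed API):

#include "postgres.h"

/* one status byte per backend in shared memory; a plain store, no locking */
static volatile uint8 *my_wait_byte;

typedef enum WaitStateSketch
{
	WAIT_NONE_SK = 0,			/* not waiting */
	WAIT_LOCK_RELATION_SK,		/* heavyweight lock on a relation */
	WAIT_LWLOCK_BUFFER_SK,		/* buffer content lwlock */
	WAIT_IO_READ_SK				/* OS read in progress */
} WaitStateSketch;

static inline void
pgstat_report_waiting_sketch(WaitStateSketch w)
{
	*my_wait_byte = (uint8) w;	/* a single-byte store needs no atomics */
}

/*
 * Usage is to bracket the notionally blocking call:
 *
 *		pgstat_report_waiting_sketch(WAIT_IO_READ_SK);
 *		nread = read(fd, buf, len);
 *		pgstat_report_waiting_sketch(WAIT_NONE_SK);
 */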

The idea of fitting wait information into a single byte and avoiding both
locking and atomic operations is attractive.
But how far can we go with it?
Can a DBA draw any conclusion from querying pg_stat_activity once, or twice?
In order to draw a conclusion about system load, one has to run a daemon or
background worker which continuously samples the current wait events.
Sampling the current wait event at a high rate also imposes some overhead on
the system, just as locking or atomic operations would.
Checking whether a backend is stuck isn't easy either. If you don't expose
how long the last wait event has been going on, it's hard to distinguish
being stuck on a particular lock from high concurrency on that lock type.

I can propose the following:

1) Expose more information about the current lock to the user. For instance,
given the duration of the current wait event, the user can determine whether
a backend is stuck on a particular event without sampling.
2) Accumulate per-backend statistics about each wait event type: number of
occurrences and total duration. With these statistics the user can identify
system bottlenecks, again without sampling.

Number #2 will be provided as a separate patch.
Number #1 requires a different concurrency model; ldus will extract it from
the "waits monitoring" patch shortly.

--
Alexander Korotkov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company


Re: [HACKERS] [PATCH] Generalized JSON output functions

2015-07-10 Thread Pavel Stehule
2015-07-10 16:16 GMT+02:00 Shulgin, Oleksandr 
:

> On Fri, Jul 10, 2015 at 4:04 PM, Pavel Stehule 
> wrote:
>
>>
>>>
 2. why did you do an indirect call via JsonOutContext?
>
> What is the benefit of
>
> dst.value(&dst, (Datum) 0, JSONTYPE_NULL, InvalidOid, InvalidOid,
> false);
>
> instead of
>
> json_out_value(&dst, )
>

>>> For consistency.  Even though we initialize the output context
>>> ourselves, there might be some code introduced between
>>> json_out_init_context() and dst.value() calls that replaces some of the
>>> callbacks, and then there would be a difference.
>>>
>>
>> Which consistency? I haven't seen this style anywhere in Postgres.
>> Isn't it premature optimization?
>>
>
> Well, one could call it premature pessimization due to dynamic call
> overhead.
>
> IMO, the fact that json_out_init_context() sets the value callback to
> json_out_value is an implementation detail that the other parts of the code
> should not rely on.  And for the Explain output, there is definitely going
> to be *some* code between context initialization and the output callbacks:
> these are done in a number of different functions.
>

Again - is it necessary? Postgres still uses modular code, not OOP code. I
can understand using this technique when I need the possibility to change
behavior. But these functions are used for printing JSON, not for printing
anything else.

Pavel


>
> --
> Alex
>
>


Re: [HACKERS] creating extension including dependencies

2015-07-10 Thread Tom Lane
Andres Freund  writes:
> On July 10, 2015 4:16:59 PM GMT+02:00, Tom Lane  wrote:
>> Would that propagate down through multiple levels of CASCADE?  (Although
>> I'm not sure it would be sensible for a non-relocatable extension to
>> depend on a relocatable one, so maybe the need doesn't arise in
>> practice.)

> I'd say so. I was thinking of adding a flag that allows passing a
> schema to a non-relocatable extension. That'd then be passed down. I
> agree that it's unlikely to be used often.

Yeah, I was visualizing it slightly differently, as a separate
internal-only option "schema_if_needed", but it works out to the
same thing either way.

regards, tom lane




Re: [HACKERS] creating extension including dependencies

2015-07-10 Thread Andres Freund
On July 10, 2015 4:16:59 PM GMT+02:00, Tom Lane  wrote:
>Andres Freund  writes:
>> I think we should copy the SCHEMA option here and document that we
>use
>> the same schema. But it needs to be done in a way that doesn't error
>out
>> if the extension is not relocatable...
>
>Would that propagate down through multiple levels of CASCADE? 
>(Although
>I'm not sure it would be sensible for a non-relocatable extension to
>depend on a relocatable one, so maybe the need doesn't arise in
>practice.)

I'd say so. I was thinking of adding a flag that allows passing a schema to a 
non-relocatable extension. That'd then be passed down. I agree that it's 
unlikely to be used often.

Andres

--- 
Please excuse brevity and formatting - I am writing this on my mobile phone.




Re: [HACKERS] [PATCH] Generalized JSON output functions

2015-07-10 Thread Shulgin, Oleksandr
On Fri, Jul 10, 2015 at 4:04 PM, Pavel Stehule 
wrote:

>
>>
>>> 2. why did you do an indirect call via JsonOutContext?

 What is benefit

 dst.value(&dst, (Datum) 0, JSONTYPE_NULL, InvalidOid, InvalidOid,
 false);

 instead

 json_out_value(&dst, )

>>>
>> For consistency.  Even though we initialize the output context ourselves,
>> there might be some code introduced between json_out_init_context() and
>> dst.value() calls that replaces some of the callbacks, and then there would
>> be a difference.
>>
>
> Which consistency? I haven't seen this style anywhere in Postgres.
> Isn't it premature optimization?
>

Well, one could call it premature pessimization due to dynamic call
overhead.

IMO, the fact that json_out_init_context() sets the value callback to
json_out_value is an implementation detail that the other parts of the code
should not rely on.  And for the Explain output, there is definitely going
to be *some* code between context initialization and the output callbacks:
these are done in a number of different functions.

--
Alex


Re: [HACKERS] creating extension including dependencies

2015-07-10 Thread Tom Lane
Andres Freund  writes:
> I think we should copy the SCHEMA option here and document that we use
> the same schema. But it needs to be done in a way that doesn't error out
> if the extension is not relocatable...

Would that propagate down through multiple levels of CASCADE?  (Although
I'm not sure it would be sensible for a non-relocatable extension to
depend on a relocatable one, so maybe the need doesn't arise in practice.)

regards, tom lane




Re: [HACKERS] Freeze avoidance of very large table.

2015-07-10 Thread Sawada Masahiko
On Fri, Jul 10, 2015 at 10:43 PM, Fujii Masao  wrote:
> On Fri, Jul 10, 2015 at 2:41 PM, Sawada Masahiko wrote:
>> On Fri, Jul 10, 2015 at 3:05 AM, Sawada Masahiko wrote:
>>>
>>> Also, pg_upgrade support is not done yet.
>>>
>>> TODO
>>> - Test case for this feature
>>> - pg_upgrade support.
>>>
>>
>> I had forgotten to change the fork name of visibility map to "vfm".
>> Attached latest v7 patch.
>> Please review it.
>
> The compilation failed on my machine...
>
> gcc -Wall -Wmissing-prototypes -Wpointer-arith
> -Wdeclaration-after-statement -Wendif-labels
> -Wmissing-format-attribute -Wformat-security -fno-strict-aliasing
> -fwrapv -g -O0 -I../../../../src/include -D_GNU_SOURCE   -c -o
> visibilitymap.o visibilitymap.c
> make[4]: *** No rule to make target `heapfuncs.o', needed by
> `objfiles.txt'.  Stop.
> make[4]: *** Waiting for unfinished jobs
> ( echo src/backend/access/index/genam.o
> src/backend/access/index/indexam.o ) >objfiles.txt
> make[4]: Leaving directory `/home/postgres/pgsql/git/src/backend/access/index'
> gcc -Wall -Wmissing-prototypes -Wpointer-arith
> -Wdeclaration-after-statement -Wendif-labels
> -Wmissing-format-attribute -Wformat-security -fno-strict-aliasing
> -fwrapv -g -O0 -I../../../src/include -D_GNU_SOURCE   -c -o
> tablespace.o tablespace.c
> gcc -Wall -Wmissing-prototypes -Wpointer-arith
> -Wdeclaration-after-statement -Wendif-labels
> -Wmissing-format-attribute -Wformat-security -fno-strict-aliasing
> -fwrapv -g -O0 -I../../../src/include -D_GNU_SOURCE   -c -o
> instrument.o instrument.c
> make[4]: Leaving directory `/home/postgres/pgsql/git/src/backend/access/heap'
> make[3]: *** [heap-recursive] Error 2
> make[3]: Leaving directory `/home/postgres/pgsql/git/src/backend/access'
> make[2]: *** [access-recursive] Error 2
> make[2]: *** Waiting for unfinished jobs
>

Oops, I had forgotten to add the new file heapfuncs.c.
The latest patch is attached.

Regards,

--
Sawada Masahiko


000_add_frozen_bit_into_visibilitymap_v8.patch
Description: Binary data



Re: [HACKERS] [PATCH] Generalized JSON output functions

2015-07-10 Thread Pavel Stehule
2015-07-10 15:57 GMT+02:00 Shulgin, Oleksandr:

> 2015-07-10 14:34 GMT+02:00 Pavel Stehule :
>>
>>> Hi
>>>
>>> I am sending a review of this patch:
>>>
>>> 1. I reread the previous discussion and almost everyone is in favor of
>>> this patch (me too)
>>>
>>> 2. I had to fix a typo in a hstore_io.c function (update attached);
>>> other things (patching, regress tests) were without problems
>>>
>>> My objections:
>>>
>>> 1. comments - missing comments for some basic API and basic fields like
>>> "key_scalar" and similar
>>>
>>
> I thought it was pretty obvious from the code, because it's sort of the
> only source for docs on the subject right now.  Should we add a proper
> documentation section, this would be documented there for sure.
>
>
>> 2. Why did you do an indirect call via JsonOutContext?
>>>
>>> What is the benefit of
>>>
>>> dst.value(&dst, (Datum) 0, JSONTYPE_NULL, InvalidOid, InvalidOid, false);
>>>
>>> instead of
>>>
>>> json_out_value(&dst, )
>>>
>>
> For consistency.  Even though we initialize the output context ourselves,
> there might be some code introduced between json_out_init_context() and
> dst.value() calls that replaces some of the callbacks, and then there would
> be a difference.
>

Which consistency? I haven't seen this style anywhere in Postgres.
Isn't it premature optimization?



>
>> 3. if it should be used everywhere, then in EXPLAIN statement too.
>>>
>>
> Ahh.. good catch.  I'll have a look at this now.
>
> Thanks for the review!
>
> --
> Alex
>
>


Re: [HACKERS] LANGUAGE sql functions don't use the custom plan logic

2015-07-10 Thread Andres Freund
On 2015-07-10 09:52:30 -0400, Tom Lane wrote:
> There's a whole lot of ways in which the current implementation of
> SQL-language functions leaves things to be desired.

Yea.

> What did you run into exactly?

A generic plan was used on a partitioned table.  Normally the function
could be inlined so that wasn't noticeable, but in one case it
wasn't... It's confusing if all invocations of the same function are
reasonably fast, but one, with numerically the same values passed down,
isn't.
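
To make the trap concrete, a minimal sketch (table and function names
are made up; 9.4-era inheritance partitioning assumed):

    CREATE TABLE measurements (logdate date, value int);
    CREATE TABLE measurements_2015 (CHECK (logdate >= '2015-01-01'))
        INHERITS (measurements);

    CREATE FUNCTION sum_since(d date) RETURNS bigint AS $$
        SELECT sum(value) FROM measurements WHERE logdate >= d;
    $$ LANGUAGE sql STABLE;

If a call isn't inlined, the function body gets one generic plan with
the parameter unknown, so constraint exclusion can't prune children the
way it does for the inlined, constant-folded query.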

Andres




Re: [HACKERS] [PATCH] Generalized JSON output functions

2015-07-10 Thread Shulgin, Oleksandr
>
> 2015-07-10 14:34 GMT+02:00 Pavel Stehule :
>
>> Hi
>>
>> I am sending a review of this patch:
>>
>> 1. I reread the previous discussion and almost everyone is in favor of
>> this patch (me too)
>>
>> 2. I had to fix a typo in a hstore_io.c function (update attached); other
>> things (patching, regress tests) were without problems
>>
>> My objections:
>>
>> 1. comments - missing comments for some basic API and basic fields like
>> "key_scalar" and similar
>>
>
I thought it was pretty obvious from the code, because it's sort of the
only source for docs on the subject right now.  Should we add a proper
documentation section, this would be documented there for sure.


> 2. Why did you do an indirect call via JsonOutContext?
>>
>> What is the benefit of
>>
>> dst.value(&dst, (Datum) 0, JSONTYPE_NULL, InvalidOid, InvalidOid, false);
>>
>> instead of
>>
>> json_out_value(&dst, )
>>
>
For consistency.  Even though we initialize the output context ourselves,
there might be some code introduced between json_out_init_context() and
dst.value() calls that replaces some of the callbacks, and then there would
be a difference.


> 3. if it should be used everywhere, then in EXPLAIN statement too.
>>
>
Ahh.. good catch.  I'll have a look at this now.

Thanks for the review!

--
Alex


Re: [HACKERS] creating extension including dependencies

2015-07-10 Thread Andres Freund
On 2015-06-15 00:50:08 +0200, Petr Jelinek wrote:
> +         /* Create and execute new CREATE EXTENSION statement. */
> +         ces = makeNode(CreateExtensionStmt);
> +         ces->extname = curreq;
> +         ces->if_not_exists = false;
> +         parents = lappend(list_copy(recursive_parents), stmt->extname);
> +         ces->options = list_make1(makeDefElem("recursive",
> +                                               (Node *) parents));
> +         CreateExtension(ces);

I think we should copy the SCHEMA option here and document that we use
the same schema. But it needs to be done in a way that doesn't error out
if the extension is not relocatable...




Re: [HACKERS] LANGUAGE sql functions don't use the custom plan logic

2015-07-10 Thread Tom Lane
Andres Freund  writes:
> I was recently forcefully reminded that sql functions don't use the
> plancache.c infrastructure. I was sort of aware of that, but I didn't
> think far enough ahead to realize that this also implies we'll not use
> custom plans for statements with parameters when it'd be helpful.

There's a whole lot of ways in which the current implementation of
SQL-language functions leaves things to be desired.  What did you
run into exactly?

regards, tom lane




Re: [HACKERS] creating extension including dependencies

2015-07-10 Thread Tom Lane
Michael Paquier  writes:
> On Fri, Jul 10, 2015 at 10:09 PM, Heikki Linnakangas 
>> This seems quite reasonable, but I have to ask: How many extensions are
>> there out there that depend on another extension? Off the top of my head, I
>> can't think of any..

> With transforms there are such dependencies, and there are 3 in contrib/:
> hstore_plperl, hstore_plpython and ltree_plpython.

It's reasonable to expect that such cases will become more common as the
extension community matures.  It wasn't something we especially had to
worry about in the initial implementation of extensions, but it seems
totally worthwhile to me to add it now.

FWIW, I agree with using "CASCADE" to signal a request to create required
extension(s) automatically.
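
For instance (a sketch only; the exact syntax is what's being discussed
in this thread):

    CREATE EXTENSION hstore_plperl CASCADE;
    -- would create the required hstore and plperl extensions first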

regards, tom lane




Re: [HACKERS] Freeze avoidance of very large table.

2015-07-10 Thread Fujii Masao
On Fri, Jul 10, 2015 at 2:41 PM, Sawada Masahiko  wrote:
> On Fri, Jul 10, 2015 at 3:05 AM, Sawada Masahiko  
> wrote:
>>
>> Also, pg_upgrade support is not done yet.
>>
>> TODO
>> - Test case for this feature
>> - pg_upgrade support.
>>
>
> I had forgotten to change the fork name of visibility map to "vfm".
> Attached latest v7 patch.
> Please review it.

The compilation failed on my machine...

gcc -Wall -Wmissing-prototypes -Wpointer-arith
-Wdeclaration-after-statement -Wendif-labels
-Wmissing-format-attribute -Wformat-security -fno-strict-aliasing
-fwrapv -g -O0 -I../../../../src/include -D_GNU_SOURCE   -c -o
visibilitymap.o visibilitymap.c
make[4]: *** No rule to make target `heapfuncs.o', needed by
`objfiles.txt'.  Stop.
make[4]: *** Waiting for unfinished jobs
( echo src/backend/access/index/genam.o
src/backend/access/index/indexam.o ) >objfiles.txt
make[4]: Leaving directory `/home/postgres/pgsql/git/src/backend/access/index'
gcc -Wall -Wmissing-prototypes -Wpointer-arith
-Wdeclaration-after-statement -Wendif-labels
-Wmissing-format-attribute -Wformat-security -fno-strict-aliasing
-fwrapv -g -O0 -I../../../src/include -D_GNU_SOURCE   -c -o
tablespace.o tablespace.c
gcc -Wall -Wmissing-prototypes -Wpointer-arith
-Wdeclaration-after-statement -Wendif-labels
-Wmissing-format-attribute -Wformat-security -fno-strict-aliasing
-fwrapv -g -O0 -I../../../src/include -D_GNU_SOURCE   -c -o
instrument.o instrument.c
make[4]: Leaving directory `/home/postgres/pgsql/git/src/backend/access/heap'
make[3]: *** [heap-recursive] Error 2
make[3]: Leaving directory `/home/postgres/pgsql/git/src/backend/access'
make[2]: *** [access-recursive] Error 2
make[2]: *** Waiting for unfinished jobs

Regards,

-- 
Fujii Masao




Re: [HACKERS] WIP: Enhanced ALTER OPERATOR

2015-07-10 Thread Uriy Zhuravlev
Hello.

On Friday 10 July 2015 15:46:35 you wrote:
> * I think it'd be better to use NONE instead of NULL above, to remove
> the estimator. It seems inconsistent when we've used NONE to mean
> "missing" earlier in the same statement.

Ok, you're right.

> * The way you check for the NULL in OperatorUpd is wrong. It cannot
> distinguish between a quoted "null", meaning a function called "null",
> and a NULL, meaning no function. 
With "none" has a similar problem. I need to correct the grammar.

> 
> Attached is a new version of your patch, rebased over current master,
> and the docs formatting fixes.

Thanks. I hope to send the new patch on Monday. 

-- 
Uriy Zhuravlev
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company




Re: [HACKERS] creating extension including dependencies

2015-07-10 Thread Vladimir Borodin

> On 10 July 2015 at 16:09, Heikki Linnakangas wrote:
> 
> On 07/09/2015 07:05 PM, Petr Jelinek wrote:
>> On 2015-07-07 15:41, Andres Freund wrote:
>>> On 2015-07-07 22:36:29 +0900, Fujii Masao wrote:
 On Mon, Jun 15, 2015 at 7:50 AM, Petr Jelinek  wrote:
> Hi,
> 
> I am getting tired of installing required extensions manually. I was
> wondering if we might want to add an option to CREATE EXTENSION that would
> allow automatic creation of the extensions required by the extension that
> is being installed by the user.
 
 I'm wondering how helpful this feature is. Because, even if we can save
 some steps for CREATE EXTENSION by using the feature, we still need to
 manually find out, download and install all the extensions that the target
 extension depends on. So isn't it better to implement a tool like yum, i.e.,
 one which performs all those steps almost automatically, rather than the
 proposed feature? Maybe it's outside PostgreSQL core.
>>> 
>>> That doesn't seem to make much sense to me. Something like yum can't
>>> install everything in all relevant databases. Sure, yum will be used to
>>> install dependencies between extensions on the filesystem level.
>>> 
>>> At the minimum I'd like to see that CREATE EXTENSION foo; would
>>> install extension 'bar' if foo depended on 'bar' and CASCADE is
>>> specified. Right now we always error out saying that the dependency on
>>> 'bar' is not fulfilled - not particularly helpful.
>> 
>> That's what the proposed patch does (with slightly different syntax but
>> syntax is something that can be changed easily).
> 
> This seems quite reasonable, but I have to ask: How many extensions are there 
> out there that depend on another extension? Off the top of my head, I can't 
> think of any..

pg_stat_kcache depends on pg_stat_statements, for example.

> 
> - Heikki
> 
> 

--
May the force be with you…
https://simply.name



Re: [HACKERS] One question about security label command

2015-07-10 Thread Heikki Linnakangas

On 05/13/2015 03:49 PM, Kohei KaiGai wrote:

2015-05-13 21:45 GMT+09:00 Robert Haas :

Can you add this to the next CommitFest?


OK, done

https://commitfest.postgresql.org/5/249/


Aaand the commitfest has begun..

Stephen, would you have the time to review this patch, and commit if 
appropriate, please? And if you could set up the buildfarm animal to run 
this, even better.


- Heikki





Re: [HACKERS] creating extension including dependencies

2015-07-10 Thread Andres Freund
On 2015-07-10 16:09:48 +0300, Heikki Linnakangas wrote:
> This seems quite reasonable, but I have to ask: How many extensions are
> there out there that depend on another extension? Off the top of my head, I
> can't think of any..

BDR/UDR is one (or two depending on your POV).

I think a part of why there are not more is that the dependencies right
now are painful.




Re: [HACKERS] creating extension including dependencies

2015-07-10 Thread Michael Paquier
On Fri, Jul 10, 2015 at 10:09 PM, Heikki Linnakangas wrote:

> On 07/09/2015 07:05 PM, Petr Jelinek wrote:
>
>> On 2015-07-07 15:41, Andres Freund wrote:
>>
>>> On 2015-07-07 22:36:29 +0900, Fujii Masao wrote:
>>>
 On Mon, Jun 15, 2015 at 7:50 AM, Petr Jelinek 
 wrote:

> Hi,
>
> I am getting tired of installing required extensions manually. I was
> wondering if we might want to add an option to CREATE EXTENSION that
> would allow automatic creation of the extensions required by the
> extension that is being installed by the user.
>

 I'm wondering how helpful this feature is. Because, even if we can save
 some steps for CREATE EXTENSION by using the feature, we still need to
 manually find out, download and install all the extensions that the target
 extension depends on. So isn't it better to implement a tool like yum,
 i.e., one which performs all those steps almost automatically, rather than
 the proposed feature? Maybe it's outside PostgreSQL core.

>>>
>>> That doesn't seem to make much sense to me. Something like yum can't
>>> install everything in all relevant databases. Sure, yum will be used to
>>> install dependencies between extensions on the filesystem level.
>>>
>>> At the minimum I'd like to see that CREATE EXTENSION foo; would
>>> install extension 'bar' if foo depended on 'bar' and CASCADE is
>>> specified. Right now we always error out saying that the dependency on
>>> 'bar' is not fulfilled - not particularly helpful.
>>>
>>
>> That's what the proposed patch does (with slightly different syntax but
>> syntax is something that can be changed easily).
>>
>
> This seems quite reasonable, but I have to ask: How many extensions are
> there out there that depend on another extension? Off the top of my head, I
> can't think of any..
>

With transforms there are such dependencies, and there are 3 in contrib/:
hstore_plperl, hstore_plpython and ltree_plpython.
-- 
Michael


Re: [HACKERS] creating extension including dependencies

2015-07-10 Thread Heikki Linnakangas

On 07/09/2015 07:05 PM, Petr Jelinek wrote:

On 2015-07-07 15:41, Andres Freund wrote:

On 2015-07-07 22:36:29 +0900, Fujii Masao wrote:

On Mon, Jun 15, 2015 at 7:50 AM, Petr Jelinek  wrote:

Hi,

I am getting tired of installing required extensions manually. I was
wondering if we might want to add an option to CREATE EXTENSION that would
allow automatic creation of the extensions required by the extension that
is being installed by the user.


I'm wondering how helpful this feature is. Because, even if we can save
some steps for CREATE EXTENSION by using the feature, we still need to
manually find out, download and install all the extensions that the target
extension depends on. So isn't it better to implement a tool like yum, i.e.,
one which performs all those steps almost automatically, rather than the
proposed feature? Maybe it's outside PostgreSQL core.


That doesn't seem to make much sense to me. Something like yum can't
install everything in all relevant databases. Sure, yum will be used to
install dependencies between extensions on the filesystem level.

At the minimum I'd like to see that CREATE EXTENSION foo; would
install extension 'bar' if foo depended on 'bar' and CASCADE is
specified. Right now we always error out saying that the dependency on
'bar' is not fulfilled - not particularly helpful.


That's what the proposed patch does (with slightly different syntax but
syntax is something that can be changed easily).


This seems quite reasonable, but I have to ask: How many extensions are 
there out there that depend on another extension? Off the top of my 
head, I can't think of any..


- Heikki




Re: [HACKERS] Support for N synchronous standby servers - take 2

2015-07-10 Thread Beena Emerson
Hello,

Tue, Jul 7, 2015 at 02:56 AM, Josh Berkus wrote:
> pro-JSON: 
> 
> * standard syntax which is recognizable to sysadmins and devops. 
> * can use JSON/JSONB functions with ALTER SYSTEM SET to easily make 
> additions/deletions from the synch rep config. 
> * can add group labels (see below) 

Adding group labels does have a lot of value, but as Amit has pointed out,
with little modification they can be included in the GUC as well. It will
not make it any more complex.

On Tue, Jul 7, 2015 at 2:19 PM, Michael Paquier wrote:

> Something like pg_syncinfo/ coupled with a LW lock, we already do 
> something similar for replication slots with pg_replslot/.

I was trying to figure out how the JSON metadata can be used.
It would have to be set using a given set of functions. Right?
I am sorry this question is very basic.

The functions could be something like:
1. pg_add_synch_set(set_name NAME, quorum INT, is_priority bool, set_members
VARIADIC)

This will be used to add a sync set. The set_members can be individual
elements or another set name. The parameter is_priority is used to decide
whether the set is a priority set (true) or a quorum set (false). This
function call will create a folder pg_syncinfo/groups/$NAME and store the
JSON blob?

The root group would be automatically set by finding the group which is not
included in any other group, or could it be set by another function?

2. pg_modify_sync_set(set_name NAME, quorum INT, is_priority bool,
set_members VARIADIC)

This will update the pg_syncinfo/groups/$NAME to store the new values.

3. pg_drop_synch_set(set_name NAME)

This will remove the pg_syncinfo/groups/$NAME folder. Also, would all the
groups which included this one be updated?

4. pg_show_synch_set()

This will display the current sync setting in JSON format.
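
Taken together, usage might look something like this (a sketch only; all
of these function names and signatures are hypothetical proposals, not
existing APIs):

    SELECT pg_add_synch_set('cluster1', 2, true, 'node_a', 'node_b', 'node_c');
    SELECT pg_modify_sync_set('cluster1', 1, false, 'node_a', 'node_b');
    SELECT pg_show_synch_set();
    SELECT pg_drop_synch_set('cluster1');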

Am I missing something?

Is JSON being preferred because it would be ALTER SYSTEM friendly and in a
format already known to users?

In a real-life scenario, at most how many groups and how much nesting would
be expected?



-
Beena Emerson





Re: [HACKERS] Minor code improvements to create_foreignscan_plan/ExecInitForeignScan

2015-07-10 Thread David Rowley
On 10 July 2015 at 21:40, Etsuro Fujita  wrote:

> To save cycles, I modified create_foreignscan_plan so that it detects
> whether any system columns are requested if scanning a base relation.
> Also, I revised other code there a little bit.
>
> For ExecInitForeignScan, I simplified the code there to determine the
> scan tuple type, which seems to me complex beyond necessity.  Maybe
> that's nitpicking, though.
>
>
I just glanced at this and noticed that the method for determining if
there are any system columns could be made a bit nicer.

/* Now, are any system columns requested from rel? */
scan_plan->fsSystemCol = false;
for (i = FirstLowInvalidHeapAttributeNumber + 1; i < 0; i++)
{
if (bms_is_member(i - FirstLowInvalidHeapAttributeNumber, attrs_used))
{
scan_plan->fsSystemCol = true;
break;
}
}

I think it could just be written as:
 /* Now, are any system columns requested from rel? */
if (!bms_is_empty(attrs_used) &&
bms_next_member(attrs_used, -1) < -FirstLowInvalidHeapAttributeNumber)
scan_plan->fsSystemCol = true;
else
scan_plan->fsSystemCol = false;

I know you didn't change this, but just thought I'd mention it while
there's an opportunity to fix it.

Regards

David Rowley

--
 David Rowley   http://www.2ndQuadrant.com/

 PostgreSQL Development, 24x7 Support, Training & Services


Re: [HACKERS] Fillfactor for GIN indexes

2015-07-10 Thread Michael Paquier
On Fri, Jul 10, 2015 at 7:13 PM, Alexander Korotkov wrote:

> On Fri, Jul 10, 2015 at 4:54 AM, Michael Paquier <michael.paqu...@gmail.com> wrote:
>
>>
>>
>> On Thu, Jul 9, 2015 at 10:33 PM, Alexander Korotkov wrote:
>> > [...]
>>
>> + /* Caclculate max data size on page according to fillfactor */
>> s/Caclculate/Calculate
>>
>> When creating a simple gin index, I am seeing that GinGetMaxDataSize gets
>> called with ginEntryInsert:
>>   * frame #0: 0x00010a49d72e
>> postgres`GinGetMaxDataSize(index=0x000114168378, isBuild='\x01') + 14
>> at gindatapage.c:436
>> frame #1: 0x00010a49ecbe
>> postgres`createPostingTree(index=0x000114168378,
>> items=0x000114a9c038, nitems=35772, buildStats=0x7fff55787350) +
>> 302 at gindatapage.c:1754
>> frame #2: 0x00010a493423
>> postgres`buildFreshLeafTuple(ginstate=0x7fff55784d90, attnum=1,
>> key=2105441, category='\0', items=0x000114a9c038, nitem=35772,
>> buildStats=0x7fff55787350) + 339 at gininsert.c:158
>> frame #3: 0x00010a492f84
>> postgres`ginEntryInsert(ginstate=0x7fff55784d90, attnum=1, key=2105441,
>> category='\0', items=0x000114a9c038, nitem=35772,
>> buildStats=0x7fff55787350) + 916 at gininsert.c:228
>>
>> And as far as I can see GinGetMaxDataSize uses isBuild:
>> + int
>> + GinGetMaxDataSize(Relation index, bool isBuild)
>> + {
>> + int fillfactor, result;
>> +
>> + /* Grab option value which affects only index build */
>> + if (!isBuild)
>> However isBuild is not set in this code path, see
>> http://www.postgresql.org/message-id/cab7npqsc4vq9mhkqm_yvafcteho-iuy8skbxydnmgnai1xn...@mail.gmail.com
>> where I reported the problem. So this code is somewhat broken, no? I am
>> attaching to this email the patch in question that should be applied on top
>> of Alexander's latest version.
>>
>
> I didn't get what the problem is. Was this stacktrace taken during index
> build? If so, GinGetMaxDataSize really should get isBuild='\x01'. It is
> set by the following line
>
> maxdatasize = GinGetMaxDataSize(index, buildStats ? true: false);
>

Yeah, I just find it confusing that we actually rely on whether buildStats
is NULL here instead of passing the isBuild flag of GinBtree directly, for
example. But that's a point of detail..
-- 
Michael


Re: ctidscan as an example of custom-scan (Re: [HACKERS] [v9.5] Custom Plan API)

2015-07-10 Thread Heikki Linnakangas
On 07/02/2015 08:21 AM, Kouhei Kaigai wrote:
> Folks,
> 
>> Moved this patch to next CF 2015-02 because of lack of review(ers).
>>
> Do we still need this patch as a contrib module?
> It was originally required as an example of the custom-scan interface last
> summer; however, there was no strong requirement after that, so it
> was bumped to the v9.6 development cycle.
> 
> If somebody still needs it, I'll rebase and adjust the patch towards
> the latest custom-scan interface. However, I cannot be motivated to work
> on a feature nobody wants.

Robert, can you weigh in on this? Do we currently have anything in the
tree that tests the Custom Scan interface? If not, would this be helpful
for that purpose?

- Heikki





Re: [HACKERS] WIP: Enhanced ALTER OPERATOR

2015-07-10 Thread Heikki Linnakangas

On 07/06/2015 07:21 PM, Uriy Zhuravlev wrote:

Hello hackers.

This is the fifth version of the patch (the fourth was unsuccessful :)).
I added documentation and did a small refactoring.


I edited the formatting of the syntax page a bit, and came up with this:

ALTER OPERATOR name ( { left_type | NONE } , { right_type | NONE } )
    SET ( { RESTRICT = { res_proc | NULL }
          | JOIN = { join_proc | NULL }
          } [, ... ] )

A couple of minor issues with the syntax itself:

* I think it'd be better to use NONE instead of NULL above, to remove 
the estimator (see the sketch below, after these points). It seems 
inconsistent when we've used NONE to mean "missing" earlier in the same 
statement.


* The way you check for the NULL in OperatorUpd is wrong. It cannot 
distinguish between a quoted "null", meaning a function called "null", 
and a NULL, meaning no function. (You can argue that you're just asking 
for trouble if you name any function "null", but we're careful to deal 
with that correctly everywhere else.) You don't have the information 
about quoting once you leave the parser, so you'll have to distinguish 
those in the grammar.
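
To illustrate the NONE suggestion in the first point (a sketch of the 
suggested form, reusing the example operator from the patch's docs):

    ALTER OPERATOR && (_int4, _int4) SET (RESTRICT = NONE);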


Attached is a new version of your patch, rebased over current master, 
and the docs formatting fixes.


- Heikki

diff --git a/doc/src/sgml/ref/alter_operator.sgml b/doc/src/sgml/ref/alter_operator.sgml
index 8a7af50..b8c353f 100644
--- a/doc/src/sgml/ref/alter_operator.sgml
+++ b/doc/src/sgml/ref/alter_operator.sgml
@@ -26,6 +26,11 @@ ALTER OPERATOR name ( { left_typename ( { left_type | NONE } , { right_type | NONE } )
 SET SCHEMA new_schema
+
+ALTER OPERATOR name ( { left_type | NONE } , { right_type | NONE } )
+SET ( {  RESTRICT = { res_proc | NONE }
+   | JOIN = { join_proc | NONE }
+ } [, ... ] )
 
  
 
@@ -34,8 +39,7 @@ ALTER OPERATOR name ( { left_type
ALTER OPERATOR changes the definition of
-   an operator.  The only currently available functionality is to change the
-   owner of the operator.
+   an operator.
   
 
   
@@ -98,6 +102,25 @@ ALTER OPERATOR name ( { left_type
 

+
+   
+ res_proc
+ 
+   
+ The restriction selectivity estimator function for this operator; write NONE to remove existing selectivity estimator.
+   
+  
+   
+
+   
+ join_proc
+ 
+   
+ The join selectivity estimator function for this operator; write NONE to remove existing selectivity estimator.
+   
+ 
+   
+
   
  
 
@@ -109,6 +132,13 @@ ALTER OPERATOR name ( { left_type
 ALTER OPERATOR @@ (text, text) OWNER TO joe;
 
+
+  
+Change the restriction and join selectivity estimator function of a custom operator a && b for type int[]:
+
+ALTER OPERATOR && (_int4, _int4) SET (RESTRICT = _int_contsel, JOIN = _int_contjoinsel);
+
+
  
 
  
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index 072f530..b981a44 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -28,8 +28,10 @@
 #include "catalog/pg_operator.h"
 #include "catalog/pg_proc.h"
 #include "catalog/pg_type.h"
+#include "commands/defrem.h"
 #include "miscadmin.h"
 #include "parser/parse_oper.h"
+#include "parser/parse_func.h"
 #include "utils/acl.h"
 #include "utils/builtins.h"
 #include "utils/lsyscache.h"
@@ -53,7 +55,7 @@ static Oid OperatorShellMake(const char *operatorName,
   Oid leftTypeId,
   Oid rightTypeId);
 
-static void OperatorUpd(Oid baseId, Oid commId, Oid negId);
+static void ShellOperatorUpd(Oid baseId, Oid commId, Oid negId);
 
 static Oid get_other_operator(List *otherOp,
    Oid otherLeftTypeId, Oid otherRightTypeId,
@@ -563,7 +565,7 @@ OperatorCreate(const char *operatorName,
 		commutatorId = operatorObjectId;
 
 	if (OidIsValid(commutatorId) || OidIsValid(negatorId))
-		OperatorUpd(operatorObjectId, commutatorId, negatorId);
+		ShellOperatorUpd(operatorObjectId, commutatorId, negatorId);
 
 	return address;
 }
@@ -633,7 +635,7 @@ get_other_operator(List *otherOp, Oid otherLeftTypeId, Oid otherRightTypeId,
 }
 
 /*
- * OperatorUpd
+ * ShellOperatorUpd
  *
  *	For a given operator, look up its negator and commutator operators.
  *	If they are defined, but their negator and commutator fields
@@ -642,7 +644,7 @@ get_other_operator(List *otherOp, Oid otherLeftTypeId, Oid otherRightTypeId,
  *	which are the negator or commutator of each other.
  */
 static void
-OperatorUpd(Oid baseId, Oid commId, Oid negId)
+ShellOperatorUpd(Oid baseId, Oid commId, Oid negId)
 {
 	int			i;
 	Relation	pg_operator_desc;
@@ -864,3 +866,149 @@ makeOperatorDependencies(HeapTuple tuple)
 
 	return myself;
 }
+
+/*
+ * Operator update aka ALTER OPERATOR for RESTRICT, JOIN
+ */
+void
+OperatorUpd(Oid classId, Oid baseId, List *operator_params)
+{
+	int			i;
+	ListCell	*pl;
+	Relation	catalog;
+	HeapTuple	tup;
+	Oid 		operator_param_id = 0;
+	bool		nulls[Natts_pg_operator];
+	bool		replaces[Natts_pg_operator];
+	Datum		values[Natts_pg_operator];
+
+	for (i = 0; i < Natts_pg_operator; ++i)
+

Re: [HACKERS] [PATCH] Generalized JSON output functions

2015-07-10 Thread Pavel Stehule
forgotten attachment

Regards

Pavel

2015-07-10 14:34 GMT+02:00 Pavel Stehule :

> Hi
>
> I am sending a review of this patch:
>
> 1. I reread the previous discussion and almost everyone is in favor of
> this patch (me too)
>
> 2. I had to fix a typo in a hstore_io.c function (update attached); other
> things (patching, regress tests) were without problems
>
> My objections:
>
> 1. comments - missing comments for some basic API and basic fields like
> "key_scalar" and similar
> 2. Why did you do an indirect call via JsonOutContext?
>
> What is the benefit of
>
> dst.value(&dst, (Datum) 0, JSONTYPE_NULL, InvalidOid, InvalidOid, false);
>
> instead of
>
> json_out_value(&dst, )
>
> ? Is it necessary?
>
> 3. If it should be used everywhere, then it should be used in the EXPLAIN
> statement too.
>
> Regards
>
> Pavel
>
>
> 2015-07-10 6:31 GMT+02:00 Pavel Stehule :
>
>>
>>
>> 2015-07-03 12:27 GMT+02:00 Heikki Linnakangas :
>>
>>> On 05/27/2015 09:51 PM, Andrew Dunstan wrote:
>>>

 On 05/27/2015 02:37 PM, Robert Haas wrote:

> On Tue, May 26, 2015 at 2:50 AM, Shulgin, Oleksandr
>  wrote:
>
>> Is it reasonable to add this patch to CommitFest now?
>>
> It's always reasonable to add a patch to the CommitFest if you would
> like for it to be reviewed and avoid having it get forgotten about.
> There seems to be some disagreement about whether we want this, but
> don't let that stop you from adding it to the next CommitFest.
>

 I'm not dead set against it either. When I have time I will take a
 closer look.

>>>
>>> Andrew, will you have the time to review this? Please add yourself as
>>> reviewer in the commitfest app if you do.
>>>
>>> My 2 cents is that I agree with your initial reaction: This is a lot of
>>> infrastructure and generalizing things, for little benefit. Let's change
>>> the current code where we generate JSON to be consistent with whitespace,
>>> and call it a day.
>>>
>>
>> I am thinking it is not a bad idea. This code can enforce a uniform
>> format, and it can check whether the produced value is correct. It can be
>> used in our code, and it can be used by extension developers.
>>
>> This patch is not small, but there are really not too many new lines.
>>
>> I'll do the review today.
>>
>> Regards
>>
>> Pavel
>>
>>
>>
>>
>>> - Heikki
>>>
>>>
>>>
>>
>>
>
diff --git a/contrib/hstore/hstore_io.c b/contrib/hstore/hstore_io.c
new file mode 100644
index 7d89867..0ca223f
*** a/contrib/hstore/hstore_io.c
--- b/contrib/hstore/hstore_io.c
*** hstore_to_json_loose(PG_FUNCTION_ARGS)
*** 1241,1286 
  	int			count = HS_COUNT(in);
  	char	   *base = STRPTR(in);
  	HEntry	   *entries = ARRPTR(in);
! 	StringInfoData tmp,
! dst;
  
  	if (count == 0)
  		PG_RETURN_TEXT_P(cstring_to_text_with_len("{}", 2));
  
  	initStringInfo(&tmp);
! 	initStringInfo(&dst);
! 
! 	appendStringInfoChar(&dst, '{');
  
  	for (i = 0; i < count; i++)
  	{
  		resetStringInfo(&tmp);
  		appendBinaryStringInfo(&tmp, HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
! 		escape_json(&dst, tmp.data);
! 		appendStringInfoString(&dst, ": ");
  		if (HS_VALISNULL(entries, i))
! 			appendStringInfoString(&dst, "null");
  		/* guess that values of 't' or 'f' are booleans */
  		else if (HS_VALLEN(entries, i) == 1 && *(HS_VAL(entries, base, i)) == 't')
! 			appendStringInfoString(&dst, "true");
  		else if (HS_VALLEN(entries, i) == 1 && *(HS_VAL(entries, base, i)) == 'f')
! 			appendStringInfoString(&dst, "false");
  		else
  		{
  			resetStringInfo(&tmp);
  			appendBinaryStringInfo(&tmp, HS_VAL(entries, base, i), HS_VALLEN(entries, i));
  			if (IsValidJsonNumber(tmp.data, tmp.len))
! appendBinaryStringInfo(&dst, tmp.data, tmp.len);
  			else
! escape_json(&dst, tmp.data);
  		}
- 
- 		if (i + 1 != count)
- 			appendStringInfoString(&dst, ", ");
  	}
- 	appendStringInfoChar(&dst, '}');
  
! 	PG_RETURN_TEXT_P(cstring_to_text(dst.data));
  }
  
  PG_FUNCTION_INFO_V1(hstore_to_json);
--- 1241,1289 
  	int			count = HS_COUNT(in);
  	char	   *base = STRPTR(in);
  	HEntry	   *entries = ARRPTR(in);
! 	StringInfoData	tmp;
! 	JsonOutContext	dst;
  
  	if (count == 0)
  		PG_RETURN_TEXT_P(cstring_to_text_with_len("{}", 2));
  
  	initStringInfo(&tmp);
! 	json_out_init_context(&dst, JSON_OUT_USE_SPACES);
! 	dst.object_start(&dst);
  
  	for (i = 0; i < count; i++)
  	{
  		resetStringInfo(&tmp);
  		appendBinaryStringInfo(&tmp, HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
! 		json_out_cstring(&dst, tmp.data, true);
! 
  		if (HS_VALISNULL(entries, i))
! 			dst.value(&dst, (Datum) 0, JSONTYPE_NULL, InvalidOid, InvalidOid, false);
! 
  		/* guess that values of 't' or 'f' are booleans */
  		else if (HS_VALLEN(entries, i) == 1 && *(HS_VAL(entries, base, i)) == 't')
! 			dst.value(&dst, BoolGetDatum(true), JSONTYPE_BOOL,
! 	  InvalidOid, InvalidOid, false);
! 
  		el

Re: [HACKERS] [PATCH] Generalized JSON output functions

2015-07-10 Thread Pavel Stehule
Hi

I am sending a review of this patch:

1. I reread the previous discussion and almost everyone is in favor of this
patch (me too)

2. I had to fix a typo in a hstore_io.c function (update attached); other
things (patching, regress tests) were without problems

My objections:

1. comments - missing comments for some basic API and basic fields like
"key_scalar" and similar
2. Why did you do an indirect call via JsonOutContext?

What is the benefit of

dst.value(&dst, (Datum) 0, JSONTYPE_NULL, InvalidOid, InvalidOid, false);

instead of

json_out_value(&dst, )

? Is it necessary?

3. If it should be used everywhere, then it should be used in the EXPLAIN
statement too.
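
For reference, hstore_to_json_loose - the function touched by the
attached fix - guesses value types, roughly like this (my expectation,
not copied from a test run):

    SELECT hstore_to_json_loose('"a"=>"1", "b"=>"t", "c"=>"x"'::hstore);
    -- {"a": 1, "b": true, "c": "x"}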

Regards

Pavel


2015-07-10 6:31 GMT+02:00 Pavel Stehule :

>
>
> 2015-07-03 12:27 GMT+02:00 Heikki Linnakangas :
>
>> On 05/27/2015 09:51 PM, Andrew Dunstan wrote:
>>
>>>
>>> On 05/27/2015 02:37 PM, Robert Haas wrote:
>>>
 On Tue, May 26, 2015 at 2:50 AM, Shulgin, Oleksandr
  wrote:

> Is it reasonable to add this patch to CommitFest now?
>
 It's always reasonable to add a patch to the CommitFest if you would
 like for it to be reviewed and avoid having it get forgotten about.
 There seems to be some disagreement about whether we want this, but
 don't let that stop you from adding it to the next CommitFest.

>>>
>>> I'm not dead set against it either. When I have time I will take a
>>> closer look.
>>>
>>
>> Andrew, will you have the time to review this? Please add yourself as
>> reviewer in the commitfest app if you do.
>>
>> My 2 cents is that I agree with your initial reaction: This is a lot of
>> infrastructure and generalizing things, for little benefit. Let's change
>> the current code where we generate JSON to be consistent with whitespace,
>> and call it a day.
>>
>
> I am thinking it is not a bad idea. This code can enforce a uniform
> format, and it can check whether the produced value is correct. It can be
> used in our code, and it can be used by extension developers.
>
> This patch is not small, but there are really not too many new lines.
>
> I'll do the review today.
>
> Regards
>
> Pavel
>
>
>
>
>> - Heikki
>>
>>
>>
>
>


Re: [HACKERS] Fillfactor for GIN indexes

2015-07-10 Thread Heikki Linnakangas

On 07/10/2015 01:13 PM, Alexander Korotkov wrote:

On Fri, Jul 10, 2015 at 4:54 AM, Michael Paquier wrote:

+ #define GIN_MIN_FILLFACTOR20
+ #define GIN_DEFAULT_FILLFACTOR90
I am still worrying about using a default fillfactor at 90, as we did a
lot of promotion about compression of GIN indexes in 9.4. Feel free to
ignore this comment if you think that 90 is a good default. The difference
is visibly in order of MBs even for large indexes still, this is changing
the default of 9.4 and 9.5.


On the other side, it's unclear why we should have a fillfactor distinct
from btree's..


Good question. One argument is because the items on a GIN posting page 
are much smaller (2-3 bytes typically) than index tuples in a B-tree 
page (32 bytes or more). By a rough calculation, in the 10% free space 
you leave in an index page, which is about 800 bytes, you can insert at 
most 25 or so tuples. In the same amount of free space in a GIN page, 
you can fit hundreds of items. The fillfactor is particularly useful to 
avoid splitting a lot of pages after creating a new index, when you do a 
few random updates.
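
For reference, with the patch this becomes a per-index knob, along the 
lines of (a sketch, assuming the reloption the patch adds):

    CREATE INDEX idx_docs_fts ON docs USING gin (body_tsv)
        WITH (fillfactor = 90);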


Another argument is that for the typical use cases of GIN, tuples are 
not updated as often as with B-tree indexes.


A third argument is that before this patch, we always packed the index 
as full as possible (i.e. fillfactor=100), and we want to avoid 
"changing the default" so much.


I'm not sure any of those arguments are very strong, but my gut feeling 
is that 90 might indeed be too small. Perhaps set the default to 95..


I'm curious why the minimum in the patch is 20, while the minimum for 
b-tree is 10, though. I don't see any particularly good reason for that.


- Heikki





[HACKERS] security labels on databases are bad for dump & restore

2015-07-10 Thread Andres Freund
Hi,

pg_dump dumps security labels on databases. Which makes sense. The
problem is that they're dumped including the database name.

Which means that if you dump a database and restore it into a
differently named one you'll either get a failure because the database
does not exist, or worse you'll update the label of the wrong database.

So I think we need CURRENT_DATABASE (or similar) support for security
labels on databases.
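
Something along these lines (a sketch; CURRENT_DATABASE is the proposed
spelling, not existing syntax, and the label shown is just an example):

    -- what pg_dump emits today, tied to the original database name:
    SECURITY LABEL FOR selinux ON DATABASE mydb
        IS 'system_u:object_r:sepgsql_db_t:s0';
    -- proposed: refer to the database actually being restored into:
    SECURITY LABEL FOR selinux ON DATABASE CURRENT_DATABASE
        IS 'system_u:object_r:sepgsql_db_t:s0';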

I won't have time to do anything about this anytime soon, but I think we
should fix that at some point.  Shall I put this on the todo? Or do we
want to create an 'open items' page that's not major version specific?

Greetings,

Andres Freund




[HACKERS] LANGUAGE sql functions don't use the custom plan logic

2015-07-10 Thread Andres Freund
Hi,

I was recently forcefully reminded that sql functions don't use the
plancache.c infrastructure. I was sort of aware of that, but I didn't
think far enough ahead to realize that this also implies we'll not use
custom plans for statements with parameters when it'd be helpful.

That's imo quite the trap. Should we document it somewhere?

Greetings,

Andres Freund




Re: [HACKERS] Re: Removing SSL renegotiation (Was: Should we back-patch SSL renegotiation fixes?)

2015-07-10 Thread Andres Freund
On 2015-07-01 23:32:23 -0400, Noah Misch wrote:
> We'd need to be triply confident that we know better than the DBA before
> removing flexibility in back branches.
> +1 for just changing the default.

I think we do. But I also think that I pretty clearly lost this
argument, so let's just change the default.

Is anybody willing to work on this?




Re: [HACKERS] WAL logging problem in 9.4.3?

2015-07-10 Thread Andres Freund
On 2015-07-10 13:38:50 +0300, Heikki Linnakangas wrote:
> In the long-term, I'd like to refactor this whole thing so that we never
> WAL-log any operations on a relation that's created in the same transaction
> (when wal_level=minimal). Instead, at COMMIT, we'd fsync() the relation, or
> if it's smaller than some threshold, WAL-log the contents of the whole file
> at that point. That would move all that
> more-difficult-than-it-seems-at-first-glance logic from COPY and indexam's
> to a central location, and it would allow the same optimization for all
> operations, not just COPY. But that probably isn't feasible to backpatch.

I don't think that's really realistic until we have a buffer manager
that lets you efficiently scan for all pages of a relation :(




Re: [HACKERS] WAL logging problem in 9.4.3?

2015-07-10 Thread Andres Freund
On 2015-07-10 19:23:28 +0900, Fujii Masao wrote:
> Maybe I'm missing something. But I'm starting to wonder why TRUNCATE
> and INSERT (or even all the operations on a table created in
> the current transaction) need to be WAL-logged while COPY can be
> optimized. If no WAL records are generated on that table, the problem
> we're talking about seems not to occur. Also, this seems safe and
> doesn't degrade the performance of data loading. Thoughts?

Skipping WAL logging means that you need to scan through the whole of
shared buffers to write out dirty buffers and fsync the segments. A
single insert WAL record is a couple of orders of magnitude cheaper than
that.  Essentially, doing this just for COPY is a heuristic.




Re: [HACKERS] WAL logging problem in 9.4.3?

2015-07-10 Thread Fujii Masao
On Fri, Jul 10, 2015 at 2:27 AM, Tom Lane  wrote:
> Fujii Masao  writes:
>> On Tue, Jul 7, 2015 at 12:49 AM, Tom Lane  wrote:
>>> One idea I had was to allow the COPY optimization only if the heap file is
>>> physically zero-length at the time the COPY starts.
>
>> This seems not helpful for the case where TRUNCATE is executed
>> before COPY. No?
>
> Huh?  The heap file would be zero length in that case.
>
>> So, if COPY is executed multiple times at the same transaction,
>> only first COPY can be optimized?
>
> This is true, and I don't think we should care, especially not if we're
> going to take risks of incorrect behavior in order to optimize that
> third-order case.  The fact that we're dealing with this bug at all should
> remind us that this stuff is harder than it looks.  I want a simple,
> reliable, back-patchable fix, and I do not believe that what you are
> suggesting would be any of those.

Maybe I'm missing something. But I'm starting to wonder why TRUNCATE
and INSERT (or even all the operations on a table created in
the current transaction) need to be WAL-logged while COPY can be
optimized. If no WAL records are generated on that table, the problem
we're talking about seems not to occur. Also, this seems safe and
doesn't degrade the performance of data loading. Thoughts?
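
For concreteness, the kind of sequence being discussed (a sketch, with
wal_level = minimal; the file path is made up):

    BEGIN;
    CREATE TABLE t (i int);
    COPY t FROM '/tmp/t.dat';   -- can skip WAL, heap_sync at commit
    TRUNCATE t;                 -- WAL-logged
    COPY t FROM '/tmp/t.dat';   -- the interaction at issue
    COMMIT;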

Regards,

-- 
Fujii Masao




Re: [HACKERS] Fillfactor for GIN indexes

2015-07-10 Thread Alexander Korotkov
On Fri, Jul 10, 2015 at 4:54 AM, Michael Paquier wrote:

>
>
> On Thu, Jul 9, 2015 at 10:33 PM, Alexander Korotkov wrote:
> > [...]
>
> + /* Caclculate max data size on page according to fillfactor */
> s/Caclculate/Calculate
>
> When creating a simple gin index, I am seeing that GinGetMaxDataSize gets
> called with ginEntryInsert:
>   * frame #0: 0x00010a49d72e
> postgres`GinGetMaxDataSize(index=0x000114168378, isBuild='\x01') + 14
> at gindatapage.c:436
> frame #1: 0x00010a49ecbe
> postgres`createPostingTree(index=0x000114168378,
> items=0x000114a9c038, nitems=35772, buildStats=0x7fff55787350) +
> 302 at gindatapage.c:1754
> frame #2: 0x00010a493423
> postgres`buildFreshLeafTuple(ginstate=0x7fff55784d90, attnum=1,
> key=2105441, category='\0', items=0x000114a9c038, nitem=35772,
> buildStats=0x7fff55787350) + 339 at gininsert.c:158
> frame #3: 0x00010a492f84
> postgres`ginEntryInsert(ginstate=0x7fff55784d90, attnum=1, key=2105441,
> category='\0', items=0x000114a9c038, nitem=35772,
> buildStats=0x7fff55787350) + 916 at gininsert.c:228
>
> And as far as I can see GinGetMaxDataSize uses isBuild:
> + int
> + GinGetMaxDataSize(Relation index, bool isBuild)
> + {
> + int fillfactor, result;
> +
> + /* Grab option value which affects only index build */
> + if (!isBuild)
> However isBuild is not set in this code path, see
> http://www.postgresql.org/message-id/cab7npqsc4vq9mhkqm_yvafcteho-iuy8skbxydnmgnai1xn...@mail.gmail.com
> where I reported the problem. So this code is somewhat broken, no? I am
> attaching to this email the patch in question that should be applied on top
> of Alexander's latest version.
>

I didn't get what the problem is. Was this stacktrace taken during index
build? If so, GinGetMaxDataSize really should get isBuild='\x01'. It is set
by the following line

maxdatasize = GinGetMaxDataSize(index, buildStats ? true: false);


> + #define GIN_MIN_FILLFACTOR20
> + #define GIN_DEFAULT_FILLFACTOR90
> I am still worrying about using a default fillfactor at 90, as we did a
> lot of promotion about compression of GIN indexes in 9.4. Feel free to
> ignore this comment if you think that 90 is a good default. The difference
> is visibly in order of MBs even for large indexes still, this is changing
> the default of 9.4 and 9.5.
>

On the other side, it's unclear why we should have a fillfactor distinct
from btree's..

--
Alexander Korotkov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company


Re: [HACKERS] Freeze avoidance of very large table.

2015-07-10 Thread Simon Riggs
On 10 July 2015 at 09:49, Sawada Masahiko  wrote:

>
> > It is a minor thing, but if there is no other use for this fourth
> > "bit-space", it seems a shame to waste it when there is some use for
> it.  I
> > haven't looked at the code around this area to know how hard it would be
> to
> > implement the setting and clearing of the bit.
>
> I think so too; we would be able to use the unused fourth bit state
> efficiently.
> Should I include these improvements in this patch?
> This topic should be discussed on another thread after this feature is
> committed, I think.
>

The impossible state acts as a diagnostic check for us to ensure the bitmap
is not itself corrupt.

-1 for using it for another purpose.

-- 
Simon Riggshttp://www.2ndQuadrant.com/

PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services


[HACKERS] Minor code improvements to create_foreignscan_plan/ExecInitForeignScan

2015-07-10 Thread Etsuro Fujita
To save cycles, I modified create_foreignscan_plan so that it detects
whether any system columns are requested if scanning a base relation.
Also, I revised other code there a little bit.

For ExecInitForeignScan, I simplified the code there to determine the
scan tuple type, which seems to me complex beyond necessity.  Maybe
that's nitpicking, though.

Best regards,
Etsuro Fujita
*** a/src/backend/executor/nodeForeignscan.c
--- b/src/backend/executor/nodeForeignscan.c
***
*** 159,182  ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags)
  	}
  
  	/*
! 	 * Determine the scan tuple type.  If the FDW provided a targetlist
! 	 * describing the scan tuples, use that; else use base relation's rowtype.
  	 */
! 	if (node->fdw_scan_tlist != NIL || currentRelation == NULL)
  	{
  		TupleDesc	scan_tupdesc;
  
  		scan_tupdesc = ExecTypeFromTL(node->fdw_scan_tlist, false);
  		ExecAssignScanType(&scanstate->ss, scan_tupdesc);
  		/* Node's targetlist will contain Vars with varno = INDEX_VAR */
  		tlistvarno = INDEX_VAR;
  	}
- 	else
- 	{
- 		ExecAssignScanType(&scanstate->ss, RelationGetDescr(currentRelation));
- 		/* Node's targetlist will contain Vars with varno = scanrelid */
- 		tlistvarno = scanrelid;
- 	}
  
  	/*
  	 * Initialize result tuple type and projection info.
--- 159,183 
  	}
  
  	/*
! 	 * Determine the scan tuple type.
  	 */
! 	if (scanrelid > 0)
! 	{
! 		/* Use base relation's rowtype */
! 		ExecAssignScanType(&scanstate->ss, RelationGetDescr(currentRelation));
! 		/* Node's targetlist will contain Vars with varno = scanrelid */
! 		tlistvarno = scanrelid;
! 	}
! 	else
  	{
  		TupleDesc	scan_tupdesc;
  
+ 		/* Use targetlist provided by the FDW */
  		scan_tupdesc = ExecTypeFromTL(node->fdw_scan_tlist, false);
  		ExecAssignScanType(&scanstate->ss, scan_tupdesc);
  		/* Node's targetlist will contain Vars with varno = INDEX_VAR */
  		tlistvarno = INDEX_VAR;
  	}
  
  	/*
  	 * Initialize result tuple type and projection info.
*** a/src/backend/optimizer/plan/createplan.c
--- b/src/backend/optimizer/plan/createplan.c
***
*** 2050,2058  create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
  	RelOptInfo *rel = best_path->path.parent;
  	Index		scan_relid = rel->relid;
  	Oid			rel_oid = InvalidOid;
- 	Bitmapset  *attrs_used = NULL;
- 	ListCell   *lc;
- 	int			i;
  
  	Assert(rel->fdwroutine != NULL);
  
--- 2050,2055 
***
*** 2112,2147  create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
  	}
  
  	/*
! 	 * Detect whether any system columns are requested from rel.  This is a
! 	 * bit of a kluge and might go away someday, so we intentionally leave it
! 	 * out of the API presented to FDWs.
! 	 *
! 	 * First, examine all the attributes needed for joins or final output.
! 	 * Note: we must look at reltargetlist, not the attr_needed data, because
! 	 * attr_needed isn't computed for inheritance child rels.
  	 */
! 	pull_varattnos((Node *) rel->reltargetlist, rel->relid, &attrs_used);
! 
! 	/* Add all the attributes used by restriction clauses. */
! 	foreach(lc, rel->baserestrictinfo)
  	{
! 		RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
  
! 		pull_varattnos((Node *) rinfo->clause, rel->relid, &attrs_used);
! 	}
  
! 	/* Now, are any system columns requested from rel? */
! 	scan_plan->fsSystemCol = false;
! 	for (i = FirstLowInvalidHeapAttributeNumber + 1; i < 0; i++)
! 	{
! 		if (bms_is_member(i - FirstLowInvalidHeapAttributeNumber, attrs_used))
  		{
! 			scan_plan->fsSystemCol = true;
! 			break;
  		}
- 	}
  
! 	bms_free(attrs_used);
  
  	return scan_plan;
  }
--- 2109,2153 
  	}
  
  	/*
! 	 * If we're scanning a base relation, detect whether any system columns
! 	 * are requested from the rel.  (Irrelevant if scanning a join relation.)
! 	 * This is a bit of a kluge and might go away someday, so we intentionally
! 	 * leave it out of the API presented to FDWs.
  	 */
! 	scan_plan->fsSystemCol = false;
! 	if (scan_relid > 0)
  	{
! 		Bitmapset  *attrs_used = NULL;
! 		ListCell   *lc;
! 		int			i;
  
! 		/*
! 		 * First, examine all the attributes needed for joins or final output.
! 		 * Note: we must look at reltargetlist, not the attr_needed data,
! 		 * because attr_needed isn't computed for inheritance child rels.
! 		 */
! 		pull_varattnos((Node *) rel->reltargetlist, scan_relid, &attrs_used);
  
! 		/* Add all the attributes used by restriction clauses. */
! 		foreach(lc, rel->baserestrictinfo)
  		{
! 			RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
! 
! 			pull_varattnos((Node *) rinfo->clause, scan_relid, &attrs_used);
  		}
  
! 		/* Now, are any system columns requested from rel? */
! 		for (i = FirstLowInvalidHeapAttributeNumber + 1; i < 0; i++)
! 		{
! 			if (bms_is_member(i - FirstLowInvalidHeapAttributeNumber, attrs_used))
! 			{
! scan_plan->fsSystemCol = true;
! break;
! 			}
! 		}
! 
! 		bms_free(attrs_used);
! 	}
  
  	return scan_plan;
  }

-- 
S

Re: [HACKERS] configure can't detect proper pthread flags

2015-07-10 Thread Heikki Linnakangas

On 07/08/2015 08:50 PM, Tom Lane wrote:

Heikki Linnakangas  writes:

The only scenario where you might now get warnings if we switch to
upstream version, and didn't before, is if one of the flags makes
pthreads to work, but also creates compiler warnings, while another flag
later in the list would make it work without warnings. That's not
totally inconceivable, but I doubt it happens. In any case, there's
nothing PostgreSQL-specific about that, so if that happens, I'd like to
find out so that we can complain to the upstream.


Actually, it looks like the modern version of ax_pthread.m4 adds -Werror
while testing, so that this should not be an issue anyway (at least on
compilers that accept -Werror).


To conclude this thread: I replaced our custom pthread-checking autoconf 
macro with the latest upstream version in PostgreSQL master (i.e. the 
future 9.6 version). That should fix your original problem with the 
uClibc, OpenSSL, and pthreads combination.


As a workaround for current versions, you can give 
PTHREAD_CFLAGS=-pthread (or whatever the right flag for that platform 
is) explicitly on the configure command line. That way the autoconf 
script will only test if that option works, skipping the autodetection 
logic and the warnings-test, so it will pass.
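
Concretely, that would look something like this (an illustrative
invocation; substitute whatever flag is right for your platform):

    ./configure PTHREAD_CFLAGS=-pthread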


- Heikki





Re: [HACKERS] WAL logging problem in 9.4.3?

2015-07-10 Thread Andres Freund
On 2015-07-09 19:06:11 -0400, Tom Lane wrote:
> What evidence have you got to base that value judgement on?
>
> cab9a0656c36739f was based on an actual user complaint, so we have good
> evidence that there are people out there who care about the cost of
> truncating a table many times in one transaction.  On the other hand,
> I know of no evidence that anyone's depending on multiple sequential
> COPYs, nor intermixed COPY and INSERT, to be fast.  The original argument
> for having this COPY optimization at all was to make restoring pg_dump
> scripts in a single transaction fast; and that use-case doesn't care
> about anything but a single COPY into a virgin table.

Well, you'll hardly have heard complaints about COPY, given that we've
behaved this way for a long while.

I definitely know of ETL-like processes that have relied on subsequent
COPYs into truncated relations being cheaper. I can't remember the same
for intermixed COPY and INSERT, but it wouldn't surprise me if somebody
mixed COPY and UPDATEs rather freely for ETL.

> I think you're worrying about exactly the wrong case.
>
> > My tentative guess is that the best course is to
> > a) Make heap_truncate_one_rel() create a new relfilenode. That fixes the
> >truncation replay issue.
> > b) Force new pages to be used when using the heap_sync mode in
> >COPY. That avoids the INIT danger you found. It seems rather
> >reasonable to avoid using pages that have already been the target of
> >WAL logging here in general.
>
> And what reason is there to think that this would fix all the
> problems?

Yea, that's the big problem.

> Again, the only known field usage for the COPY optimization is the pg_dump
> scenario; were that not so, we'd have noticed the problem long since.
> So I don't have any faith that this is a well-tested area.

You need to crash at the right moment. I don't think that's exercised
that frequently...




Re: [HACKERS] WAL logging problem in 9.4.3?

2015-07-10 Thread Heikki Linnakangas

On 07/10/2015 02:06 AM, Tom Lane wrote:

Andres Freund  writes:

On 2015-07-06 11:49:54 -0400, Tom Lane wrote:

Rather than reverting cab9a0656c36739f, which would re-introduce a
different performance problem, perhaps we could have COPY create a new
relfilenode when it does this.  That should be safe if the table was
previously empty.



I'm not convinced that cab9a0656c36739f needs to survive in that
form. To me only allowing one COPY to benefit from the wal_level =
minimal optimization has a significantly higher cost than
cab9a0656c36739f.


What evidence have you got to base that value judgement on?

cab9a0656c36739f was based on an actual user complaint, so we have good
evidence that there are people out there who care about the cost of
truncating a table many times in one transaction.


Yeah, if we specifically made that case cheap in response to a 
complaint, it would be a regression to make it expensive again. We might 
get away with that in a major version, but we would hate to backpatch it.



My tentative guess is that the best course is to
a) Make heap_truncate_one_rel() create a new relfilenode. That fixes the
truncation replay issue.
b) Force new pages to be used when using the heap_sync mode in
COPY. That avoids the INIT danger you found. It seems rather
reasonable to avoid using pages that have already been the target of
WAL logging here in general.


And what reason is there to think that this would fix all the problems?
We know of those two, but we've not exactly looked hard for other cases.


Hmm. Perhaps that could be made to work, but it feels pretty fragile. 
For example, you could have an insert trigger on the table that inserts 
additional rows to the same table, and those inserts would be intermixed 
with the rows inserted by COPY. You'll have to avoid that somehow. 
Full-page images in general are a problem. If a checkpoint happens, and 
a trigger modifies the page we're COPYing to in any way, you have the 
same issue. Even reading a page can cause a full-page image of it to be 
written: If you update a hint bit on the page while reading it, and 
checksums are enabled, and a checkpoint happened since the page was last 
updated, bang. I don't think that's a problem in this case because there 
are no hint bits to be set on pages that we're COPYing to, but it's a 
whole new subtle assumption.


I think we should
1. reliably and explicitly keep track of whether we've WAL-logged any 
TRUNCATE, INSERT/UPDATE+INIT, or any other full-page-logging operations 
on the relation, and

2. make sure we never skip WAL-logging again if we have.

Let's add a flag, rd_skip_wal_safe, to RelationData that's initially set 
when a new relfilenode is created, i.e. whenever rd_createSubid or 
rd_newRelfilenodeSubid is set. Whenever a TRUNCATE or a full-page image 
(including INSERT/UPDATE+INIT) is WAL-logged, clear the flag. In copy.c, 
only skip WAL-logging if the flag is still set. To deal with the case 
that the flag gets cleared in the middle of COPY, also check the flag 
whenever we're about to skip WAL-logging in heap_insert, and if it's 
been cleared, ignore the HEAP_INSERT_SKIP_WAL option and WAL-log anyway.
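
To make that concrete, here is a minimal sketch of the check heap_insert
could perform, assuming the rd_skip_wal_safe field proposed above (the
helper name is hypothetical; this is a sketch, not a patch):

/*
 * Hypothetical helper: WAL-skipping is only safe if the caller requested
 * it and nothing has been WAL-logged for this relfilenode since it was
 * created (rd_skip_wal_safe is cleared whenever a TRUNCATE or full-page
 * image is WAL-logged for the relation).
 */
static bool
skip_wal_still_safe(Relation rel, int options)
{
	if ((options & HEAP_INSERT_SKIP_WAL) == 0)
		return false;

	return rel->rd_skip_wal_safe;
}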


Compared to the status quo, that disables the WAL-skipping optimization 
in the scenario where you CREATE, INSERT, then COPY to a table in the 
same transaction. I think that's acceptable.


(Alternatively, to handle the case that the flag gets cleared in the 
middle of COPY, add another flag to RelationData indicating that a 
WAL-skipping COPY is in-progress, and refrain from WAL-logging any 
FPW-writing operations on the table when it's set (or any operations 
whatsoever). That'd be more efficient, but it's such a rare corner case 
that it hardly matters.)


- Heikki





Re: [HACKERS] Freeze avoidance of very large table.

2015-07-10 Thread Sawada Masahiko
On Fri, Jul 10, 2015 at 3:42 AM, Jeff Janes  wrote:
> On Wed, Jul 8, 2015 at 10:10 PM, Sawada Masahiko 
> wrote:
>>
>> On Thu, Jul 9, 2015 at 4:31 AM, Jeff Janes  wrote:
>> > On Fri, Jul 3, 2015 at 1:25 AM, Sawada Masahiko 
>> > wrote:
>> >>
>> >> It's impossible to have the VM bits set to frozen but not visible.
>> >> These bits are controlled independently. But eventually, when the
>> >> all-frozen bit is set, all-visible is also set.
>> >
>> >
>> > If that combination is currently impossible, could it be used to indicate
>> > that the page is all empty?
>>
>> Yeah, the state with the VM bits set to frozen but not visible is
>> impossible, so we could use that state to represent some other status
>> of the page.
>>
>> > Having a crash-proof bitmap of all-empty pages would make vacuum
>> > truncation scans much more efficient.
>>
>> An empty page is always marked all-visible by vacuum today; isn't that
>> enough?
>
>
> The "current" vacuum can just remember that they were empty as well as
> all-visible.
>
> But the next vacuum that occurs on the table won't know that they are empty,
> just that they are all-visible, so it can't truncate them away without
> having to read each one first.

Yeah, that would be effective for vacuuming empty pages.

>
> It is a minor thing, but if there is no other use for this fourth
> "bit-space", it seems a shame to waste it when there is some use for it.  I
> haven't looked at the code around this area to know how hard it would be to
> implement the setting and clearing of the bit.

I think so too; we would be able to put the unused fourth bit state to
efficient use.
Should I include this improvement in this patch?
I think this topic should be discussed on another thread after this
feature is committed.
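
For reference, the four combinations of the two per-page bits under
discussion look like this (names are placeholders, not the patch's
actual definitions):

#define VM_ALL_VISIBLE	0x01	/* placeholder name */
#define VM_ALL_FROZEN	0x02	/* placeholder name */

/*
 * Per-page states:
 *   0x00  neither bit set
 *   0x01  all-visible
 *   0x03  all-visible and all-frozen
 *   0x02  all-frozen but not all-visible -- impossible today, hence
 *         free to mean "all-empty" as suggested above
 */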

Regards,

--
Sawada Masahiko




Re: [HACKERS] Asynchronous execution on FDW

2015-07-10 Thread Kyotaro HORIGUCHI
Hi,

> Currently there's no means to observe what it is doing from
> outside, so the additional sixth patch is to output debug
> messages about asynchronous execution.

The sixth patch did not contain one of the messages shown in the example.
Attached is the revised version.
Other patches are not changed.

-- 
Kyotaro Horiguchi
NTT Open Source Software Center
From d1ed9fe6a4e68d42653a552a680a038a0aef5683 Mon Sep 17 00:00:00 2001
From: Kyotaro Horiguchi 
Date: Fri, 10 Jul 2015 15:02:59 +0900
Subject: [PATCH 6/6] Debug message for async execution of postgres_fdw.

---
 contrib/postgres_fdw/postgres_fdw.c | 30 ++
 1 file changed, 30 insertions(+)

diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index dc60bcc..a8a9cc5 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -385,6 +385,25 @@ postgres_fdw_handler(PG_FUNCTION_ARGS)
 	PG_RETURN_POINTER(routine);
 }
 
+static void
+postgresDebugLog(PgFdwScanState *fsstate, char *msg, void* ptr)
+{
+	ForeignTable *table = GetForeignTable(RelationGetRelid(fsstate->rel));
+	ForeignServer *server = GetForeignServer(table->serverid);
+
+	if (fsstate->conn)
+		ereport(LOG,
+				(errmsg ("pg_fdw: [%s/%s/%p] %s",
+						 get_rel_name(table->relid), server->servername,
+						 fsstate->conn, msg),
+				 errhidestmt(true)));
+	else
+		ereport(LOG,
+				(errmsg ("pg_fdw: [%s/%s/--] %s",
+						 get_rel_name(table->relid), server->servername, msg),
+				 errhidestmt(true)));
+}
+
 /*
  * Read boolean server/table options
  * 0 is false, 1 is true, -1 is not specified
@@ -928,7 +947,10 @@ postgresStartForeignScan(ForeignScanState *node)
 	PgFdwScanState *fsstate = (PgFdwScanState *)node->fdw_state;
 
 	if (!fsstate->allow_async)
+	{
+		postgresDebugLog(fsstate, "Async start administratively denied.", NULL);
 		return false;
+	}
 
 	/*
 	 * In the current implementation, scans can run asynchronously if it is the
@@ -943,9 +965,11 @@ postgresStartForeignScan(ForeignScanState *node)
 		 * for details
 		 */
 		fetch_more_data(fsstate, START_ONLY);
+		postgresDebugLog(fsstate, "Async exec started.", fsstate->conn);
 		return true;
 	}
 
+	postgresDebugLog(fsstate, "Async exec denied.", NULL);
 	return false;
 }
 
@@ -2162,11 +2186,16 @@ fetch_more_data(PgFdwScanState *fsstate, fetch_mode cmd)
 			 */
 			if (fsstate != PFCgetAsyncScan(conn))
 			{
+				postgresDebugLog(fsstate,
+								 "Changed to sync fetch (different scan)",
+								 fsstate->conn);
 fetch_more_data(PFCgetAsyncScan(conn), EXIT_ASYNC);
 res = PFCexec(conn, sql);
 			}
 			else
 			{
+				postgresDebugLog(fsstate,
+								 "Async fetch", fsstate->conn);
 /* Get result of running async fetch */
 res = PFCgetResult(conn);
 if (PQntuples(res) == fetch_size)
@@ -2196,6 +2225,7 @@ fetch_more_data(PgFdwScanState *fsstate, fetch_mode cmd)
 			}
 
 			/* Elsewise do synchronous query execution */
+			postgresDebugLog(fsstate, "Sync fetch.", conn);
 			PFCsetAsyncScan(conn, NULL);
 			res = PFCexec(conn, sql);
 		}
-- 
1.8.3.1

