Re: Multi-Column List Partitioning

2021-10-11 Thread Rajkumar Raghuwanshi
 0x00845536 in planner (parse=0x2f56d30,
query_string=0x2eadcd0 "EXPLAIN (COSTS OFF)\nSELECT
t1.c,t2.c,t3.c,t1.d,t2.d,t3.d FROM plt1 t1 INNER JOIN plt2 t2 ON (t1.c =
t2.c AND t1.d = t2.d) LEFT JOIN plt1 t3 on (t2.c = t3.c AND t2.d = t3.d);",
cursorOptions=2048, boundParams=0x0) at planner.c:277
#18 0x00978faf in pg_plan_query (querytree=0x2f56d30,
query_string=0x2eadcd0 "EXPLAIN (COSTS OFF)\nSELECT
t1.c,t2.c,t3.c,t1.d,t2.d,t3.d FROM plt1 t1 INNER JOIN plt2 t2 ON (t1.c =
t2.c AND t1.d = t2.d) LEFT JOIN plt1 t3 on (t2.c = t3.c AND t2.d = t3.d);",
cursorOptions=2048, boundParams=0x0) at postgres.c:847
#19 0x00693e50 in ExplainOneQuery (query=0x2f56d30,
cursorOptions=2048, into=0x0, es=0x2fa0920,
queryString=0x2eadcd0 "EXPLAIN (COSTS OFF)\nSELECT
t1.c,t2.c,t3.c,t1.d,t2.d,t3.d FROM plt1 t1 INNER JOIN plt2 t2 ON (t1.c =
t2.c AND t1.d = t2.d) LEFT JOIN plt1 t3 on (t2.c = t3.c AND t2.d = t3.d);",
params=0x0, queryEnv=0x0) at explain.c:397
#20 0x006939a5 in ExplainQuery (pstate=0x2f9e0a0, stmt=0x2f56b50,
params=0x0, dest=0x2f9e008) at explain.c:281
#21 0x00981de8 in standard_ProcessUtility (pstmt=0x2fd2220,
queryString=0x2eadcd0 "EXPLAIN (COSTS OFF)\nSELECT
t1.c,t2.c,t3.c,t1.d,t2.d,t3.d FROM plt1 t1 INNER JOIN plt2 t2 ON (t1.c =
t2.c AND t1.d = t2.d) LEFT JOIN plt1 t3 on (t2.c = t3.c AND t2.d = t3.d);",
readOnlyTree=false, context=PROCESS_UTILITY_TOPLEVEL, params=0x0,
queryEnv=0x0, dest=0x2f9e008, qc=0x7ffea91f9aa0) at utility.c:862
#22 0x00981585 in ProcessUtility (pstmt=0x2fd2220,
queryString=0x2eadcd0 "EXPLAIN (COSTS OFF)\nSELECT
t1.c,t2.c,t3.c,t1.d,t2.d,t3.d FROM plt1 t1 INNER JOIN plt2 t2 ON (t1.c =
t2.c AND t1.d = t2.d) LEFT JOIN plt1 t3 on (t2.c = t3.c AND t2.d = t3.d);",
readOnlyTree=false, context=PROCESS_UTILITY_TOPLEVEL, params=0x0,
queryEnv=0x0, dest=0x2f9e008, qc=0x7ffea91f9aa0) at utility.c:527
#23 0x009801ba in PortalRunUtility (portal=0x2f10180,
pstmt=0x2fd2220, isTopLevel=true, setHoldSnapshot=true, dest=0x2f9e008,
qc=0x7ffea91f9aa0) at pquery.c:1155
#24 0x0097ff20 in FillPortalStore (portal=0x2f10180,
isTopLevel=true) at pquery.c:1028
#25 0x0097f883 in PortalRun (portal=0x2f10180,
count=9223372036854775807, isTopLevel=true, run_once=true, dest=0x2fd2310,
altdest=0x2fd2310, qc=0x7ffea91f9c60) at pquery.c:760
#26 0x009795d1 in exec_simple_query (
query_string=0x2eadcd0 "EXPLAIN (COSTS OFF)\nSELECT
t1.c,t2.c,t3.c,t1.d,t2.d,t3.d FROM plt1 t1 INNER JOIN plt2 t2 ON (t1.c =
t2.c AND t1.d = t2.d) LEFT JOIN plt1 t3 on (t2.c = t3.c AND t2.d = t3.d);")
at postgres.c:1214
#27 0x0097da8d in PostgresMain (dbname=0x2ed8068 "postgres",
username=0x2ed8048 "edb") at postgres.c:4497
#28 0x008b9699 in BackendRun (port=0x2ecfd00) at postmaster.c:4560

Thanks & Regards,
Rajkumar Raghuwanshi



On Mon, Oct 11, 2021 at 11:05 AM Rajkumar Raghuwanshi <
rajkumar.raghuwan...@enterprisedb.com> wrote:

> Thanks for the patch, it applied cleanly and fixed the reported issue.  I
> observed another case where
> In case of multi-col list partition on the same column query is not
> picking partition wise join. Is this expected?
>
> CREATE TABLE plt1 (a int, b int, c varchar) PARTITION BY LIST(c,c);
> CREATE TABLE plt1_p1 PARTITION OF plt1 FOR VALUES IN
> (('0001','0001'),('0002','0002'),('0003','0003'));
> CREATE TABLE plt1_p2 PARTITION OF plt1 FOR VALUES IN
> (('0004','0004'),('0005','0005'),('0006','0006'));
> CREATE TABLE plt1_p3 PARTITION OF plt1 DEFAULT;
> INSERT INTO plt1 SELECT i, i % 47, to_char(i % 11, 'FM') FROM
> generate_series(0, 500) i WHERE i % 11 NOT  IN (0,10);
> ANALYSE plt1;
> CREATE TABLE plt2 (a int, b int, c varchar) PARTITION BY LIST(c,c);
> CREATE TABLE plt2_p1 PARTITION OF plt2 FOR VALUES IN
> (('0001','0001'),('0002','0002'),('0003','0003'));
> CREATE TABLE plt2_p2 PARTITION OF plt2 FOR VALUES IN
> (('0004','0004'),('0005','0005'),('0006','0006'));
> CREATE TABLE plt2_p3 PARTITION OF plt2 DEFAULT;
> INSERT INTO plt2 SELECT i, i % 47, to_char(i % 11, 'FM') FROM
> generate_series(0, 500) i WHERE i % 11 NOT  IN (0,10);
> ANALYSE plt2;
> SET enable_partitionwise_join TO true;
> EXPLAIN (COSTS OFF) SELECT t1.a, t1.c, t2.a, t2.c FROM plt1 t1 INNER JOIN
> plt2 t2 ON t1.c = t2.c;
>
> postgres=# EXPLAIN (COSTS OFF) SELECT t1.a, t1.c, t2.a, t2.c FROM plt1 t1
> INNER JOIN plt2 t2 ON t1.c = t2.c;
>  QUERY PLAN
> 
>  Hash Join
>Hash Cond: ((t1.c)::text = (t2.c)::text)
>->  Append
>  ->  Seq Scan on plt1_p1 t1_1
>  ->  Seq Scan on plt1_p2 t1_2
>  ->  Seq Scan on plt1_p3 t1_3
>->  Hash
>  ->  Append
>->  Seq Scan on plt2_p1 t2_1
>->

Re: Multi-Column List Partitioning

2021-10-10 Thread Rajkumar Raghuwanshi
Thanks for the patch; it applied cleanly and fixed the reported issue.  I
observed another case:
in the case of a multi-column list partition on the same column, the query is
not picking a partition-wise join. Is this expected?

CREATE TABLE plt1 (a int, b int, c varchar) PARTITION BY LIST(c,c);
CREATE TABLE plt1_p1 PARTITION OF plt1 FOR VALUES IN
(('0001','0001'),('0002','0002'),('0003','0003'));
CREATE TABLE plt1_p2 PARTITION OF plt1 FOR VALUES IN
(('0004','0004'),('0005','0005'),('0006','0006'));
CREATE TABLE plt1_p3 PARTITION OF plt1 DEFAULT;
INSERT INTO plt1 SELECT i, i % 47, to_char(i % 11, 'FM') FROM
generate_series(0, 500) i WHERE i % 11 NOT  IN (0,10);
ANALYSE plt1;
CREATE TABLE plt2 (a int, b int, c varchar) PARTITION BY LIST(c,c);
CREATE TABLE plt2_p1 PARTITION OF plt2 FOR VALUES IN
(('0001','0001'),('0002','0002'),('0003','0003'));
CREATE TABLE plt2_p2 PARTITION OF plt2 FOR VALUES IN
(('0004','0004'),('0005','0005'),('0006','0006'));
CREATE TABLE plt2_p3 PARTITION OF plt2 DEFAULT;
INSERT INTO plt2 SELECT i, i % 47, to_char(i % 11, 'FM') FROM
generate_series(0, 500) i WHERE i % 11 NOT  IN (0,10);
ANALYSE plt2;
SET enable_partitionwise_join TO true;
EXPLAIN (COSTS OFF) SELECT t1.a, t1.c, t2.a, t2.c FROM plt1 t1 INNER JOIN
plt2 t2 ON t1.c = t2.c;

postgres=# EXPLAIN (COSTS OFF) SELECT t1.a, t1.c, t2.a, t2.c FROM plt1 t1
INNER JOIN plt2 t2 ON t1.c = t2.c;
 QUERY PLAN

 Hash Join
   Hash Cond: ((t1.c)::text = (t2.c)::text)
   ->  Append
 ->  Seq Scan on plt1_p1 t1_1
 ->  Seq Scan on plt1_p2 t1_2
 ->  Seq Scan on plt1_p3 t1_3
   ->  Hash
 ->  Append
   ->  Seq Scan on plt2_p1 t2_1
   ->  Seq Scan on plt2_p2 t2_2
   ->  Seq Scan on plt2_p3 t2_3
(11 rows)

Thanks & Regards,
Rajkumar Raghuwanshi



On Thu, Oct 7, 2021 at 6:03 PM Nitin Jadhav 
wrote:

> Thanks Rajkumar for testing.
>
> > I think it should throw an error as the partition by list has only 1
> column but we are giving 2 values.
>
> I also agree that it should throw an error in the above case. Fixed the
> issue in the attached patch. Also added related test cases to the
> regression test suite.
>
>
> > also if you see \d+ showing plt1_p1 partition value as ‘(0001,0001)’
> instead of ('0001','0001').
>
> Now throwing errors in the initial stage, this case doesn't arise.
>
> Please share if you find any other issues.
>
> Thanks & Regards,
> Nitin Jadhav
>
>
>
>
>
> On Thu, Oct 7, 2021 at 4:05 PM Rajkumar Raghuwanshi <
> rajkumar.raghuwan...@enterprisedb.com> wrote:
>
>> Thanks Nitin,
>>
>> v4 patches applied cleanly and make check is passing now. While testing
>> further I observed that if multiple values are given for a single
>> column list partition it is not giving error instead it is changing
>> values itself. Please find the example below.
>>
>> postgres=# CREATE TABLE plt1 (a int, b varchar) PARTITION BY LIST(b);
>> CREATE TABLE
>> postgres=# CREATE TABLE plt1_p1 PARTITION OF plt1 FOR VALUES IN
>> (('0001','0001'),('0002','0002'));
>> CREATE TABLE
>> postgres=# \d+ plt1;
>>   Partitioned table "public.plt1"
>>  Column |   Type| Collation | Nullable | Default | Storage  |
>> Compression | Stats target | Description
>>
>> +---+---+--+-+--+-+--+-
>>  a  | integer   |   |  | | plain|
>> |  |
>>  b  | character varying |   |  | | extended |
>> |  |
>> Partition key: LIST (b)
>> Partitions: plt1_p1 FOR VALUES IN ('(0001,0001)', '(0002,0002)')
>>
>> I think it should throw an error as the partition by list has only 1
>> column but we are giving 2 values.
>> also if you see \d+ showing plt1_p1 partition value as ‘(0001,0001)’
>> instead of ('0001','0001').
>>
>> Thanks & Regards,
>> Rajkumar Raghuwanshi
>>
>>
>>
>> On Sun, Oct 3, 2021 at 1:52 AM Nitin Jadhav <
>> nitinjadhavpostg...@gmail.com> wrote:
>>
>>> > > On PG head + Nitin's v3 patch + Amit's Delta patch.  Make check is
>>> failing with below errors.
>>> >
>>> > Thanks Rajkumar for testing.
>>> >
>>> > Here's a v2 of the delta patch that should fix both of these test
>>> > failures.  As I mentioned in my last reply, my delta patch fixed what
>>> > I think were problems in Nitin's v3 patch but were not complete by
>>> > themselves.  

Re: Multi-Column List Partitioning

2021-10-07 Thread Rajkumar Raghuwanshi
Thanks Nitin,

v4 patches applied cleanly and make check is passing now. While testing
further, I observed that if multiple values are given for a single-column
list partition, it does not give an error; instead it changes the values
itself. Please find the example below.

postgres=# CREATE TABLE plt1 (a int, b varchar) PARTITION BY LIST(b);
CREATE TABLE
postgres=# CREATE TABLE plt1_p1 PARTITION OF plt1 FOR VALUES IN
(('0001','0001'),('0002','0002'));
CREATE TABLE
postgres=# \d+ plt1;
  Partitioned table "public.plt1"
 Column |   Type| Collation | Nullable | Default | Storage  |
Compression | Stats target | Description
+---+---+--+-+--+-+--+-
 a  | integer   |   |  | | plain|
  |  |
 b  | character varying |   |  | | extended |
  |  |
Partition key: LIST (b)
Partitions: plt1_p1 FOR VALUES IN ('(0001,0001)', '(0002,0002)')

I think it should throw an error as the partition by list has only 1 column
but we are giving 2 values.
also if you see \d+ showing plt1_p1 partition value as ‘(0001,0001)’
instead of ('0001','0001').

Thanks & Regards,
Rajkumar Raghuwanshi



On Sun, Oct 3, 2021 at 1:52 AM Nitin Jadhav 
wrote:

> > > On PG head + Nitin's v3 patch + Amit's Delta patch.  Make check is
> failing with below errors.
> >
> > Thanks Rajkumar for testing.
> >
> > Here's a v2 of the delta patch that should fix both of these test
> > failures.  As I mentioned in my last reply, my delta patch fixed what
> > I think were problems in Nitin's v3 patch but were not complete by
> > themselves.  Especially, I hadn't bothered to investigate various /*
> > TODO: handle multi-column list partitioning */ sites to deal with my
> > own changes.
>
> Thanks Rajkumar for testing and Thank you Amit for working on v2 of
> the delta patch. Actually I had done the code changes related to
> partition-wise join and I was in the middle of fixing the review
> comments, So I could not share the patch. Anyways thanks for your
> efforts.
>
> > I noticed that multi-column list partitions containing NULLs don't
> > work correctly with partition pruning yet.
> >
> > create table p0 (a int, b text, c bool) partition by list (a, b, c);
> > create table p01 partition of p0 for values in ((1, 1, true), (NULL, 1,
> false));
> > create table p02 partition of p0 for values in ((1, NULL, false));
> > explain select * from p0 where a is null;
> >QUERY PLAN
> > 
> > Seq Scan on p01 p0  (cost=0.00..22.50 rows=6 width=37)
> >  Filter: (a IS NULL)
> > (2 rows)
> >
> > In the attached updated version, I've dealt with some of those such
> > that at least the existing cases exercising partition pruning and
> > partition wise joins now pass.
>
> wrt partition pruning, I have checked the output of the above case
> with the v2 version of the delta patch and without that. The output
> remains same. Kindly let me know if I am missing something. But I feel
> the above output is correct as the partition p01 is the only partition
> which contains NULL value for column a, hence it is showing "Seq scan
> on p01" in the output. Kindly correct me if I am wrong. I feel the
> code changes related to 'null_keys' is not required, hence not
> incorporated that in the attached patch.
>
> wrt partition-wise join, I had run the regression test (with new cases
> related to partition-wise join) on v2 of the delta patch and observed
> the crash. Hence I have not incorporated the partition-wise join
> related code from v2 of delta patch to main v4 patch. Instead I have
> added the partition-wise join related code done by me in the attached
> patch. Please share your thoughts and if possible we can improvise the
> code. Rest of the changes looks good to me and I have incorporated
> that in the attached patch.
>
>
> > I guess that may be due to the following newly added code being
> incomplete:
> > Maybe this function needs to return a "bitmapset" of indexes, because
> > multiple partitions can now contain NULL values.
>
> I feel this function is not required at all as we are not separating
> the non null and null partitions now. Removed in the attached patch.
> Also removed the "scan_null' variable from the structure
> "PruneStepResult" and cleaned up the corresponding code blocks.
>
>
> > This function name may be too generic.  Given that it is specific to
> > implementing list bound de-duplication, maybe the follo

Getting ERROR "subplan "SubPlan 1" was not initialized" in EXISTS subplan when using for list partition.

2021-09-14 Thread Rajkumar Raghuwanshi
Hi,

I am getting "ERROR:  subplan "SubPlan 1" was not initialized" error with
below test case.

CREATE TABLE tbl ( c1 int, c2 int, c3 int ) PARTITION BY LIST (c1);
create table tbl_null PARTITION OF tbl FOR VALUES IN (null);
create table tbl_def PARTITION OF tbl DEFAULT;
insert into tbl values (8800,0,0);
insert into tbl values (1891,1,1);
insert into tbl values (3420,2,0);
insert into tbl values (9850,3,0);
insert into tbl values (7164,4,4);
analyze tbl;
explain (costs off) select count(*) from tbl t1 where (exists(select 1 from
tbl t2 where t2.c1 = t1.c2) or c3 < 0);

postgres=# explain (costs off) select count(*) from tbl t1 where
(exists(select 1 from tbl t2 where t2.c1 = t1.c2) or c3 < 0);
ERROR:  subplan "SubPlan 1" was not initialized

Thanks & Regards,
Rajkumar Raghuwanshi


Re: Multi-Column List Partitioning

2021-09-13 Thread Rajkumar Raghuwanshi
 t2.a,
t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c =
t2.c) WHERE t1.b < 10 ORDER BY t1.a;", cursorOptions=2048, boundParams=0x0)
at planner.c:406
#19 0x00844ce9 in planner (parse=0x1aaa290,
query_string=0x1830fa0 "EXPLAIN (COSTS OFF)\nSELECT t1.a, t1.c, t2.a,
t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c =
t2.c) WHERE t1.b < 10 ORDER BY t1.a;", cursorOptions=2048, boundParams=0x0)
at planner.c:277
#20 0x00978483 in pg_plan_query (querytree=0x1aaa290,
query_string=0x1830fa0 "EXPLAIN (COSTS OFF)\nSELECT t1.a, t1.c, t2.a,
t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c =
t2.c) WHERE t1.b < 10 ORDER BY t1.a;", cursorOptions=2048, boundParams=0x0)
at postgres.c:847
#21 0x006937fc in ExplainOneQuery (query=0x1aaa290,
cursorOptions=2048, into=0x0, es=0x19b36f0,
queryString=0x1830fa0 "EXPLAIN (COSTS OFF)\nSELECT t1.a, t1.c, t2.a,
t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c =
t2.c) WHERE t1.b < 10 ORDER BY t1.a;",
params=0x0, queryEnv=0x0) at explain.c:397
#22 0x00693351 in ExplainQuery (pstate=0x197c410, stmt=0x1aaa0b0,
params=0x0, dest=0x197c378) at explain.c:281
#23 0x009811fa in standard_ProcessUtility (pstmt=0x1a0bfc8,
queryString=0x1830fa0 "EXPLAIN (COSTS OFF)\nSELECT t1.a, t1.c, t2.a,
t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c =
t2.c) WHERE t1.b < 10 ORDER BY t1.a;",
readOnlyTree=false, context=PROCESS_UTILITY_TOPLEVEL, params=0x0,
queryEnv=0x0, dest=0x197c378, qc=0x7fffd6775f90) at utility.c:845
#24 0x009809ec in ProcessUtility (pstmt=0x1a0bfc8,
queryString=0x1830fa0 "EXPLAIN (COSTS OFF)\nSELECT t1.a, t1.c, t2.a,
t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c =
t2.c) WHERE t1.b < 10 ORDER BY t1.a;",
readOnlyTree=false, context=PROCESS_UTILITY_TOPLEVEL, params=0x0,
queryEnv=0x0, dest=0x197c378, qc=0x7fffd6775f90) at utility.c:527
#25 0x0097f636 in PortalRunUtility (portal=0x1893b40,
pstmt=0x1a0bfc8, isTopLevel=true, setHoldSnapshot=true, dest=0x197c378,
qc=0x7fffd6775f90) at pquery.c:1147
#26 0x0097f3a5 in FillPortalStore (portal=0x1893b40,
isTopLevel=true) at pquery.c:1026
#27 0x0097ed11 in PortalRun (portal=0x1893b40,
count=9223372036854775807, isTopLevel=true, run_once=true, dest=0x1a0c0b8,
altdest=0x1a0c0b8, qc=0x7fffd6776150) at pquery.c:758
#28 0x00978aa5 in exec_simple_query (

Thanks & Regards,
Rajkumar Raghuwanshi


On Fri, Sep 3, 2021 at 7:17 PM Amit Langote  wrote:

> On Wed, Sep 1, 2021 at 2:31 PM Amit Langote 
> wrote:
> > On Tue, Aug 31, 2021 at 8:02 PM Nitin Jadhav
> >  wrote:
> > > The attached patch also fixes the above comments.
> >
> > I noticed that multi-column list partitions containing NULLs don't
> > work correctly with partition pruning yet.
> >
> > create table p0 (a int, b text, c bool) partition by list (a, b, c);
> > create table p01 partition of p0 for values in ((1, 1, true), (NULL, 1,
> false));
> > create table p02 partition of p0 for values in ((1, NULL, false));
> > explain select * from p0 where a is null;
> >QUERY PLAN
> > 
> >  Seq Scan on p01 p0  (cost=0.00..22.50 rows=6 width=37)
> >Filter: (a IS NULL)
> > (2 rows)
> >
> > I guess that may be due to the following newly added code being
> incomplete:
> >
> > +/*
> > + * get_partition_bound_null_index
> > + *
> > + * Returns the partition index of the partition bound which accepts
> NULL.
> > + */
> > +int
> > +get_partition_bound_null_index(PartitionBoundInfo boundinfo)
> > +{
> > +   int i = 0;
> > +   int j = 0;
> > +
> > +   if (!boundinfo->isnulls)
> > +   return -1;
> >
> > -   if (!val->constisnull)
> > -   count++;
> > +   for (i = 0; i < boundinfo->ndatums; i++)
> > +   {
> > +   //TODO: Handle for multi-column cases
> > +   for (j = 0; j < 1; j++)
> > +   {
> > +   if (boundinfo->isnulls[i][j])
> > +   return boundinfo->indexes[i];
> > }
> > }
> >
> > +   return -1;
> > +}
> >
> > Maybe this function needs to return a "bitmapset" of indexes, because
> > multiple partitions can now contain NULL values.
> >
> > Some other issues I noticed and suggestions for improvement:
> >
> > +/*
> > + * checkForDuplicates
> > + *
> > + * Returns TRUE if the list bound element is already present in the
> list of
> > + * list bounds, FALSE otherwise.
&

Re: Multi-Column List Partitioning

2021-08-30 Thread Rajkumar Raghuwanshi
Hi Nitin.

I have been testing these patches. Patches applied cleanly on the head.
While testing I found below a case where update row movement is not working
properly.
Please find the test case below.

postgres=# create table p0 (a int, b text, c bool) partition by list
(a,b,c);
CREATE TABLE
postgres=# create table p01 partition of p0 for values in ((1,1,true));
CREATE TABLE
postgres=# create table p02 partition of p0 for values in ((1,NULL,false));
CREATE TABLE
postgres=# insert into p0 values (1,'1',true);
INSERT 0 1
postgres=# insert into p0 values (1,NULL,false);
INSERT 0 1
postgres=# select tableoid::regclass,* from p0;
 tableoid | a | b | c
--+---+---+---
 p01  | 1 | 1 | t
 p02  | 1 |   | f
(2 rows)

postgres=# update p0 set b = NULL;
UPDATE 2
postgres=# select tableoid::regclass,* from p0;
 tableoid | a | b | c
--+---+---+---
 p01  | 1 |   | t
 p02  | 1 |   | f
(2 rows)

I think this update should fail as there is no partition satisfying update
row (1,NULL,true).

Thanks & Regards,
Rajkumar Raghuwanshi


On Fri, Aug 27, 2021 at 12:53 PM Nitin Jadhav 
wrote:

> > + * isnulls is an array of boolean-tuples with key->partnatts booleans
> values
> > + * each.  Currently only used for list partitioning, it stores whether a
> >
> > I think 'booleans' should be 'boolean'.
> > The trailing word 'each' is unnecessary.
>
> > bq. Supported new syantx to allow mentioning multiple key information.
> >
> > syantx -> syntax
>
> > +   isDuplicate = checkForDuplicates(result, values);
> > +   if (isDuplicate)
> > +   continue;
> >
> > It seems the variable isDuplicate is not needed. The if statement can
> directly check the return value from checkForDuplicates().
>
> I agree that isDuplicate is not required.
> Thanks for sharing the comments. I will take care of these comments in
> the next patch.
>
> > +   //TODO: Handle for multi-column cases
> > +   for (j = 0; j < 1; j++)
> >
> > Is this part going to be updated in the next patch?
>
> Yes. The code changes related to partition-wise join are in progress.
> I will handle these in the next patch.
>
> Thanks & Regards,
> Nitin Jadhav
>
> On Thu, Aug 26, 2021 at 2:40 AM Zhihong Yu  wrote:
> >
> >
> >
> > On Wed, Aug 25, 2021 at 5:41 AM Nitin Jadhav <
> nitinjadhavpostg...@gmail.com> wrote:
> >>
> >> > The new list bound binary search and related comparison support
> >> > function look a bit too verbose to me.  I was expecting
> >> > partition_list_bsearch() to look very much like
> >> > partition_range_datum_bsearch(), but that is not the case.  The
> >> > special case code that you wrote in partition_list_bsearch() seems
> >> > unnecessary, at least in that function.  I'm talking about the code
> >> > fragment starting with this comment:
> >> >
> >> > I will look at other parts of the patch next week hopefully.   For
> >> > now, attached is a delta patch that applies on top of your v1, which
> >> > does:
> >> >
> >> > * Simplify partition_list_bsearch() and partition_lbound_datum_cmp()
> >> > * Make qsort_partition_list_value_cmp simply call
> >> > partition_lbound_datum_cmp() instead of having its own logic to
> >> > compare input bounds
> >> > * Move partition_lbound_datum_cmp() into partbounds.c as a static
> >> > function (export seems unnecessary)
> >> > * Add a comment for PartitionBoundInfo.isnulls and remove that for
> null_index
> >>
> >> Yes. You are right. The extra code added in partition_list_bsearch()
> >> is not required and thanks for sharing the delta patch. It looks good
> >> to me and I have incorporated the changes in the attached patch.
> >>
> >> > I guess you're perhaps trying to address the case where the caller
> >> > does not specify the values for all of the partition key columns,
> >> > which can happen when the partition pruning code needs to handle a set
> >> > of clauses matching only some of the partition key columns.  But
> >> > that's a concern of the partition pruning code and so the special case
> >> > should be handled there (if at all), not in the binary search function
> >> > that is shared with other callers.  Regarding that, I'm wondering if
> >> > we should require clauses matching all of the partition key columns to
> >> > be found for the pruning code to call the binary search, so do
> >> > something like get_matching_hash_bounds() does:
> >> >
>

Re: recovering from "found xmin ... from before relfrozenxid ..."

2020-08-18 Thread Rajkumar Raghuwanshi
Thanks for the suggestions, Ashutosh. I have done testing around these
suggestions and found no issues. I will continue testing the same with the
updated patch posted on this thread.

On Fri, Aug 7, 2020 at 12:45 PM Ashutosh Sharma 
wrote:

> Thanks Rajkumar for testing the patch.
>
> Here are some of the additional test-cases that I would suggest you to
> execute, if possible:
>
> 1) You may try running the test-cases that you have executed so far
> with SR setup and see if the changes are getting reflected on the
> standby.
>
> 2) You may also try running some concurrent test-cases for e.g. try
> running these functions with VACUUM or some other sql commands
> (preferable DML commands) in parallel.
>
> 3) See what happens when you pass some invalid tids (containing
> invalid block or offset number) to these functions. You may also try
> running these functions on the same tuple repeatedly and see the
> behaviour.
>
> ...
>
> --
> With Regards,
> Ashutosh Sharma
> EnterpriseDB:http://www.enterprisedb.com


Thanks & Regards,
Rajkumar Raghuwanshi


Re: recovering from "found xmin ... from before relfrozenxid ..."

2020-08-06 Thread Rajkumar Raghuwanshi
I have been doing some user-level testing of this feature, apart from the
sanity test for the extension and its functions.

I have tried to corrupt tuples and then able to fix it using
heap_force_freeze/kill functions.


--corrupt relfrozenxid, cause vacuum failed.

update pg_class set relfrozenxid = (relfrozenxid::text::integer +
10)::text::xid where relname = 'test_tbl';

UPDATE 1

insert into test_tbl values (2, 'BBB');


postgres=# vacuum test_tbl;

ERROR:  found xmin 507 from before relfrozenxid 516

CONTEXT:  while scanning block 0 of relation "public.test_tbl"


postgres=# select *, ctid, xmin, xmax from test_tbl;

 a |  b  | ctid  | xmin | xmax

---+-+---+--+--

 1 | AAA | (0,1) |  505 |0

 2 | BBB | (0,2) |  507 |0

(2 rows)


--fixed using heap_force_freeze

postgres=# select heap_force_freeze('test_tbl'::regclass,
ARRAY['(0,2)']::tid[]);

 heap_force_freeze

---


postgres=# vacuum test_tbl;

VACUUM

postgres=# select *, ctid, xmin, xmax from test_tbl;

 a |  b  | ctid  | xmin | xmax

---+-+---+--+--

 1 | AAA | (0,1) |  505 |0

 2 | BBB | (0,2) |2 |0

(2 rows)


--corrupt table headers in base/oid. file, cause table access failed.

postgres=# select ctid, * from test_tbl;

ERROR:  could not access status of transaction 4294967295

DETAIL:  Could not open file "pg_xact/0FFF": No such file or directory.


--removed corrupted tuple using heap_force_kill

postgres=# select heap_force_kill('test_tbl'::regclass,
ARRAY['(0,2)']::tid[]);

 heap_force_kill

-



(1 row)


postgres=# select ctid, * from test_tbl;

 ctid  | a |  b

---+---+-

 (0,1) | 1 | AAA

(1 row)


I will be continuing with my testing with the latest patch and update here
if found anything.


Thanks & Regards,
Rajkumar Raghuwanshi


On Thu, Aug 6, 2020 at 1:42 PM Masahiko Sawada <
masahiko.saw...@2ndquadrant.com> wrote:

> On Wed, 5 Aug 2020 at 22:42, Ashutosh Sharma 
> wrote:
> >
> > Hi Robert,
> >
> > Thanks for the review. Please find my comments inline:
> >
> > On Sat, Aug 1, 2020 at 12:18 AM Robert Haas 
> wrote:
> > >
> > > On Fri, Jul 31, 2020 at 8:52 AM Ashutosh Sharma 
> wrote:
> > > > Attached is the new version of patch that addresses the comments
> from Andrey and Beena.
> > >
> > > +PGFILEDESC = "pg_surgery - perform surgery on the damaged heap table"
> > >
> > > the -> a
> > >
> > > I also suggest: heap table -> relation, because we might want to
> > > extend this to other cases later.
> > >
> >
> > Corrected.
> >
> > > +-- toast table.
> > > +begin;
> > > +create table ttab(a text);
> > > +insert into ttab select string_agg(chr(floor(random() * 26)::int +
> > > 65), '') from generate_series(1,1);
> > > +select * from ttab where xmin = 2;
> > > + a
> > > +---
> > > +(0 rows)
> > > +
> > > +select heap_force_freeze('ttab'::regclass, ARRAY['(0, 1)']::tid[]);
> > > + heap_force_freeze
> > > +---
> > > +
> > > +(1 row)
> > > +
> > >
> > > I don't understand the point of this. You're not testing the function
> > > on the TOAST table; you're testing it on the main table when there
> > > happens to be a TOAST table that is probably getting used for
> > > something. But that's not really relevant to what is being tested
> > > here, so as written this seems redundant with the previous cases.
> > >
> >
> > Yeah, it's being tested on the main table, not on a toast table. I've
> > removed this test-case and also restricted direct access to the toast
> > table using heap_force_kill/freeze functions. I think we shouldn't be
> > using these functions to do any changes in the toast table. We will
> > only use these functions with the main table and let VACUUM remove the
> > corresponding data chunks (pointed by the tuple that got removed from
> > the main table).
> >
> > Another option would be to identify all the data chunks corresponding
> > to the tuple (ctid) being killed from the main table and remove them
> > one by one. We will only do this if the tuple from the main table that
> > has been marked as killed has an external storage. We will have to add
> > a bunch of code for this otherwise we can let VACUUM do this for us.
> > Let me know your thoughts on this.
> >
> > > +-- test pg_surgery functions with the unsupported relations. Should
> fail.
> > >
> > > Please name the specific functions being tested here in case we add
> > > more in the future that are tested separate

Getting ERROR with FOR UPDATE/SHARE for partitioned table.

2020-05-22 Thread Rajkumar Raghuwanshi
Hi All,

I am getting an ERROR when using the "FOR UPDATE" clause for a partitioned
table. Below is a reproducible test case for the same.

CREATE TABLE tbl (c1 INT,c2 TEXT) PARTITION BY LIST (c1);
CREATE TABLE tbl_null PARTITION OF tbl FOR VALUES IN (NULL);
CREATE TABLE tbl_1 PARTITION OF tbl FOR VALUES IN (1,2,3);

INSERT INTO tbl SELECT i,i FROM generate_series(1,3) i;

CREATE OR REPLACE FUNCTION func(i int) RETURNS int
AS $$
DECLARE
 v_var tbl%ROWTYPE;
 cur CURSOR IS SELECT * FROM tbl WHERE c1< 5 FOR UPDATE;
BEGIN
 OPEN cur;
 LOOP
  FETCH cur INTO v_var;
  EXIT WHEN NOT FOUND;
  UPDATE tbl SET c2='aa' WHERE CURRENT OF cur;
 END LOOP;
 CLOSE cur;
 RETURN 10;
END;
$$ LANGUAGE PLPGSQL;

SELECT func(10);

postgres=# SELECT func(10);
ERROR:  cursor "cur" does not have a FOR UPDATE/SHARE reference to table
"tbl_null"
CONTEXT:  SQL statement "UPDATE tbl SET c2='aa' WHERE CURRENT OF cur"
PL/pgSQL function func(integer) line 10 at SQL statement

Thanks & Regards,
Rajkumar Raghuwanshi


Re: WIP/PoC for parallel backup

2020-04-23 Thread Rajkumar Raghuwanshi
On Thu, Apr 23, 2020 at 1:47 PM Asif Rehman  wrote:

>
>
> On Thu, Apr 23, 2020 at 11:43 AM Rajkumar Raghuwanshi <
> rajkumar.raghuwan...@enterprisedb.com> wrote:
>
>>
>>
>> On Wed, Apr 22, 2020 at 7:48 PM Asif Rehman 
>> wrote:
>>
>>>
>>> Hi Dipesh,
>>>
>>> The rebased and updated patch is attached. Its rebased to (9f2c4ede).
>>>
>>
>> Make is failing for v15 patch.
>>
>> gcc -std=gnu99 -Wall -Wmissing-prototypes -Wpointer-arith
>> -Wdeclaration-after-statement -Werror=vla -Wendif-labels
>> -Wmissing-format-attribute -Wformat-security -fno-strict-aliasing -fwrapv
>> -g -g -O0 -I. -I. -I../../../src/include  -D_GNU_SOURCE   -c -o
>> basebackup.o basebackup.c -MMD -MP -MF .deps/basebackup.Po
>> In file included from basebackup.c:33:
>> ../../../src/include/replication/backup_manifest.h:37: error:
>> redefinition of typedef ‘manifest_info’
>> ../../../src/include/replication/basebackup.h:35: note: previous
>> declaration of ‘manifest_info’ was here
>> make[3]: *** [basebackup.o] Error 1
>> make[3]: Leaving directory
>> `/home/edb/WORKDB/PG2/postgresql/src/backend/replication'
>> make[2]: *** [replication-recursive] Error 2
>>
>>
> Just compiled on clean source and its compiling fine. Can you see if you
> have a clean source tree?
>
Yeah, my machine is not cleaned. My colleague Suraj is also able to compile.
Thanks, sorry for the noise.


>
>
> --
> --
> Asif Rehman
> Highgo Software (Canada/China/Pakistan)
> URL : www.highgo.ca
>
>


Re: WIP/PoC for parallel backup

2020-04-23 Thread Rajkumar Raghuwanshi
On Wed, Apr 22, 2020 at 7:48 PM Asif Rehman  wrote:

>
> Hi Dipesh,
>
> The rebased and updated patch is attached. Its rebased to (9f2c4ede).
>

Make is failing for v15 patch.

gcc -std=gnu99 -Wall -Wmissing-prototypes -Wpointer-arith
-Wdeclaration-after-statement -Werror=vla -Wendif-labels
-Wmissing-format-attribute -Wformat-security -fno-strict-aliasing -fwrapv
-g -g -O0 -I. -I. -I../../../src/include  -D_GNU_SOURCE   -c -o
basebackup.o basebackup.c -MMD -MP -MF .deps/basebackup.Po
In file included from basebackup.c:33:
../../../src/include/replication/backup_manifest.h:37: error: redefinition
of typedef ‘manifest_info’
../../../src/include/replication/basebackup.h:35: note: previous
declaration of ‘manifest_info’ was here
make[3]: *** [basebackup.o] Error 1
make[3]: Leaving directory
`/home/edb/WORKDB/PG2/postgresql/src/backend/replication'
make[2]: *** [replication-recursive] Error 2


>
>


Re: create partition table caused server crashed with self-referencing foreign key

2020-04-22 Thread Rajkumar Raghuwanshi
Thanks all for quick fix and push.

Thanks & Regards,
Rajkumar Raghuwanshi


On Wed, Apr 22, 2020 at 4:14 PM Ahsan Hadi  wrote:

>
>
> On Wed, Apr 22, 2020 at 2:45 PM amul sul  wrote:
>
>>
>>
>> On Wed, Apr 22, 2020 at 2:59 PM amul sul  wrote:
>>
>>>
>>>
>>> On Wed, Apr 22, 2020 at 2:27 PM David Rowley 
>>> wrote:
>>>
>>>> On Wed, 22 Apr 2020 at 20:11, amul sul  wrote:
>>>> >
>>>> > On Wed, Apr 22, 2020 at 1:21 PM Rajkumar Raghuwanshi <
>>>> rajkumar.raghuwan...@enterprisedb.com> wrote:
>>>> >> #2  0x00acd16a in ExceptionalCondition
>>>> (conditionName=0xc32310 "numfks == attmap->maplen", errorType=0xc2ea23
>>>> "FailedAssertion", fileName=0xc2f0bf "tablecmds.c", lineNumber=9046) at
>>>> assert.c:67
>>>> >
>>>> >
>>>> > Looks like this assertion is incorrect, I guess it should have check
>>>> > numfks <= attmap->maplen instead.
>>>>
>>>> Even that seems like a very strange thing to Assert. Basically it's
>>>> saying, make sure the number of columns in the foreign key constraint
>>>> is less than or equal to the number of attributes in parentRel.
>>>>
>>>> It's true we do disallow duplicate column names in the foreign key
>>>> constraint (at least since 9da867537), but why do we want an Assert to
>>>> say that?  I don't see anything about that code that would break if we
>>>> did happen to allow duplicate columns in the foreign key.  I'd say the
>>>> Assert should just be removed completely.
>>>>
>>>
>>> Understood and agree with you.
>>>
>>> Attached patch removes this assertion and does a slight tweak to
>>> regression test
>>> to generate case where numfks != attmap->maplen, IMO, we should have
>>> this
>>> even if there is nothing that checks it. Thoughts?
>>>
>>
>> Kindly ignore the previously attached patch, correct patch attached here.
>>
>
> I did a quick test of the fix, the assertion failure is fixed and
> regression is not reporting any failures.
>
>
>>
>> Regards,
>> Amul
>>
>
>
> --
> Highgo Software (Canada/China/Pakistan)
> URL : http://www.highgo.ca
> ADDR: 10318 WHALLEY BLVD, Surrey, BC
> EMAIL: mailto: ahsan.h...@highgo.ca
>


create partition table caused server crashed with self-referencing foreign key

2020-04-22 Thread Rajkumar Raghuwanshi
Hi,

Getting a server crash while creating a partition table which has a
self-referencing foreign key.

postgres=# CREATE TABLE part1 (c1 int PRIMARY KEY, c2 int REFERENCES part1)
PARTITION BY LIST (c1);
CREATE TABLE
postgres=# CREATE TABLE part1_p1 PARTITION OF part1 FOR VALUES IN (1);
server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
The connection to the server was lost. Attempting reset: Failed.

--stack-trace
[edb@localhost bin]$ gdb -q -c data/core.16883 postgres
Core was generated by `postgres: edb postgres [local] CREATE TABLE
'.
Program terminated with signal 6, Aborted.
#0  0x0039212324f5 in raise (sig=6) at
../nptl/sysdeps/unix/sysv/linux/raise.c:64
64  return INLINE_SYSCALL (tgkill, 3, pid, selftid, sig);
Missing separate debuginfos, use: debuginfo-install
keyutils-libs-1.4-5.el6.x86_64 krb5-libs-1.10.3-65.el6.x86_64
libcom_err-1.41.12-24.el6.x86_64 libgcc-4.4.7-23.el6.x86_64
libselinux-2.0.94-7.el6.x86_64 openssl-1.0.1e-58.el6_10.x86_64
zlib-1.2.3-29.el6.x86_64
(gdb) bt
#0  0x0039212324f5 in raise (sig=6) at
../nptl/sysdeps/unix/sysv/linux/raise.c:64
#1  0x003921233cd5 in abort () at abort.c:92
#2  0x00acd16a in ExceptionalCondition (conditionName=0xc32310
"numfks == attmap->maplen", errorType=0xc2ea23 "FailedAssertion",
fileName=0xc2f0bf "tablecmds.c", lineNumber=9046) at assert.c:67
#3  0x006d1b6c in CloneFkReferenced (parentRel=0x7f3c80be2400,
partitionRel=0x7f3c80be2a50) at tablecmds.c:9046
#4  0x006d189b in CloneForeignKeyConstraints (wqueue=0x0,
parentRel=0x7f3c80be2400, partitionRel=0x7f3c80be2a50) at tablecmds.c:8939
#5  0x006c09a8 in DefineRelation (stmt=0x2ff25b8, relkind=114 'r',
ownerId=10, typaddress=0x0, queryString=0x2f19810 "CREATE TABLE part1_p1
PARTITION OF part1 FOR VALUES IN (1);")
at tablecmds.c:1151
#6  0x00953021 in ProcessUtilitySlow (pstate=0x2ff24a0,
pstmt=0x2f1a588, queryString=0x2f19810 "CREATE TABLE part1_p1 PARTITION OF
part1 FOR VALUES IN (1);", context=PROCESS_UTILITY_TOPLEVEL,
params=0x0, queryEnv=0x0, dest=0x2f1a868, qc=0x7c1faa10) at
utility.c:1154
#7  0x00952dfe in standard_ProcessUtility (pstmt=0x2f1a588,
queryString=0x2f19810 "CREATE TABLE part1_p1 PARTITION OF part1 FOR VALUES
IN (1);", context=PROCESS_UTILITY_TOPLEVEL, params=0x0,
queryEnv=0x0, dest=0x2f1a868, qc=0x7c1faa10) at utility.c:1067
#8  0x00951d18 in ProcessUtility (pstmt=0x2f1a588,
queryString=0x2f19810 "CREATE TABLE part1_p1 PARTITION OF part1 FOR VALUES
IN (1);", context=PROCESS_UTILITY_TOPLEVEL, params=0x0, queryEnv=0x0,
dest=0x2f1a868, qc=0x7c1faa10) at utility.c:522
#9  0x00950b48 in PortalRunUtility (portal=0x2f808c0,
pstmt=0x2f1a588, isTopLevel=true, setHoldSnapshot=false, dest=0x2f1a868,
qc=0x7c1faa10) at pquery.c:1157
#10 0x00950d6e in PortalRunMulti (portal=0x2f808c0,
isTopLevel=true, setHoldSnapshot=false, dest=0x2f1a868, altdest=0x2f1a868,
qc=0x7c1faa10) at pquery.c:1303
#11 0x0095023a in PortalRun (portal=0x2f808c0,
count=9223372036854775807, isTopLevel=true, run_once=true, dest=0x2f1a868,
altdest=0x2f1a868, qc=0x7c1faa10) at pquery.c:779
#12 0x0094a2a3 in exec_simple_query (query_string=0x2f19810 "CREATE
TABLE part1_p1 PARTITION OF part1 FOR VALUES IN (1);") at postgres.c:1239
#13 0x0094e38e in PostgresMain (argc=1, argv=0x2f44998,
dbname=0x2f448b0 "postgres", username=0x2f44890 "edb") at postgres.c:4315
#14 0x0089ba5d in BackendRun (port=0x2f3c7f0) at postmaster.c:4510
#15 0x0089b24c in BackendStartup (port=0x2f3c7f0) at
postmaster.c:4202
#16 0x008975be in ServerLoop () at postmaster.c:1727
#17 0x00896f07 in PostmasterMain (argc=3, argv=0x2f14240) at
postmaster.c:1400
#18 0x007999cc in main (argc=3, argv=0x2f14240) at main.c:210

Thanks & Regards,
Rajkumar Raghuwanshi


Re: ERROR: could not open file "pg_tblspc/ issue with replication setup.

2020-04-17 Thread Rajkumar Raghuwanshi
On Fri, Apr 17, 2020 at 9:51 AM Michael Paquier  wrote:

> On Thu, Apr 16, 2020 at 01:56:47PM +0530, Rajkumar Raghuwanshi wrote:
> > While testing for a feature I got this tablespace related error while
> > running script.
>
> Primary and standby are running on the same host, so they would
> interact with each other as the tablespace path used by both clusters
> would be the same (primary uses the path defined by the DDL, which is
> registered in the WAL record the standby replays).  What you are
> looking for here is to create the tablespace before taking the base
> backup, and then use the option --tablespace-mapping with
> pg_basebackup to avoid the issue.
>
Thanks for the help.



> --
> Michael
>


ERROR: could not open file "pg_tblspc/ issue with replication setup.

2020-04-16 Thread Rajkumar Raghuwanshi
Hi,

While testing a feature I got this tablespace-related error while
running a script.
The issue is not reproducible every time, but if I run the same set
of commands 2-3 times I am able to reproduce the same error.

--first run - pass
# master slave setup
+ mkdir /tmp/test_bkp/tblsp01
+ ./psql postgres -p 5432 -c 'create tablespace tblsp01 location
'\''/tmp/test_bkp/tblsp01'\'';'
CREATE TABLESPACE
+ ./psql postgres -p 5432 -c 'create table test (a text) tablespace
tblsp01;'
CREATE TABLE
#cleanup

--next
#master-slave setup
+ mkdir /tmp/test_bkp/tblsp01
+ ./psql postgres -p 5432 -c 'create tablespace tblsp01 location
'\''/tmp/test_bkp/tblsp01'\'';'
CREATE TABLESPACE
+ ./psql postgres -p 5432 -c 'create table test (a text) tablespace
tblsp01;'
ERROR:  could not open file "pg_tblspc/16384/PG_13_202004074/13530/16388":
No such file or directory


Attaching command and script which help to reproduce it.
[edb@localhost bin]$ while sh pg_tblsp_wal.sh; do :; done

Thanks & Regards,
Rajkumar Raghuwanshi


pg_tblsp_wal.sh
Description: application/shellscript


Re: WIP/PoC for parallel backup

2020-04-15 Thread Rajkumar Raghuwanshi
Hi Asif,

In the below scenario, backup verification failed for a tablespace when the
backup was taken with the parallel option.
Without parallel, for the same scenario, pg_verifybackup passed without
any error.

[edb@localhost bin]$ mkdir /tmp/test_bkp/tblsp1
[edb@localhost bin]$ ./psql postgres -p 5432 -c "create tablespace tblsp1
location '/tmp/test_bkp/tblsp1';"
CREATE TABLESPACE
[edb@localhost bin]$ ./psql postgres -p 5432 -c "create table test (a text)
tablespace tblsp1;"
CREATE TABLE
[edb@localhost bin]$ ./psql postgres -p 5432 -c "insert into test values
('parallel_backup with -T tablespace option');"
INSERT 0 1
[edb@localhost bin]$ ./pg_basebackup -p 5432 -D /tmp/test_bkp/bkp -T
/tmp/test_bkp/tblsp1=/tmp/test_bkp/tblsp2 -j 4
[edb@localhost bin]$ ./pg_verifybackup /tmp/test_bkp/bkp
pg_verifybackup: error: "pg_tblspc/16384/PG_13_202004074/13530/16390" is
present on disk but not in the manifest
pg_verifybackup: error: "pg_tblspc/16384/PG_13_202004074/13530/16388" is
present on disk but not in the manifest
pg_verifybackup: error: "pg_tblspc/16384/PG_13_202004074/13530/16385" is
present on disk but not in the manifest
pg_verifybackup: error: "/PG_13_202004074/13530/16388" is present in the
manifest but not on disk
pg_verifybackup: error: "/PG_13_202004074/13530/16390" is present in the
manifest but not on disk
pg_verifybackup: error: "/PG_13_202004074/13530/16385" is present in the
manifest but not on disk

--without parallel backup
[edb@localhost bin]$ ./pg_basebackup -p 5432 -D /tmp/test_bkp/bkp1 -T
/tmp/test_bkp/tblsp1=/tmp/test_bkp/tblsp3 -j 1
[edb@localhost bin]$ ./pg_verifybackup /tmp/test_bkp/bkp1
backup successfully verified


Thanks & Regards,
Rajkumar Raghuwanshi


On Wed, Apr 15, 2020 at 2:19 PM Ahsan Hadi  wrote:

>
>
> On Wed, 15 Apr 2020 at 1:49 AM, Robert Haas  wrote:
>
>> On Tue, Apr 14, 2020 at 10:37 AM Asif Rehman 
>> wrote:
>> > I forgot to make a check for no-manifest. Fixed. Attached is the
>> updated patch.
>>
>> +typedef struct
>> +{
>> ...
>> +} BackupFile;
>> +
>> +typedef struct
>> +{
>> ...
>> +} BackupState;
>>
>> These structures need comments.
>>
>> +list_wal_files_opt_list:
>> +   SCONST SCONST
>> {
>> - $$ = makeDefElem("manifest_checksums",
>> -
>> (Node *)makeString($2), -1);
>> +   $$ = list_make2(
>> +   makeDefElem("start_wal_location",
>> +   (Node *)makeString($2),
>> -1),
>> +   makeDefElem("end_wal_location",
>> +   (Node *)makeString($2),
>> -1));
>> +
>> }
>>
>> This seems like an unnecessarily complicated parse representation. The
>> DefElems seem to be completely unnecessary here.
>>
>> @@ -998,7 +1110,37 @@ SendBaseBackup(BaseBackupCmd *cmd)
>> set_ps_display(activitymsg);
>> }
>>
>> -   perform_base_backup();
>> +   switch (cmd->cmdtag)
>>
>> So the design here is that SendBaseBackup() is now going to do a bunch
>> of things that are NOT sending a base backup? With no updates to the
>> comments of that function and no change to the process title it sets?
>>
>> -   return (manifest->buffile != NULL);
>> +   return (manifest && manifest->buffile != NULL);
>>
>> Heck no. It appears that you didn't even bother reading the function
>> header comment.
>>
>> + * Send a single resultset containing XLogRecPtr record (in text format)
>> + * TimelineID and backup label.
>>   */
>>  static void
>> -SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
>> +SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli,
>> +StringInfo label, char *backupid)
>>
>> This just casually breaks wire protocol compatibility, which seems
>> completely unacceptable.
>>
>> +   if (strlen(opt->tablespace) > 0)
>> +   sendTablespace(opt->tablespace, NULL, true, NULL, );
>> +   else
>> +   sendDir(".", 1, true, NIL, true, NULL, NULL, );
>> +
>> +   SendFilesHeader(files);
>>
>> So I guess the idea here is that we buffer the entire list of files in
>> memory, regardless of size, and then we send it out afterwards. That
>> doesn't seem like a good idea. The list of files might 

Re: variation of row_number with parallel

2020-04-13 Thread Rajkumar Raghuwanshi
On Tue, Apr 14, 2020 at 9:39 AM Pavel Stehule 
wrote:

>
>
> út 14. 4. 2020 v 5:59 odesílatel Rajkumar Raghuwanshi <
> rajkumar.raghuwan...@enterprisedb.com> napsal:
>
>> Hi,
>>
>> I have observed row_number() is giving different results when query
>> executed in parallel. is this expected w.r.t parallel execution.
>>
>> CREATE TABLE tbl1 (c1 INT) partition by list (c1);
>> CREATE TABLE tbl1_p1 partition of tbl1 FOR VALUES IN (10);
>> CREATE TABLE tbl1_p2 partition of tbl1 FOR VALUES IN (20);
>> CREATE TABLE tbl1_p3 partition of tbl1 FOR VALUES IN (30);
>>
>> CREATE TABLE tbl2 (c1 INT, c2 INT,c3 INT) partition by list (c1);
>> CREATE TABLE tbl2_p1 partition of tbl2 FOR VALUES IN (1);
>> CREATE TABLE tbl2_p2 partition of tbl2 FOR VALUES IN (2);
>> CREATE TABLE tbl2_p3 partition of tbl2 FOR VALUES IN (3);
>> CREATE TABLE tbl2_p4 partition of tbl2 FOR VALUES IN (4);
>> CREATE TABLE tbl2_p5 partition of tbl2 FOR VALUES IN (5);
>>
>> INSERT INTO tbl1 VALUES (10),(20),(30);
>>
>> INSERT INTO tbl2 VALUES
>> (1,100,20),(2,200,10),(3,100,20),(4,100,30),(5,100,10);
>>
>> postgres=# explain select e.c2, row_number() over () from tbl1 d, tbl2 e
>> where d.c1=e.c3;
>>   QUERY PLAN
>>
>>
>> ---
>>  WindowAgg  (cost=1520.35..12287.73 rows=390150 width=12)
>>->  Merge Join  (cost=1520.35..7410.85 rows=390150 width=4)
>>  Merge Cond: (d.c1 = e.c3)
>>  ->  Sort  (cost=638.22..657.35 rows=7650 width=4)
>>Sort Key: d.c1
>>->  Append  (cost=0.00..144.75 rows=7650 width=4)
>>  ->  Seq Scan on tbl1_p1 d_1  (cost=0.00..35.50
>> rows=2550 width=4)
>>  ->  Seq Scan on tbl1_p2 d_2  (cost=0.00..35.50
>> rows=2550 width=4)
>>  ->  Seq Scan on tbl1_p3 d_3  (cost=0.00..35.50
>> rows=2550 width=4)
>>  ->  Sort  (cost=882.13..907.63 rows=10200 width=8)
>>Sort Key: e.c3
>>->  Append  (cost=0.00..203.00 rows=10200 width=8)
>>  ->  Seq Scan on tbl2_p1 e_1  (cost=0.00..30.40
>> rows=2040 width=8)
>>  ->  Seq Scan on tbl2_p2 e_2  (cost=0.00..30.40
>> rows=2040 width=8)
>>  ->  Seq Scan on tbl2_p3 e_3  (cost=0.00..30.40
>> rows=2040 width=8)
>>  ->  Seq Scan on tbl2_p4 e_4  (cost=0.00..30.40
>> rows=2040 width=8)
>>  ->  Seq Scan on tbl2_p5 e_5  (cost=0.00..30.40
>> rows=2040 width=8)
>> (17 rows)
>>
>> postgres=# select e.c2, row_number() over () from tbl1 d, tbl2 e where
>> d.c1=e.c3;
>>  c2  | row_number
>> -+
>>  *200 |  1*
>>  100 |  2
>>  100 |  3
>>  100 |  4
>>  100 |  5
>> (5 rows)
>>
>> postgres=#
>> postgres=# set parallel_setup_cost = 0;
>> SET
>> postgres=# set parallel_tuple_cost = 0;
>> SET
>> postgres=#
>> postgres=# explain select e.c2, row_number() over () from tbl1 d, tbl2 e
>> where d.c1=e.c3;
>>   QUERY PLAN
>>
>>
>> --
>>  WindowAgg  (cost=130.75..7521.21 rows=390150 width=12)
>>->  Gather  (cost=130.75..2644.34 rows=390150 width=4)
>>  Workers Planned: 2
>>  ->  Parallel Hash Join  (cost=130.75..2644.34 rows=162562
>> width=4)
>>Hash Cond: (e.c3 = d.c1)
>>->  Parallel Append  (cost=0.00..131.25 rows=4250 width=8)
>>  ->  Parallel Seq Scan on tbl2_p1 e_1
>>  (cost=0.00..22.00 rows=1200 width=8)
>>  ->  Parallel Seq Scan on tbl2_p2 e_2
>>  (cost=0.00..22.00 rows=1200 width=8)
>>  ->  Parallel Seq Scan on tbl2_p3 e_3
>>  (cost=0.00..22.00 rows=1200 width=8)
>>  ->  Parallel Seq Scan on tbl2_p4 e_4
>>  (cost=0.00..22.00 rows=1200 width=8)
>>  ->  Parallel Seq Scan on tbl2_p5 e_5
>>  (cost=0.00..22.00 rows=1200 width=8)
>>->  Parallel Hash  (cost=90.93..90.93 rows=3186 width=4)
>>  ->  Parallel Append  (cost=0.00..90.93 rows=3186
>> width=4)
>>->  Parallel Seq Scan on tbl1_p1 d_

variation of row_number with parallel

2020-04-13 Thread Rajkumar Raghuwanshi
Hi,

I have observed that row_number() gives different results when the query is
executed in parallel. Is this expected w.r.t. parallel execution?

CREATE TABLE tbl1 (c1 INT) partition by list (c1);
CREATE TABLE tbl1_p1 partition of tbl1 FOR VALUES IN (10);
CREATE TABLE tbl1_p2 partition of tbl1 FOR VALUES IN (20);
CREATE TABLE tbl1_p3 partition of tbl1 FOR VALUES IN (30);

CREATE TABLE tbl2 (c1 INT, c2 INT,c3 INT) partition by list (c1);
CREATE TABLE tbl2_p1 partition of tbl2 FOR VALUES IN (1);
CREATE TABLE tbl2_p2 partition of tbl2 FOR VALUES IN (2);
CREATE TABLE tbl2_p3 partition of tbl2 FOR VALUES IN (3);
CREATE TABLE tbl2_p4 partition of tbl2 FOR VALUES IN (4);
CREATE TABLE tbl2_p5 partition of tbl2 FOR VALUES IN (5);

INSERT INTO tbl1 VALUES (10),(20),(30);

INSERT INTO tbl2 VALUES
(1,100,20),(2,200,10),(3,100,20),(4,100,30),(5,100,10);

postgres=# explain select e.c2, row_number() over () from tbl1 d, tbl2 e
where d.c1=e.c3;
  QUERY PLAN

---
 WindowAgg  (cost=1520.35..12287.73 rows=390150 width=12)
   ->  Merge Join  (cost=1520.35..7410.85 rows=390150 width=4)
 Merge Cond: (d.c1 = e.c3)
 ->  Sort  (cost=638.22..657.35 rows=7650 width=4)
   Sort Key: d.c1
   ->  Append  (cost=0.00..144.75 rows=7650 width=4)
 ->  Seq Scan on tbl1_p1 d_1  (cost=0.00..35.50
rows=2550 width=4)
 ->  Seq Scan on tbl1_p2 d_2  (cost=0.00..35.50
rows=2550 width=4)
 ->  Seq Scan on tbl1_p3 d_3  (cost=0.00..35.50
rows=2550 width=4)
 ->  Sort  (cost=882.13..907.63 rows=10200 width=8)
   Sort Key: e.c3
   ->  Append  (cost=0.00..203.00 rows=10200 width=8)
 ->  Seq Scan on tbl2_p1 e_1  (cost=0.00..30.40
rows=2040 width=8)
 ->  Seq Scan on tbl2_p2 e_2  (cost=0.00..30.40
rows=2040 width=8)
 ->  Seq Scan on tbl2_p3 e_3  (cost=0.00..30.40
rows=2040 width=8)
 ->  Seq Scan on tbl2_p4 e_4  (cost=0.00..30.40
rows=2040 width=8)
 ->  Seq Scan on tbl2_p5 e_5  (cost=0.00..30.40
rows=2040 width=8)
(17 rows)

postgres=# select e.c2, row_number() over () from tbl1 d, tbl2 e where
d.c1=e.c3;
 c2  | row_number
-+
 *200 |  1*
 100 |  2
 100 |  3
 100 |  4
 100 |  5
(5 rows)

postgres=#
postgres=# set parallel_setup_cost = 0;
SET
postgres=# set parallel_tuple_cost = 0;
SET
postgres=#
postgres=# explain select e.c2, row_number() over () from tbl1 d, tbl2 e
where d.c1=e.c3;
  QUERY PLAN

--
 WindowAgg  (cost=130.75..7521.21 rows=390150 width=12)
   ->  Gather  (cost=130.75..2644.34 rows=390150 width=4)
 Workers Planned: 2
 ->  Parallel Hash Join  (cost=130.75..2644.34 rows=162562 width=4)
   Hash Cond: (e.c3 = d.c1)
   ->  Parallel Append  (cost=0.00..131.25 rows=4250 width=8)
 ->  Parallel Seq Scan on tbl2_p1 e_1
 (cost=0.00..22.00 rows=1200 width=8)
 ->  Parallel Seq Scan on tbl2_p2 e_2
 (cost=0.00..22.00 rows=1200 width=8)
 ->  Parallel Seq Scan on tbl2_p3 e_3
 (cost=0.00..22.00 rows=1200 width=8)
 ->  Parallel Seq Scan on tbl2_p4 e_4
 (cost=0.00..22.00 rows=1200 width=8)
 ->  Parallel Seq Scan on tbl2_p5 e_5
 (cost=0.00..22.00 rows=1200 width=8)
   ->  Parallel Hash  (cost=90.93..90.93 rows=3186 width=4)
 ->  Parallel Append  (cost=0.00..90.93 rows=3186
width=4)
   ->  Parallel Seq Scan on tbl1_p1 d_1
 (cost=0.00..25.00 rows=1500 width=4)
   ->  Parallel Seq Scan on tbl1_p2 d_2
 (cost=0.00..25.00 rows=1500 width=4)
   ->  Parallel Seq Scan on tbl1_p3 d_3
 (cost=0.00..25.00 rows=1500 width=4)
(16 rows)

postgres=# select e.c2, row_number() over () from tbl1 d, tbl2 e where
d.c1=e.c3;
 c2  | row_number
-+
 100 |  1
 100 |  2
 100 |  3
 *200 |  4*
 100 |  5
(5 rows)

Thanks & Regards,
Rajkumar Raghuwanshi


Re: WIP/PoC for parallel backup

2020-04-07 Thread Rajkumar Raghuwanshi
Hi Asif,

Thanks for new patches.

Patches need to be rebased on head. Getting a failure while applying the
0003 patch.
edb@localhost postgresql]$ git apply
v11/0003-Parallel-Backup-Backend-Replication-commands.patch
error: patch failed: src/backend/storage/ipc/ipci.c:147
error: src/backend/storage/ipc/ipci.c: patch does not apply

I have applied v11 patches on commit -
23ba3b5ee278847e4fad913b80950edb2838fd35 to test further.

pg_basebackup has a new option "--no-estimate-size"; pg_basebackup crashes
when using this option.

[edb@localhost bin]$ ./pg_basebackup -D /tmp/bkp --no-estimate-size --jobs=2
Segmentation fault (core dumped)

--stacktrace
[edb@localhost bin]$ gdb -q -c core.80438 pg_basebackup
Loaded symbols for /lib64/libselinux.so.1
Core was generated by `./pg_basebackup -D /tmp/bkp --no-estimate-size
--jobs=2'.
Program terminated with signal 11, Segmentation fault.
#0  strtol_l_internal (nptr=0x0, endptr=0x0, base=10, group=, loc=0x392158ee40) at ../stdlib/strtol_l.c:298
298  while (ISSPACE (*s))
Missing separate debuginfos, use: debuginfo-install
keyutils-libs-1.4-5.el6.x86_64 krb5-libs-1.10.3-65.el6.x86_64
libcom_err-1.41.12-24.el6.x86_64 libselinux-2.0.94-7.el6.x86_64
openssl-1.0.1e-58.el6_10.x86_64 zlib-1.2.3-29.el6.x86_64
(gdb) bt
#0  strtol_l_internal (nptr=0x0, endptr=0x0, base=10, group=, loc=0x392158ee40) at ../stdlib/strtol_l.c:298
#1  0x003921233b30 in atoi (nptr=) at atoi.c:28
#2  0x0040841e in main (argc=5, argv=0x7ffeaa6fb968) at
pg_basebackup.c:2526

Thanks & Regards,
Rajkumar Raghuwanshi


On Tue, Apr 7, 2020 at 11:07 PM Robert Haas  wrote:

> On Tue, Apr 7, 2020 at 1:25 PM Asif Rehman  wrote:
> > I will, however parallel backup is already quite a large patch. So I
> think we should first
> > agree on the current work before adding a backup manifest and
> progress-reporting support.
>
> It's going to be needed for commit, but it may make sense for us to do
> more review of what you've got here before we worry about it.
>
> I'm gonna try to find some time for that as soon as I can.
>
> --
> Robert Haas
> EnterpriseDB: http://www.enterprisedb.com
> The Enterprise PostgreSQL Company
>


Re: WIP/PoC for parallel backup

2020-04-02 Thread Rajkumar Raghuwanshi
Hi Asif,

My colleague Kashif Zeeshan reported an issue off-list, posting here,
please take a look.

When executing two backups at the same time, we get a FATAL error due to
max_wal_senders, yet instead of exiting, the backup got completed.
And when we tried to start the server from the backup cluster, we got an error.

[edb@localhost bin]$ ./pgbench -i -s 200 -h localhost -p 5432 postgres
[edb@localhost bin]$ ./pg_basebackup -v -j 8 -D  /home/edb/Desktop/backup/
pg_basebackup: initiating base backup, waiting for checkpoint to complete
pg_basebackup: checkpoint completed
pg_basebackup: write-ahead log start point: 0/C2000270 on timeline 1
pg_basebackup: starting background WAL receiver
pg_basebackup: created temporary replication slot "pg_basebackup_57849"
pg_basebackup: backup worker (0) created
pg_basebackup: backup worker (1) created
pg_basebackup: backup worker (2) created
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: backup worker (3) created
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: backup worker (4) created
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: backup worker (5) created
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: backup worker (6) created
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: backup worker (7) created
pg_basebackup: write-ahead log end point: 0/C350
pg_basebackup: waiting for background process to finish streaming ...
pg_basebackup: syncing data to disk ...
pg_basebackup: base backup completed
[edb@localhost bin]$ ./pg_basebackup -v -j 8 -D  /home/edb/Desktop/backup1/
pg_basebackup: initiating base backup, waiting for checkpoint to complete
pg_basebackup: checkpoint completed
pg_basebackup: write-ahead log start point: 0/C20001C0 on timeline 1
pg_basebackup: starting background WAL receiver
pg_basebackup: created temporary replication slot "pg_basebackup_57848"
pg_basebackup: backup worker (0) created
pg_basebackup: backup worker (1) created
pg_basebackup: backup worker (2) created
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: backup worker (3) created
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: backup worker (4) created
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: backup worker (5) created
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: backup worker (6) created
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: backup worker (7) created
pg_basebackup: write-ahead log end point: 0/C2000348
pg_basebackup: waiting for background process to finish streaming ...
pg_basebackup: syncing data to disk ...
pg_basebackup: base backup completed

[edb@localhost bin]$ ./pg_ctl -D /home/edb/Desktop/backup1/  -o "-p 5438"
start
pg_ctl: directory "/home/edb/Desktop/backup1" is not a database cluster
directory

Thanks & Regards,
Rajkumar Raghuwanshi


On Mon, Mar 30, 2020 at 6:28 PM Ahsan Hadi  wrote:

>
>
> On Mon, Mar 30, 2020 at 3:44 PM Rajkumar Raghuwanshi <
> rajkumar.raghuwan...@enterprisedb.com> wrote:
>
>> Thanks Asif,
>>
>> I have re-verified the reported issues. Except for standby backup, the
>> others are fixed.
>>
>
> Yes As Asif mentioned he is working on the standby issue and adding
> bandwidth throttling functionality to parallel backup.
>
> It would be good to get some feedback on Asif previous email from Robert
> on the design considerations for stand-by server support and throttling. I
> believe all the other points mentioned by Robert in this thread are
> addressed by Asif so it would be good to hear about any other concerns that
> are not addressed.
>
> Thanks,
>
> -- Ahsan
>
>
>> Thanks & Regards,
>> Rajkumar Raghuwanshi
>>
>>
>> On Fri, Mar 27, 2020 at 11:04 PM Asif Rehman 
>> wrote:
>>
>>>
>>>
>>> On Wed, Mar 25, 2020 at 12:22 PM Rajkumar Raghuwanshi <
>>> rajkumar.raghuwan...@enterprisedb.com> wrote:
>>>
>&g

Re: WIP/PoC for parallel backup

2020-03-30 Thread Rajkumar Raghuwanshi
Thanks Asif,

I have re-verified the reported issues. Except for standby backup, the others are fixed.

Thanks & Regards,
Rajkumar Raghuwanshi


On Fri, Mar 27, 2020 at 11:04 PM Asif Rehman  wrote:

>
>
> On Wed, Mar 25, 2020 at 12:22 PM Rajkumar Raghuwanshi <
> rajkumar.raghuwan...@enterprisedb.com> wrote:
>
>> Hi Asif,
>>
>> While testing further I observed parallel backup is not able to take
>> backup of standby server.
>>
>> mkdir /tmp/archive_dir
>> echo "archive_mode='on'">> data/postgresql.conf
>> echo "archive_command='cp %p /tmp/archive_dir/%f'">> data/postgresql.conf
>>
>> ./pg_ctl -D data -l logs start
>> ./pg_basebackup -p 5432 -Fp -R -D /tmp/slave
>>
>> echo "primary_conninfo='host=127.0.0.1 port=5432 user=edb'">>
>> /tmp/slave/postgresql.conf
>> echo "restore_command='cp /tmp/archive_dir/%f %p'">>
>> /tmp/slave/postgresql.conf
>> echo "promote_trigger_file='/tmp/failover.log'">>
>> /tmp/slave/postgresql.conf
>>
>> ./pg_ctl -D /tmp/slave -l /tmp/slave_logs -o "-p 5433" start -c
>>
>> [edb@localhost bin]$ ./psql postgres -p 5432 -c "select
>> pg_is_in_recovery();"
>>  pg_is_in_recovery
>> ---
>>  f
>> (1 row)
>>
>> [edb@localhost bin]$ ./psql postgres -p 5433 -c "select
>> pg_is_in_recovery();"
>>  pg_is_in_recovery
>> ---
>>  t
>> (1 row)
>>
>>
>>
>>
>> *[edb@localhost bin]$ ./pg_basebackup -p 5433 -D /tmp/bkp_s --jobs
>> 6pg_basebackup: error: could not list backup files: ERROR:  the standby was
>> promoted during online backupHINT:  This means that the backup being taken
>> is corrupt and should not be used. Try taking another online
>> backup.pg_basebackup: removing data directory "/tmp/bkp_s"*
>>
>> #same is working fine without parallel backup
>> [edb@localhost bin]$ ./pg_basebackup -p 5433 -D /tmp/bkp_s --jobs 1
>> [edb@localhost bin]$ ls /tmp/bkp_s/PG_VERSION
>> /tmp/bkp_s/PG_VERSION
>>
>> Thanks & Regards,
>> Rajkumar Raghuwanshi
>>
>>
>> On Thu, Mar 19, 2020 at 4:11 PM Rajkumar Raghuwanshi <
>> rajkumar.raghuwan...@enterprisedb.com> wrote:
>>
>>> Hi Asif,
>>>
>>> In another scenarios, bkp data is corrupted for tablespace. again this
>>> is not reproducible everytime,
>>> but If I am running the same set of commands I am getting the same error.
>>>
>>> [edb@localhost bin]$ ./pg_ctl -D data -l logfile start
>>> waiting for server to start done
>>> server started
>>> [edb@localhost bin]$
>>> [edb@localhost bin]$ mkdir /tmp/tblsp
>>> [edb@localhost bin]$ ./psql postgres -p 5432 -c "create tablespace
>>> tblsp location '/tmp/tblsp';"
>>> CREATE TABLESPACE
>>> [edb@localhost bin]$ ./psql postgres -p 5432 -c "create database testdb
>>> tablespace tblsp;"
>>> CREATE DATABASE
>>> [edb@localhost bin]$ ./psql testdb -p 5432 -c "create table testtbl (a
>>> text);"
>>> CREATE TABLE
>>> [edb@localhost bin]$ ./psql testdb -p 5432 -c "insert into testtbl
>>> values ('parallel_backup with tablespace');"
>>> INSERT 0 1
>>> [edb@localhost bin]$ ./pg_basebackup -p 5432 -D /tmp/bkp -T
>>> /tmp/tblsp=/tmp/tblsp_bkp --jobs 2
>>> [edb@localhost bin]$ ./pg_ctl -D /tmp/bkp -l /tmp/bkp_logs -o "-p "
>>> start
>>> waiting for server to start done
>>> server started
>>> [edb@localhost bin]$ ./psql postgres -p  -c "select * from
>>> pg_tablespace where spcname like 'tblsp%' or spcname = 'pg_default'";
>>>   oid  |  spcname   | spcowner | spcacl | spcoptions
>>> ---+----+--++
>>>   1663 | pg_default |   10 ||
>>>  16384 | tblsp  |   10 ||
>>> (2 rows)
>>>
>>> [edb@localhost bin]$ ./psql testdb -p  -c "select * from testtbl";
>>> psql: error: could not connect to server: FATAL:
>>>  "pg_tblspc/16384/PG_13_202003051/16385" is not a valid data directory
>>> DETAIL:  File "pg_tblspc/16384/PG_13_202003051/16385/PG_VERSION" is
>>> missing.
>>> [edb@localhost bin]$
>>> [edb@localhost bin]$ ls
>>> data/pg_tblspc/16384/PG_13_202003051/16385/PG_VERSION
>>> data/pg_tblspc/16384/PG_13_202003051/16385/PG_VERSION
>>> [edb@localhost bin]

Re: WIP/PoC for parallel backup

2020-03-25 Thread Rajkumar Raghuwanshi
Hi Asif,

While testing further I observed that parallel backup is not able to take a
backup of a standby server.

mkdir /tmp/archive_dir
echo "archive_mode='on'">> data/postgresql.conf
echo "archive_command='cp %p /tmp/archive_dir/%f'">> data/postgresql.conf

./pg_ctl -D data -l logs start
./pg_basebackup -p 5432 -Fp -R -D /tmp/slave

echo "primary_conninfo='host=127.0.0.1 port=5432 user=edb'">>
/tmp/slave/postgresql.conf
echo "restore_command='cp /tmp/archive_dir/%f %p'">>
/tmp/slave/postgresql.conf
echo "promote_trigger_file='/tmp/failover.log'">> /tmp/slave/postgresql.conf

./pg_ctl -D /tmp/slave -l /tmp/slave_logs -o "-p 5433" start -c

[edb@localhost bin]$ ./psql postgres -p 5432 -c "select
pg_is_in_recovery();"
 pg_is_in_recovery
---
 f
(1 row)

[edb@localhost bin]$ ./psql postgres -p 5433 -c "select
pg_is_in_recovery();"
 pg_is_in_recovery
---
 t
(1 row)




*[edb@localhost bin]$ ./pg_basebackup -p 5433 -D /tmp/bkp_s --jobs
6pg_basebackup: error: could not list backup files: ERROR:  the standby was
promoted during online backupHINT:  This means that the backup being taken
is corrupt and should not be used. Try taking another online
backup.pg_basebackup: removing data directory "/tmp/bkp_s"*

#same is working fine without parallel backup
[edb@localhost bin]$ ./pg_basebackup -p 5433 -D /tmp/bkp_s --jobs 1
[edb@localhost bin]$ ls /tmp/bkp_s/PG_VERSION
/tmp/bkp_s/PG_VERSION

Thanks & Regards,
Rajkumar Raghuwanshi


On Thu, Mar 19, 2020 at 4:11 PM Rajkumar Raghuwanshi <
rajkumar.raghuwan...@enterprisedb.com> wrote:

> Hi Asif,
>
> In another scenario, bkp data is corrupted for tablespace. Again, this is
> not reproducible every time,
> but if I am running the same set of commands I am getting the same error.
>
> [edb@localhost bin]$ ./pg_ctl -D data -l logfile start
> waiting for server to start done
> server started
> [edb@localhost bin]$
> [edb@localhost bin]$ mkdir /tmp/tblsp
> [edb@localhost bin]$ ./psql postgres -p 5432 -c "create tablespace tblsp
> location '/tmp/tblsp';"
> CREATE TABLESPACE
> [edb@localhost bin]$ ./psql postgres -p 5432 -c "create database testdb
> tablespace tblsp;"
> CREATE DATABASE
> [edb@localhost bin]$ ./psql testdb -p 5432 -c "create table testtbl (a
> text);"
> CREATE TABLE
> [edb@localhost bin]$ ./psql testdb -p 5432 -c "insert into testtbl values
> ('parallel_backup with tablespace');"
> INSERT 0 1
> [edb@localhost bin]$ ./pg_basebackup -p 5432 -D /tmp/bkp -T
> /tmp/tblsp=/tmp/tblsp_bkp --jobs 2
> [edb@localhost bin]$ ./pg_ctl -D /tmp/bkp -l /tmp/bkp_logs -o "-p "
> start
> waiting for server to start done
> server started
> [edb@localhost bin]$ ./psql postgres -p  -c "select * from
> pg_tablespace where spcname like 'tblsp%' or spcname = 'pg_default'";
>   oid  |  spcname   | spcowner | spcacl | spcoptions
> -------+------------+----------+--------+------------
>   1663 | pg_default |   10 ||
>  16384 | tblsp  |   10 ||
> (2 rows)
>
> [edb@localhost bin]$ ./psql testdb -p  -c "select * from testtbl";
> psql: error: could not connect to server: FATAL:
>  "pg_tblspc/16384/PG_13_202003051/16385" is not a valid data directory
> DETAIL:  File "pg_tblspc/16384/PG_13_202003051/16385/PG_VERSION" is
> missing.
> [edb@localhost bin]$
> [edb@localhost bin]$ ls
> data/pg_tblspc/16384/PG_13_202003051/16385/PG_VERSION
> data/pg_tblspc/16384/PG_13_202003051/16385/PG_VERSION
> [edb@localhost bin]$ ls
> /tmp/bkp/pg_tblspc/16384/PG_13_202003051/16385/PG_VERSION
> ls: cannot access
> /tmp/bkp/pg_tblspc/16384/PG_13_202003051/16385/PG_VERSION: No such file or
> directory
>
>
> Thanks & Regards,
> Rajkumar Raghuwanshi
>
>
> On Mon, Mar 16, 2020 at 6:19 PM Rajkumar Raghuwanshi <
> rajkumar.raghuwan...@enterprisedb.com> wrote:
>
>> Hi Asif,
>>
>> On testing further, I found when taking backup with -R, pg_basebackup
>> crashed
>> this crash is not consistently reproducible.
>>
>> [edb@localhost bin]$ ./psql postgres -p 5432 -c "create table test (a
>> text);"
>> CREATE TABLE
>> [edb@localhost bin]$ ./psql postgres -p 5432 -c "insert into test values
>> ('parallel_backup with -R recovery-conf');"
>> INSERT 0 1
>> [edb@localhost bin]$ ./pg_basebackup -p 5432 -j 2 -D /tmp/test_bkp/bkp -R
>> Segmentation fault (core dumped)
>>
>> stack trace looks the same as it was on earlier reported crash with
>> tablespace.
>> --stack trace
>> [edb@localhost bin]$ gdb -q -c core.37915 pg_basebackup
>&g

Re: WIP/PoC for parallel backup

2020-03-19 Thread Rajkumar Raghuwanshi
Hi Asif,

In another scenario, bkp data is corrupted for tablespace. Again, this is
not reproducible every time,
but if I am running the same set of commands I am getting the same error.

[edb@localhost bin]$ ./pg_ctl -D data -l logfile start
waiting for server to start done
server started
[edb@localhost bin]$
[edb@localhost bin]$ mkdir /tmp/tblsp
[edb@localhost bin]$ ./psql postgres -p 5432 -c "create tablespace tblsp
location '/tmp/tblsp';"
CREATE TABLESPACE
[edb@localhost bin]$ ./psql postgres -p 5432 -c "create database testdb
tablespace tblsp;"
CREATE DATABASE
[edb@localhost bin]$ ./psql testdb -p 5432 -c "create table testtbl (a
text);"
CREATE TABLE
[edb@localhost bin]$ ./psql testdb -p 5432 -c "insert into testtbl values
('parallel_backup with tablespace');"
INSERT 0 1
[edb@localhost bin]$ ./pg_basebackup -p 5432 -D /tmp/bkp -T
/tmp/tblsp=/tmp/tblsp_bkp --jobs 2
[edb@localhost bin]$ ./pg_ctl -D /tmp/bkp -l /tmp/bkp_logs -o "-p "
start
waiting for server to start done
server started
[edb@localhost bin]$ ./psql postgres -p  -c "select * from
pg_tablespace where spcname like 'tblsp%' or spcname = 'pg_default'";
  oid  |  spcname   | spcowner | spcacl | spcoptions
-------+------------+----------+--------+------------
  1663 | pg_default |   10 ||
 16384 | tblsp  |   10 ||
(2 rows)

[edb@localhost bin]$ ./psql testdb -p  -c "select * from testtbl";
psql: error: could not connect to server: FATAL:
 "pg_tblspc/16384/PG_13_202003051/16385" is not a valid data directory
DETAIL:  File "pg_tblspc/16384/PG_13_202003051/16385/PG_VERSION" is missing.
[edb@localhost bin]$
[edb@localhost bin]$ ls
data/pg_tblspc/16384/PG_13_202003051/16385/PG_VERSION
data/pg_tblspc/16384/PG_13_202003051/16385/PG_VERSION
[edb@localhost bin]$ ls
/tmp/bkp/pg_tblspc/16384/PG_13_202003051/16385/PG_VERSION
ls: cannot access
/tmp/bkp/pg_tblspc/16384/PG_13_202003051/16385/PG_VERSION: No such file or
directory


Thanks & Regards,
Rajkumar Raghuwanshi


On Mon, Mar 16, 2020 at 6:19 PM Rajkumar Raghuwanshi <
rajkumar.raghuwan...@enterprisedb.com> wrote:

> Hi Asif,
>
> On testing further, I found when taking backup with -R, pg_basebackup
> crashed
> this crash is not consistently reproducible.
>
> [edb@localhost bin]$ ./psql postgres -p 5432 -c "create table test (a
> text);"
> CREATE TABLE
> [edb@localhost bin]$ ./psql postgres -p 5432 -c "insert into test values
> ('parallel_backup with -R recovery-conf');"
> INSERT 0 1
> [edb@localhost bin]$ ./pg_basebackup -p 5432 -j 2 -D /tmp/test_bkp/bkp -R
> Segmentation fault (core dumped)
>
> stack trace looks the same as it was on earlier reported crash with
> tablespace.
> --stack trace
> [edb@localhost bin]$ gdb -q -c core.37915 pg_basebackup
> Loaded symbols for /lib64/libnss_files.so.2
> Core was generated by `./pg_basebackup -p 5432 -j 2 -D /tmp/test_bkp/bkp
> -R'.
> Program terminated with signal 11, Segmentation fault.
> #0  0x004099ee in worker_get_files (wstate=0xc1e458) at
> pg_basebackup.c:3175
> 3175 backupinfo->curr = fetchfile->next;
> Missing separate debuginfos, use: debuginfo-install
> keyutils-libs-1.4-5.el6.x86_64 krb5-libs-1.10.3-65.el6.x86_64
> libcom_err-1.41.12-24.el6.x86_64 libselinux-2.0.94-7.el6.x86_64
> openssl-1.0.1e-58.el6_10.x86_64 zlib-1.2.3-29.el6.x86_64
> (gdb) bt
> #0  0x004099ee in worker_get_files (wstate=0xc1e458) at
> pg_basebackup.c:3175
> #1  0x00408a9e in worker_run (arg=0xc1e458) at pg_basebackup.c:2715
> #2  0x003921a07aa1 in start_thread (arg=0x7f72207c0700) at
> pthread_create.c:301
> #3  0x0039212e8c4d in clone () at
> ../sysdeps/unix/sysv/linux/x86_64/clone.S:115
> (gdb)
>
> Thanks & Regards,
> Rajkumar Raghuwanshi
>
>
> On Mon, Mar 16, 2020 at 2:14 PM Jeevan Chalke <
> jeevan.cha...@enterprisedb.com> wrote:
>
>> Hi Asif,
>>
>>
>>> Thanks Rajkumar. I have fixed the above issues and have rebased the
>>> patch to the latest master (b7f64c64).
>>> (V9 of the patches are attached).
>>>
>>
>> I had a further review of the patches and here are my few observations:
>>
>> 1.
>> +/*
>> + * stop_backup() - ends an online backup
>> + *
>> + * The function is called at the end of an online backup. It sends out
>> pg_control
>> + * file, optionally WAL segments and ending WAL location.
>> + */
>>
>> Comments seem out-dated.
>>
>> 2. With parallel jobs, maxrate is now not supported. Since we are now
>> asking
>> data in multiple threads throttling seems important here. Can you please
>> explain why have you disabled that?
>>
>> 3. As we are alwa

Re: WIP/PoC for parallel backup

2020-03-16 Thread Rajkumar Raghuwanshi
Hi Asif,

On testing further, I found when taking backup with -R, pg_basebackup
crashed
this crash is not consistently reproducible.

[edb@localhost bin]$ ./psql postgres -p 5432 -c "create table test (a
text);"
CREATE TABLE
[edb@localhost bin]$ ./psql postgres -p 5432 -c "insert into test values
('parallel_backup with -R recovery-conf');"
INSERT 0 1
[edb@localhost bin]$ ./pg_basebackup -p 5432 -j 2 -D /tmp/test_bkp/bkp -R
Segmentation fault (core dumped)

stack trace looks the same as it was on earlier reported crash with
tablespace.
--stack trace
[edb@localhost bin]$ gdb -q -c core.37915 pg_basebackup
Loaded symbols for /lib64/libnss_files.so.2
Core was generated by `./pg_basebackup -p 5432 -j 2 -D /tmp/test_bkp/bkp
-R'.
Program terminated with signal 11, Segmentation fault.
#0  0x004099ee in worker_get_files (wstate=0xc1e458) at
pg_basebackup.c:3175
3175 backupinfo->curr = fetchfile->next;
Missing separate debuginfos, use: debuginfo-install
keyutils-libs-1.4-5.el6.x86_64 krb5-libs-1.10.3-65.el6.x86_64
libcom_err-1.41.12-24.el6.x86_64 libselinux-2.0.94-7.el6.x86_64
openssl-1.0.1e-58.el6_10.x86_64 zlib-1.2.3-29.el6.x86_64
(gdb) bt
#0  0x004099ee in worker_get_files (wstate=0xc1e458) at
pg_basebackup.c:3175
#1  0x00408a9e in worker_run (arg=0xc1e458) at pg_basebackup.c:2715
#2  0x003921a07aa1 in start_thread (arg=0x7f72207c0700) at
pthread_create.c:301
#3  0x0039212e8c4d in clone () at
../sysdeps/unix/sysv/linux/x86_64/clone.S:115
(gdb)

Thanks & Regards,
Rajkumar Raghuwanshi


On Mon, Mar 16, 2020 at 2:14 PM Jeevan Chalke <
jeevan.cha...@enterprisedb.com> wrote:

> Hi Asif,
>
>
>> Thanks Rajkumar. I have fixed the above issues and have rebased the patch
>> to the latest master (b7f64c64).
>> (V9 of the patches are attached).
>>
>
> I had a further review of the patches and here are my few observations:
>
> 1.
> +/*
> + * stop_backup() - ends an online backup
> + *
> + * The function is called at the end of an online backup. It sends out
> pg_control
> + * file, optionally WAL segments and ending WAL location.
> + */
>
> Comments seem out-dated.
>
> 2. With parallel jobs, maxrate is now not supported. Since we are now
> asking
> data in multiple threads throttling seems important here. Can you please
> explain why have you disabled that?
>
> 3. As we are always fetching a single file and as Robert suggested, let
> rename
> SEND_FILES to SEND_FILE instead.
>
> 4. Does this work on Windows? I mean does pthread_create() work on Windows?
> I asked this as I see that pgbench has its own implementation for
> pthread_create() for WIN32 but this patch doesn't.
>
> 5. Typos:
> tablspace => tablespace
> safly => safely
>
> 6. parallel_backup_run() needs some comments explaining the states it goes
> through PB_* states.
>
> 7.
> +case PB_FETCH_REL_FILES:/* fetch files from server */
> +if (backupinfo->activeworkers == 0)
> +{
> +backupinfo->backupstate = PB_STOP_BACKUP;
> +free_filelist(backupinfo);
> +}
> +break;
> +case PB_FETCH_WAL_FILES:/* fetch WAL files from server */
> +if (backupinfo->activeworkers == 0)
> +{
> +backupinfo->backupstate = PB_BACKUP_COMPLETE;
> +}
> +break;
>
> Why free_filelist() is not called in PB_FETCH_WAL_FILES case?
>
> Thanks
> --
> Jeevan Chalke
> Associate Database Architect & Team Lead, Product Development
> EnterpriseDB Corporation
> The Enterprise PostgreSQL Company
>
> Phone: +91 20 66449694
>
> Website: www.enterprisedb.com
> EnterpriseDB Blog: http://blogs.enterprisedb.com/
> Follow us on Twitter: http://www.twitter.com/enterprisedb
>
> This e-mail message (and any attachment) is intended for the use of the
> individual or entity to whom it is addressed. This message contains
> information from EnterpriseDB Corporation that may be privileged,
> confidential, or exempt from disclosure under applicable law. If you are
> not the intended recipient or authorized to receive this for the intended
> recipient, any use, dissemination, distribution, retention, archiving, or
> copying of this communication is strictly prohibited. If you have received
> this e-mail in error, please notify the sender immediately by reply e-mail
> and delete this message.
>


Re: WIP/PoC for parallel backup

2020-03-16 Thread Rajkumar Raghuwanshi
On Mon, Mar 16, 2020 at 11:52 AM Asif Rehman  wrote:

>
>
> On Mon, Mar 16, 2020 at 11:08 AM Rajkumar Raghuwanshi <
> rajkumar.raghuwan...@enterprisedb.com> wrote:
>
>> Thanks for the patches.
>>
>> I have verified reported issues with new patches, issues are fixed now.
>>
>> I got another observation: if a new slot name is given without the -C
>> option, it leads to a server crash error.
>>
>> [edb@localhost bin]$ ./pg_basebackup -p 5432 -j 4 -D /tmp/bkp --slot
>> test_bkp_slot
>> pg_basebackup: error: could not send replication command
>> "START_REPLICATION": ERROR:  replication slot "test_bkp_slot" does not exist
>> pg_basebackup: error: could not list backup files: server closed the
>> connection unexpectedly
>> This probably means the server terminated abnormally
>> before or while processing the request.
>> pg_basebackup: removing data directory "/tmp/bkp"
>>
>
> It seems to be an expected behavior. The START_BACKUP command has been
> executed, and
> pg_basebackup tries to start a WAL streaming process with a non-existent
> slot, which results in
> an error. So the backup is aborted while terminating all other processes.
>
I think the error message can be improved. The current error message makes it
look like the database server has crashed.

on PG same is existing with exit 1.
[edb@localhost bin]$ ./pg_basebackup -p 5432 -D /tmp/bkp --slot
test_bkp_slot
pg_basebackup: error: could not send replication command
"START_REPLICATION": ERROR:  replication slot "test_bkp_slot" does not exist
pg_basebackup: error: child process exited with exit code 1
pg_basebackup: removing data directory "/tmp/bkp"


>
>
> --
> Asif Rehman
> Highgo Software (Canada/China/Pakistan)
> URL : www.highgo.ca
>
>


Re: WIP/PoC for parallel backup

2020-03-16 Thread Rajkumar Raghuwanshi
Thanks for the patches.

I have verified reported issues with new patches, issues are fixed now.

I got another observation: if a new slot name is given without the -C option,
it leads to a server crash error.

[edb@localhost bin]$ ./pg_basebackup -p 5432 -j 4 -D /tmp/bkp --slot
test_bkp_slot
pg_basebackup: error: could not send replication command
"START_REPLICATION": ERROR:  replication slot "test_bkp_slot" does not exist
pg_basebackup: error: could not list backup files: server closed the
connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
pg_basebackup: removing data directory "/tmp/bkp"

Thanks & Regards,
Rajkumar Raghuwanshi


On Fri, Mar 13, 2020 at 9:51 PM Asif Rehman  wrote:

>
> On Wed, Mar 11, 2020 at 2:38 PM Rajkumar Raghuwanshi <
> rajkumar.raghuwan...@enterprisedb.com> wrote:
>
>> Hi Asif
>>
>> I have started testing this feature. I have applied v6 patch on commit
>> a069218163704c44a8996e7e98e765c56e2b9c8e (30 Jan).
>> I got few observations, please take a look.
>>
>> *--if backup failed, backup directory is not getting removed.*
>> [edb@localhost bin]$ ./pg_basebackup -p 5432 --jobs=9 -D
>> /tmp/test_bkp/bkp6
>> pg_basebackup: error: could not connect to server: FATAL:  number of
>> requested standby connections exceeds max_wal_senders (currently 10)
>> [edb@localhost bin]$ ./pg_basebackup -p 5432 --jobs=8 -D
>> /tmp/test_bkp/bkp6
>> pg_basebackup: error: directory "/tmp/test_bkp/bkp6" exists but is not
>> empty
>>
>>
>> *--giving large number of jobs leading segmentation fault.*
>> ./pg_basebackup -p 5432 --jobs=1000 -D /tmp/t3
>> pg_basebackup: error: could not connect to server: FATAL:  number of
>> requested standby connections exceeds max_wal_senders (currently 10)
>> pg_basebackup: error: could not connect to server: FATAL:  number of
>> requested standby connections exceeds max_wal_senders (currently 10)
>> pg_basebackup: error: could not connect to server: FATAL:  number of
>> requested standby connections exceeds max_wal_senders (currently 10)
>> .
>> .
>> .
>> pg_basebackup: error: could not connect to server: FATAL:  number of
>> requested standby connections exceeds max_wal_senders (currently 10)
>> pg_basebackup: error: could not connect to server: FATAL:  number of
>> requested standby connections exceeds max_wal_senders (currently 10)
>> pg_basebackup: error: could not connect to server: FATAL:  number of
>> requested standby connections exceeds max_wal_senders (currently 10)
>> pg_basebackup: error: could not connect to server: FATAL:  number of
>> requested standby connections exceeds max_wal_senders (currently 10)
>> pg_basebackup: error: could not connect to server: could not fork new
>> process for connection: Resource temporarily unavailable
>>
>> could not fork new process for connection: Resource temporarily
>> unavailable
>> pg_basebackup: error: failed to create thread: Resource temporarily
>> unavailable
>> Segmentation fault (core dumped)
>>
>> --stack-trace
>> gdb -q -c core.11824 pg_basebackup
>> Loaded symbols for /lib64/libnss_files.so.2
>> Core was generated by `./pg_basebackup -p 5432 --jobs=1000 -D
>> /tmp/test_bkp/bkp10'.
>> Program terminated with signal 11, Segmentation fault.
>> #0  pthread_join (threadid=140503120623360, thread_return=0x0) at
>> pthread_join.c:46
>> 46  if (INVALID_NOT_TERMINATED_TD_P (pd))
>> Missing separate debuginfos, use: debuginfo-install
>> keyutils-libs-1.4-5.el6.x86_64 krb5-libs-1.10.3-65.el6.x86_64
>> libcom_err-1.41.12-24.el6.x86_64 libselinux-2.0.94-7.el6.x86_64
>> openssl-1.0.1e-58.el6_10.x86_64 zlib-1.2.3-29.el6.x86_64
>> (gdb) bt
>> #0  pthread_join (threadid=140503120623360, thread_return=0x0) at
>> pthread_join.c:46
>> #1  0x00408e21 in cleanup_workers () at pg_basebackup.c:2840
>> #2  0x00403846 in disconnect_atexit () at pg_basebackup.c:316
>> #3  0x003921235a02 in __run_exit_handlers (status=1) at exit.c:78
>> #4  exit (status=1) at exit.c:100
>> #5  0x00408aa6 in create_parallel_workers (backupinfo=0x1a4b8c0)
>> at pg_basebackup.c:2713
>> #6  0x00407946 in BaseBackup () at pg_basebackup.c:2127
>> #7  0x0040895c in main (argc=6, argv=0x7ffd566f4718) at
>> pg_basebackup.c:2668
>>
>>
>> *--with tablespace is in the same directory as data, parallel_backup
>> crashed*
>> [edb@localhost bin]$ ./initdb -D /tmp/data
>> [edb@localhost bin]$ ./pg_ctl -D /tmp/data -l /tmp/logfile start
>> [edb@localhost bin]

Re: WIP/PoC for parallel backup

2020-03-11 Thread Rajkumar Raghuwanshi
Hi Asif

I have started testing this feature. I have applied v6 patch on commit
a069218163704c44a8996e7e98e765c56e2b9c8e (30 Jan).
I got few observations, please take a look.

*--if backup failed, backup directory is not getting removed.*
[edb@localhost bin]$ ./pg_basebackup -p 5432 --jobs=9 -D /tmp/test_bkp/bkp6
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
[edb@localhost bin]$ ./pg_basebackup -p 5432 --jobs=8 -D /tmp/test_bkp/bkp6
pg_basebackup: error: directory "/tmp/test_bkp/bkp6" exists but is not empty


*--giving large number of jobs leading segmentation fault.*
./pg_basebackup -p 5432 --jobs=1000 -D /tmp/t3
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
.
.
.
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: error: could not connect to server: FATAL:  number of
requested standby connections exceeds max_wal_senders (currently 10)
pg_basebackup: error: could not connect to server: could not fork new
process for connection: Resource temporarily unavailable

could not fork new process for connection: Resource temporarily unavailable
pg_basebackup: error: failed to create thread: Resource temporarily
unavailable
Segmentation fault (core dumped)

--stack-trace
gdb -q -c core.11824 pg_basebackup
Loaded symbols for /lib64/libnss_files.so.2
Core was generated by `./pg_basebackup -p 5432 --jobs=1000 -D
/tmp/test_bkp/bkp10'.
Program terminated with signal 11, Segmentation fault.
#0  pthread_join (threadid=140503120623360, thread_return=0x0) at
pthread_join.c:46
46  if (INVALID_NOT_TERMINATED_TD_P (pd))
Missing separate debuginfos, use: debuginfo-install
keyutils-libs-1.4-5.el6.x86_64 krb5-libs-1.10.3-65.el6.x86_64
libcom_err-1.41.12-24.el6.x86_64 libselinux-2.0.94-7.el6.x86_64
openssl-1.0.1e-58.el6_10.x86_64 zlib-1.2.3-29.el6.x86_64
(gdb) bt
#0  pthread_join (threadid=140503120623360, thread_return=0x0) at
pthread_join.c:46
#1  0x00408e21 in cleanup_workers () at pg_basebackup.c:2840
#2  0x00403846 in disconnect_atexit () at pg_basebackup.c:316
#3  0x003921235a02 in __run_exit_handlers (status=1) at exit.c:78
#4  exit (status=1) at exit.c:100
#5  0x00408aa6 in create_parallel_workers (backupinfo=0x1a4b8c0) at
pg_basebackup.c:2713
#6  0x00407946 in BaseBackup () at pg_basebackup.c:2127
#7  0x0040895c in main (argc=6, argv=0x7ffd566f4718) at
pg_basebackup.c:2668


*--with tablespace is in the same directory as data, parallel_backup
crashed*
[edb@localhost bin]$ ./initdb -D /tmp/data
[edb@localhost bin]$ ./pg_ctl -D /tmp/data -l /tmp/logfile start
[edb@localhost bin]$ mkdir /tmp/ts
[edb@localhost bin]$ ./psql postgres
psql (13devel)
Type "help" for help.

postgres=# create tablespace ts location '/tmp/ts';
CREATE TABLESPACE
postgres=# create table tx (a int) tablespace ts;
CREATE TABLE
postgres=# \q
[edb@localhost bin]$ ./pg_basebackup -j 2 -D /tmp/tts -T /tmp/ts=/tmp/ts1
Segmentation fault (core dumped)

--stack-trace
[edb@localhost bin]$ gdb -q -c core.15778 pg_basebackup
Loaded symbols for /lib64/libnss_files.so.2
Core was generated by `./pg_basebackup -j 2 -D /tmp/tts -T
/tmp/ts=/tmp/ts1'.
Program terminated with signal 11, Segmentation fault.
#0  0x00409442 in get_backup_filelist (conn=0x140cb20,
backupInfo=0x14210a0) at pg_basebackup.c:3000
3000 backupInfo->curr->next = file;
Missing separate debuginfos, use: debuginfo-install
keyutils-libs-1.4-5.el6.x86_64 krb5-libs-1.10.3-65.el6.x86_64
libcom_err-1.41.12-24.el6.x86_64 libselinux-2.0.94-7.el6.x86_64
openssl-1.0.1e-58.el6_10.x86_64 zlib-1.2.3-29.el6.x86_64
(gdb) bt
#0  0x00409442 in get_backup_filelist (conn=0x140cb20,
backupInfo=0x14210a0) at pg_basebackup.c:3000
#1  0x00408b56 in parallel_backup_run (backupinfo=0x14210a0) at
pg_basebackup.c:2739
#2  0x00407955 in BaseBackup () at pg_basebackup.c:2128
#3  0x0040895c in main (argc=7, argv=0x7ffca2910c58) at
pg_basebackup.c:2668
(gdb)

Thanks & Regards,
Rajkumar Raghuwanshi


On Tue, Feb 25, 2020 at 7:49 PM Asif Rehman  wrote:

> Hi,
>
> I have created a commitfest entry.
> https://commitfest.postgresql.org/27/2472/
>
>
> On Mon, Feb 17, 2020 at 1:3

Re: backup manifests

2020-03-04 Thread Rajkumar Raghuwanshi
Hi,

In a negative test scenario, if I change the size to -1 in backup_manifest,
pg_validatebackup gives an
error with a random size number.

[edb@localhost bin]$ ./pg_basebackup -p 5551 -D /tmp/bold
--manifest-checksum 'SHA256'
[edb@localhost bin]$ ./pg_validatebackup /tmp/bold
pg_validatebackup: backup successfully verified

--change a file size to -1 and generate new checksum.
[edb@localhost bin]$ vi /tmp/bold/backup_manifest
[edb@localhost bin]$ shasum -a256 /tmp/bold/backup_manifest
c3d7838cbbf991c6108f9c1ab78f673c20d8073114500f14da6ed07ede2dc44a
 /tmp/bold/backup_manifest
[edb@localhost bin]$ vi /tmp/bold/backup_manifest

[edb@localhost bin]$ ./pg_validatebackup /tmp/bold
pg_validatebackup: error: "global/4183" has size 0 on disk but size
*18446744073709551615* in the manifest

Thanks & Regards,
Rajkumar Raghuwanshi


On Thu, Mar 5, 2020 at 9:37 AM Suraj Kharage 
wrote:

>
> On Wed, Mar 4, 2020 at 7:21 PM tushar 
> wrote:
>
>> Hi,
>>
>> There is a scenario in which i add something inside the pg_tablespace
>> directory , i am getting an error like-
>>
>> pg_validatebackup: * manifest_checksum =
>> 77ddacb4e7e02e2b880792a19a3adf09266dd88553dd15cfd0c22caee7d9cc04
>> pg_validatebackup: error: "pg_tblspc/16385/*PG_13_202002271*/test" is
>> present on disk but not in the manifest
>>
>> but if i remove 'PG_13_202002271 ' directory then there is no error
>>
>> [centos@tushar-ldap-docker bin]$ ./pg_validatebackup data
>> pg_validatebackup: * manifest_checksum =
>> 77ddacb4e7e02e2b880792a19a3adf09266dd88553dd15cfd0c22caee7d9cc04
>> pg_validatebackup: backup successfully verified
>>
>>
> This seems expected considering current design as we don't log the
> directory entries in backup_manifest. In your case, you have tablespace
> with no objects (empty tablespace) then backup_manifest does not have any
> entry for this hence when you remove this tablespace directory, validator
> could not detect it.
>
> We can either document it or add the entry for directories in the
> manifest. Robert may have a better idea on this.
>
> --
> --
>
> Thanks & Regards,
> Suraj kharage,
> EnterpriseDB Corporation,
> The Postgres Database Company.
>


Re: block-level incremental backup

2019-08-30 Thread Rajkumar Raghuwanshi
Hi,

I am doing some testing on pg_basebackup and pg_combinebackup patches. I
have also tried to create tap test for pg_combinebackup by taking
reference from pg_basebackup tap cases.
Attaching first draft test patch.

I have done some testing with compression options, both -z and -Z level is
working with incremental backup.

A minor comment: it is mentioned in the pg_combinebackup help that a maximum
of 10 incremental backups can be given with the -i option, but I found that a
maximum of 9 incremental backup directories can be given at a time.

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


On Thu, Aug 29, 2019 at 10:06 PM Jeevan Ladhe 
wrote:

> Due to the inherent nature of pg_basebackup, the incremental backup also
> allows taking backup in tar and compressed format. But, pg_combinebackup
> does not understand how to restore this. I think we should either make
> pg_combinebackup support restoration of tar incremental backup or restrict
> taking the incremental backup in tar format until pg_combinebackup
> supports the restoration by making option '--lsn' and '-Ft' exclusive.
>
> It is arguable that one can take the incremental backup in tar format,
> extract
> that manually and then give the resultant directory as input to the
> pg_combinebackup, but I think that kills the purpose of having
> pg_combinebackup utility.
>
> Thoughts?
>
> Regards,
> Jeevan Ladhe
>
diff --git a/src/bin/pg_combinebackup/t/pg_combinebackup.pl b/src/bin/pg_combinebackup/t/pg_combinebackup.pl
new file mode 100644
index 000..e0f834a
--- /dev/null
+++ b/src/bin/pg_combinebackup/t/pg_combinebackup.pl
@@ -0,0 +1,79 @@
+use strict;
+use warnings;
+use Cwd;
+use Config;
+use File::Basename qw(basename dirname);
+use File::Path qw(rmtree);
+use PostgresNode;
+use TestLib;
+use Test::More tests => 23;
+
+program_help_ok('pg_combinebackup');
+program_version_ok('pg_combinebackup');
+program_options_handling_ok('pg_combinebackup');
+
+my $tempdir = TestLib::tempdir;
+
+my $node = get_new_node('main');
+
+# Initialize node
+$node->init();
+my $pgdata = $node->data_dir;
+
+# Change wal related setting for pg_basebackup to run
+open my $conf, '>>', "$pgdata/postgresql.conf";
+print $conf "max_replication_slots = 10\n";
+print $conf "max_wal_senders = 10\n";
+print $conf "wal_level = replica\n";
+close $conf;
+$node->start;
+
+$node->command_fails(['pg_combinebackup'],
+	'pg_combinebackup needs full and incremental directory specified');
+
+# Create an unlogged table to test that forks other than init are not copied.
+$node->safe_psql('postgres', 'CREATE UNLOGGED TABLE base_unlogged (id int)');
+
+my $baseUnloggedPath = $node->safe_psql('postgres',
+	q{select pg_relation_filepath('base_unlogged')});
+
+# Make sure main and init forks exist
+ok(-f "$pgdata/${baseUnloggedPath}_init", 'unlogged init fork in base');
+ok(-f "$pgdata/$baseUnloggedPath",'unlogged main fork in base');
+
+# Run full base backup.
+$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backup"],
+	'pg_basebackup runs for full backup');
+ok(-f "$tempdir/backup/PG_VERSION", 'full backup was created');
+
+# Unlogged relation forks other than init should not be copied
+ok(-f "$tempdir/backup/${baseUnloggedPath}_init",
+	'unlogged init fork in backup');
+ok( !-f "$tempdir/backup/$baseUnloggedPath",
+	'unlogged main fork not in backup');
+
+# Get LSN of last backup to use for incremental backup
+my @extract_lsn = split (" ", scalar TestLib::slurp_file("$tempdir/backup/backup_label"));
+my $LSN = $extract_lsn[3];
+
+# Run incr base backup.
+$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backup1",'--lsn', "$LSN"],
+	'pg_basebackup runs for incremental backup');
+ok(-f "$tempdir/backup1/PG_VERSION", 'incremental backup was created');
+
+# Unlogged relation forks other than init should not be copied
+ok(-f "$tempdir/backup1/${baseUnloggedPath}_init",
+	'unlogged init fork in backup');
+ok( !-f "$tempdir/backup1/$baseUnloggedPath",
+	'unlogged main fork not in backup');
+
+# Run pg_combinebackup.
+$node->command_ok([ 'pg_combinebackup', '-f', "$tempdir/backup", '-i', "$tempdir/backup1", '-o', "$tempdir/backup2"],
+	'pg_combinebackup runs');
+ok(-f "$tempdir/backup2/PG_VERSION", 'combined backup was created');
+
+# Unlogged relation forks other than init should not be copied
+ok(-f "$tempdir/backup2/${baseUnloggedPath}_init",
+	'unlogged init fork in backup');
+ok( !-f "$tempdir/backup2/$baseUnloggedPath",
+	'unlogged main fork not in backup');


getting ERROR "relation 16401 has no triggers" with partition foreign key alter

2019-07-16 Thread Rajkumar Raghuwanshi
Hi,

I am getting ERROR:  relation 16401 has no triggers error while executing
below query.

postgres=# create table tbl1(f1 int primary key);
CREATE TABLE
postgres=# create table tbl2(f1 int references tbl1 deferrable initially
deferred) partition by range(f1);
CREATE TABLE
postgres=# create table tbl2_p1 partition of tbl2 for values from
(minvalue) to (maxvalue);
CREATE TABLE
postgres=# insert into tbl1 values(1);
INSERT 0 1
postgres=# begin;
BEGIN
postgres=# insert into tbl2 values(1);
INSERT 0 1
postgres=# alter table tbl2 drop constraint tbl2_f1_fkey;
ALTER TABLE
postgres=# commit;
ERROR:  relation 16395 has no triggers

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: Postgres 11: Table Partitioning and Primary Keys

2019-07-09 Thread Rajkumar Raghuwanshi
On Tue, Jul 9, 2019 at 8:29 AM Michael Paquier  wrote:

> On Mon, Jul 08, 2019 at 10:37:37PM -0400, Bruce Momjian wrote:
> > On Fri, Jul  5, 2019 at 09:20:07PM +, PG Doc comments form wrote:
> >> In the documentation for Postgres 11 table partitioning, there is no
> mention
> >> of the requirement that the Primary Key of a partitioned table must
> contain
> >> the partition key.
> >> In fact the documentation on primary keys is so light that I am not even
> >> 100% sure the above is correct.  If the following table is not possible
> in
> >> Postgres 11, the documentation should find some way to make that
> clear.
> >>
> >> I believe this should be documented in section "5.10.2.3. Limitations"
> >
> > Can someone comment on this?  CC to hackers.
>
> Yep, that's the case:
> =# CREATE TABLE parent_tab (id int, id2 int primary key)
>  PARTITION BY RANGE (id);
> ERROR:  0A000: insufficient columns in PRIMARY KEY constraint
> definition
> DETAIL:  PRIMARY KEY constraint on table "parent_tab" lacks column
> "id" which is part of the partition key.
> LOCATION:  DefineIndex, indexcmds.c:894
>
same is valid for UNIQUE constraint also.

postgres=# CREATE TABLE parent_tab (id int, id2 int unique)
 PARTITION BY RANGE (id);
ERROR:  insufficient columns in UNIQUE constraint definition
DETAIL:  UNIQUE constraint on table "parent_tab" lacks column "id" which is
part of the partition key.



>
> I agree with the report here that adding one sentence to 5.10.2.3
> which is for the limitations of declarative partitioning would be a
> good idea.  We don't mention the limitation in CREATE TABLE either
> (which would be rather incorrect IMO).
>
> Attached is an idea of patch for the documentation, using this
> wording:
> + 
> +  
> +   When defining a primary key on a partitioned table, the primary
> +   key column must be included in the partition key.
> +  
> + 
> If somebody has any better idea for that paragraph, please feel free.
> --
> Michael
>


Statistical aggregate functions are not working with PARTIAL aggregation

2019-05-07 Thread Rajkumar Raghuwanshi
Hi,
As this issue is reproducible without partition-wise aggregate also,
changing email subject from "Statistical aggregate functions are not
working with partitionwise aggregate " to "Statistical aggregate functions
are not working with PARTIAL aggregation".

original reported test case and discussion can be found at below link.
https://www.postgresql.org/message-id/flat/CAKcux6%3DuZEyWyLw0N7HtR9OBc-sWEFeByEZC7t-KDf15FKxVew%40mail.gmail.com

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


On Fri, May 3, 2019 at 5:26 PM Jeevan Chalke 
wrote:

>
>
> On Fri, May 3, 2019 at 2:56 PM Rajkumar Raghuwanshi <
> rajkumar.raghuwan...@enterprisedb.com> wrote:
>
>> Hi,
>>
>> On PG-head, Some of statistical aggregate function are not giving correct
>> output when enable partitionwise aggregate while same is working on v11.
>>
>
> I had a quick look over this and observed that something broken with the
> PARTIAL aggregation.
>
> I can reproduce same issue with the larger dataset which results into
> parallel scan.
>
> CREATE TABLE tbl1(a int2,b float4) partition by range(a);
> create table tbl1_p1 partition of tbl1 for values from (minvalue) to (0);
> create table tbl1_p2 partition of tbl1 for values from (0) to (maxvalue);
> insert into tbl1 select i%2, i from generate_series(1, 100) i;
>
> # SELECT regr_count(b, a) FROM tbl1;
>  regr_count
> 
>   0
> (1 row)
>
> postgres:5432 [120536]=# explain SELECT regr_count(b, a) FROM tbl1;
>QUERY
> PLAN
>
> 
>  Finalize Aggregate  (cost=15418.08..15418.09 rows=1 width=8)
>->  Gather  (cost=15417.87..15418.08 rows=2 width=8)
>  Workers Planned: 2
>  ->  Partial Aggregate  (cost=14417.87..14417.88 rows=1 width=8)
>->  Parallel Append  (cost=0.00..11091.62 rows=443500
> width=6)
>  ->  Parallel Seq Scan on tbl1_p2  (cost=0.00..8850.00
> rows=442500 width=6)
>  ->  Parallel Seq Scan on tbl1_p1  (cost=0.00..24.12
> rows=1412 width=6)
> (7 rows)
>
> postgres:5432 [120536]=# set max_parallel_workers_per_gather to 0;
> SET
> postgres:5432 [120536]=# SELECT regr_count(b, a) FROM tbl1;
>  regr_count
> 
> 100
> (1 row)
>
> After looking further, it seems that it got broken by following commit:
>
> commit a9c35cf85ca1ff72f16f0f10d7ddee6e582b62b8
> Author: Andres Freund 
> Date:   Sat Jan 26 14:17:52 2019 -0800
>
> Change function call information to be variable length.
>
>
> This commit is too big to understand and thus could not get into the
> excact cause.
>
> Thanks
>
>
>> below are some of examples.
>>
>> CREATE TABLE tbl(a int2,b float4) partition by range(a);
>> create table tbl_p1 partition of tbl for values from (minvalue) to (0);
>> create table tbl_p2 partition of tbl for values from (0) to (maxvalue);
>> insert into tbl values (-1,-1),(0,0),(1,1),(2,2);
>>
>> --when partitionwise aggregate is off
>> postgres=# SELECT regr_count(b, a) FROM tbl;
>>  regr_count
>> 
>>   4
>> (1 row)
>> postgres=# SELECT regr_avgx(b, a), regr_avgy(b, a) FROM tbl;
>>  regr_avgx | regr_avgy
>> ---+---
>>0.5 |   0.5
>> (1 row)
>> postgres=# SELECT corr(b, a) FROM tbl;
>>  corr
>> --
>> 1
>> (1 row)
>>
>> --when partitionwise aggregate is on
>> postgres=# SET enable_partitionwise_aggregate = true;
>> SET
>> postgres=# SELECT regr_count(b, a) FROM tbl;
>>  regr_count
>> 
>>   0
>> (1 row)
>> postgres=# SELECT regr_avgx(b, a), regr_avgy(b, a) FROM tbl;
>>  regr_avgx | regr_avgy
>> ---+---
>>|
>> (1 row)
>> postgres=# SELECT corr(b, a) FROM tbl;
>>  corr
>> --
>>
>> (1 row)
>>
>> Thanks & Regards,
>> Rajkumar Raghuwanshi
>> QMG, EnterpriseDB Corporation
>>
>
>
> --
> Jeevan Chalke
> Technical Architect, Product Development
> EnterpriseDB Corporation
> The Enterprise PostgreSQL Company
>
>


Statistical aggregate functions are not working with partitionwise aggregate

2019-05-03 Thread Rajkumar Raghuwanshi
Hi,

On PG-head, Some of statistical aggregate function are not giving correct
output when enable partitionwise aggregate while same is working on v11.

below are some of examples.

CREATE TABLE tbl(a int2,b float4) partition by range(a);
create table tbl_p1 partition of tbl for values from (minvalue) to (0);
create table tbl_p2 partition of tbl for values from (0) to (maxvalue);
insert into tbl values (-1,-1),(0,0),(1,1),(2,2);

--when partitionwise aggregate is off
postgres=# SELECT regr_count(b, a) FROM tbl;
 regr_count

  4
(1 row)
postgres=# SELECT regr_avgx(b, a), regr_avgy(b, a) FROM tbl;
 regr_avgx | regr_avgy
---+---
    0.5 |   0.5
(1 row)
postgres=# SELECT corr(b, a) FROM tbl;
 corr
--
1
(1 row)

--when partitionwise aggregate is on
postgres=# SET enable_partitionwise_aggregate = true;
SET
postgres=# SELECT regr_count(b, a) FROM tbl;
 regr_count

  0
(1 row)
postgres=# SELECT regr_avgx(b, a), regr_avgy(b, a) FROM tbl;
 regr_avgx | regr_avgy
---+---
    |
(1 row)
postgres=# SELECT corr(b, a) FROM tbl;
 corr
--

(1 row)

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: [HACKERS] advanced partition matching algorithm for partition-wise join

2019-04-26 Thread Rajkumar Raghuwanshi
On Wed, Apr 24, 2019 at 4:56 PM amul sul  wrote:

> Attached version is rebase atop of the latest master head(fdc7efcc30), also
> incorporates the Ashutosh's suggestion, thanks.
>
Thanks for rebase patch, patches applied cleanly on PG head.
I did some crash testing with extra test case [0006 patch] [1] and found no
more issue.

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB.

[1]
https://www.postgresql.org/message-id/CAFjFpReKuV_4LRRfdy80BqX8oZfwbo%2BHWLQNv3CsJ5iGPSyTfA%40mail.gmail.com
<https://www.postgresql.org/message-id/CA%2Bq6zcU3X4%3DBfqnWXAUPBFtKK7vy0HO7-%2BmAW6KB2Zy_EPtC_Q%40mail.gmail.com>



>
> Regards,
> Amul
>
> On Mon, Mar 11, 2019 at 10:14 PM Ashutosh Bapat <
> ashutosh.bapat@gmail.com> wrote:
>
>>
>>
>> On Mon, Mar 11, 2019 at 10:40 AM amul sul  wrote:
>>
>>>
>>> All the places from where this handle_missing_partition() get called
>>> have the following code to decide the value for
>>> missing_side_outer/_inner which
>>> I yet to understand. Do you think this has some flaw?
>>>
>>> /*
>>>  * For a FULL join, inner relation acts as both OUTER and INNER
>>>  * relation.  For LEFT and ANTI join the inner relation acts as
>>>  * INNER relation. For INNER and SEMI join OUTER and INNER
>>>  * differentiation is immaterial.
>>>  */
>>> missing_side_inner = (jointype == JOIN_FULL ||
>>>   jointype == JOIN_LEFT ||
>>>   jointype == JOIN_ANTI);
>>> missing_side_outer = (jointype == JOIN_FULL);
>>>
>>
>> I was wrong, sorry. The comment says it all.
>>
>>
>>>
>>>
>>>
>>>> argument value which fails to set merged_index.
>>>>>
>>>>> In the attached patch, I tried to fix this case by setting
>>>>> merged_index
>>>>> explicitly which fixes the reported crash.
>>>>>
>>>>
>>>> I expect handle_missing_partition() to set the merged_index always. In
>>>> your patches, I don't see that function in your patches is setting it
>>>> explicitly. If we are setting merged_index explicitly somewhere else, other
>>>> places may miss that explicit assignment. So it's better to move it inside
>>>> this function.
>>>>
>>>>
>>>
>>> Ok, that can be fixed.
>>>
>>> Similarly, I think merge_null_partitions should set null_index instead
>>> of
>>> asserting when null partitions missing from both the side, make sense?
>>>
>>
>> I think not. null_index, once set shouldn't change and hence does not
>> change with each pair of partitions being matched. So, it makes sense to
>> make sure that null_index remains invalid if none of the tables have null
>> partition.
>>
>> --
>> Best Wishes,
>> Ashutosh Bapat
>>
>


Re: [HACKERS] advanced partition matching algorithm for partition-wise join

2019-03-08 Thread Rajkumar Raghuwanshi
On Thu, Mar 7, 2019 at 8:20 PM amul sul  wrote:

>
>
> On Thu, Mar 7, 2019 at 1:02 PM amul sul  wrote:
>
>> Thanks Rajkumar,
>>
>> I am looking into this.
>>
>>
> The crash happens when none of the if-else branch of
> handle_missing_partition()
> evaluates and returns merged_index unassigned.
>
> Let me explain, in Rajkumar 's test case, the join type is JOIN_INNER.
> When
> only outer rel has null partition, merge_null_partitions() function calls
> handle_missing_partition() with missing_side_inner = false and
> missing_side_outer = false argument value which fails to set merged_index.
>
> In the attached patch, I tried to fix this case by setting merged_index
> explicitly which fixes the reported crash.
>
Thanks Amul, with v20 patches, crash is fixed.


>
> Regards,
> Amul
>
>
>
>> On Thu, Mar 7, 2019 at 11:54 AM Rajkumar Raghuwanshi <
>> rajkumar.raghuwan...@enterprisedb.com> wrote:
>>
>>>
>>>
>>> On Tue, Mar 5, 2019 at 3:45 PM amul sul  wrote:
>>>
>>>> Attached is the rebased atop of the latest master head(35bc0ec7c8).
>>>>
>>> thanks Amul, patches applied cleanly on PG head.
>>>
>>> While testing this I got a server crash with below test case.
>>>
>>> CREATE TABLE plt1 (a int, b int, c varchar) PARTITION BY LIST(c);
>>> CREATE TABLE plt1_p1 PARTITION OF plt1 FOR VALUES IN
>>> ('0001','0002','0003');
>>> CREATE TABLE plt1_p2 PARTITION OF plt1 FOR VALUES IN
>>> ('0004','0005','0006');
>>> CREATE TABLE plt1_p3 PARTITION OF plt1 FOR VALUES IN
>>> (NULL,'0008','0009');
>>> CREATE TABLE plt1_p4 PARTITION OF plt1 FOR VALUES IN ('','0010');
>>> INSERT INTO plt1 SELECT i, i % 47, to_char(i % 17, 'FM') FROM
>>> generate_series(0, 500) i WHERE i % 17 NOT IN (7, 11, 12, 13, 14, 15,16);
>>> INSERT INTO plt1 SELECT i, i % 47, case when i % 17 = 7 then NULL else
>>> to_char(i % 17, 'FM') end FROM generate_series(0, 500) i WHERE i % 17
>>> IN (7,8,9);
>>> ANALYSE plt1;
>>>
>>> CREATE TABLE plt2 (a int, b int, c varchar) PARTITION BY LIST(c);
>>> CREATE TABLE plt2_p1 PARTITION OF plt2 FOR VALUES IN ('0002','0003');
>>> CREATE TABLE plt2_p2 PARTITION OF plt2 FOR VALUES IN
>>> ('0004','0005','0006');
>>> CREATE TABLE plt2_p3 PARTITION OF plt2 FOR VALUES IN
>>> ('0007','0008','0009');
>>> CREATE TABLE plt2_p4 PARTITION OF plt2 FOR VALUES IN
>>> ('',NULL,'0012');
>>> INSERT INTO plt2 SELECT i, i % 47, to_char(i % 17, 'FM') FROM
>>> generate_series(0, 500) i WHERE i % 17 NOT IN (1, 10, 11, 13, 14, 15, 16);
>>> INSERT INTO plt2 SELECT i, i % 47, case when i % 17 = 11 then NULL else
>>> to_char(i % 17, 'FM') end FROM generate_series(0, 500) i WHERE i % 17
>>> IN (0,11,12);
>>> ANALYZE plt2;
>>>
>>> CREATE TABLE plt1_e (a int, b int, c text) PARTITION BY LIST(ltrim(c,
>>> 'A'));
>>> CREATE TABLE plt1_e_p1 PARTITION OF plt1_e FOR VALUES IN ('0002',
>>> '0003');
>>> CREATE TABLE plt1_e_p2 PARTITION OF plt1_e FOR VALUES IN ('0004',
>>> '0005', '0006');
>>> CREATE TABLE plt1_e_p3 PARTITION OF plt1_e FOR VALUES IN ('0008',
>>> '0009');
>>> CREATE TABLE plt1_e_p4 PARTITION OF plt1_e FOR VALUES IN ('');
>>> INSERT INTO plt1_e SELECT i, i % 47, to_char(i % 17, 'FM') FROM
>>> generate_series(0, 500) i WHERE i % 17 NOT IN (1, 7, 10, 11, 12, 13, 14,
>>> 15, 16);
>>> ANALYZE plt1_e;
>>>
>>> EXPLAIN (COSTS OFF)
>>> SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM
>>> plt1 t1, plt2 t2, plt1_e t3 WHERE t1.c = t2.c AND ltrim(t3.c, 'A') =
>>> t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c;
>>> server closed the connection unexpectedly
>>> This probably means the server terminated abnormally
>>> before or while processing the request.
>>> The connection to the server was lost. Attempting reset: Failed.
>>>
>>> below is stack trace,  looks like some indexes got corrupted, please
>>> take a look.
>>>
>>> Core was generated by `postgres: edb postgres [local]
>>> EXPLAIN   '.
>>> Program terminated with signal 11, Segmentation fault.
>>> #0  0x00821aee in map_and_merge_partitions (partmaps1=0x2c1c8a8,
>>> partmaps2=0x2c1c8e0, index1=45540240, index2=0, next_index=0x7ffeebd43d3c)
>>> at partbounds.c:4162
>>> 4162if (partmap1->from < 0 && partmap2->from <

Re: [HACKERS] advanced partition matching algorithm for partition-wise join

2019-03-06 Thread Rajkumar Raghuwanshi
els_needed=3, initial_rels=0x2bae500) at allpaths.c:2716
#10 0x007abdca in make_rel_from_joinlist (root=0x2be2a28,
joinlist=0x2bfbae8) at allpaths.c:2647
#11 0x007a86b0 in make_one_rel (root=0x2be2a28, joinlist=0x2bfbae8)
at allpaths.c:227
#12 0x007dada1 in query_planner (root=0x2be2a28, tlist=0x2ba01c8,
qp_callback=0x7e0b25 , qp_extra=0x7ffeebd44390) at
planmain.c:265
#13 0x007ddf83 in grouping_planner (root=0x2be2a28,
inheritance_update=false, tuple_fraction=0) at planner.c:1929
#14 0x007dc5f5 in subquery_planner (glob=0x2be2990,
parse=0x2c0e8c8, parent_root=0x0, hasRecursion=false, tuple_fraction=0) at
planner.c:997
#15 0x007db1b6 in standard_planner (parse=0x2c0e8c8,
cursorOptions=256, boundParams=0x0) at planner.c:416
#16 0x007daef7 in planner (parse=0x2c0e8c8, cursorOptions=256,
boundParams=0x0) at planner.c:276
#17 0x008e15c5 in pg_plan_query (querytree=0x2c0e8c8,
cursorOptions=256, boundParams=0x0) at postgres.c:878
#18 0x006562cc in ExplainOneQuery (query=0x2c0e8c8,
cursorOptions=256, into=0x0, es=0x2c0e0a0,
queryString=0x2aa24d8 "EXPLAIN (COSTS OFF)\nSELECT avg(t1.a),
avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM\nplt1 t1, plt2 t2,
plt1_e t3 WHERE t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c,
t2.c, t3.c ORDER BY t1"..., params=0x0, queryEnv=0x0) at explain.c:364

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB.


> Regards,
> Amul Sul
>
> On Mon, Feb 4, 2019 at 11:05 AM amul sul  wrote:
>
>> There are few whitespaces in 0002 patch that I have fixed in the attached
>> version.
>> Rest of the patches are untouched.
>>
>> Ill continue my review and testing.
>>
>> Regards,
>> Amul
>>
>> On Thu, Jan 31, 2019 at 5:26 PM Etsuro Fujita <
>> fujita.ets...@lab.ntt.co.jp> wrote:
>>
>>> (2019/01/22 21:38), Etsuro Fujita wrote:
>>> > Will continue to review.
>>>
>>> I rebased the patch set against the latest HEAD.  Attached is a new
>>> version.  I'll move this to the next CF, and continue to review it.
>>>
>>> Best regards,
>>> Etsuro Fujita
>>>
>>


Re: Getting ERROR: could not open file "base/13164/t3_16388" with partition table with ON COMMIT

2018-11-04 Thread Rajkumar Raghuwanshi
On Mon, Nov 5, 2018 at 5:49 AM Michael Paquier  wrote:

> On Fri, Nov 02, 2018 at 04:39:07PM +0900, Amit Langote wrote:
> > Agreed that they're two independent issues, although it wouldn't be such
> a
> > bad idea to fix them in one go, as they're both issues related to the
> > handling of ON COMMIT actions on tables in inheritance trees.
>
> I have pushed 0001 which fixes the bug reported on this thread down to
> v10
>
Thanks Michael, Amit.


pg_upgrade failed with ERROR: null relpartbound for relation 18159 error.

2018-10-04 Thread Rajkumar Raghuwanshi
Hi,

I am getting ERROR:  null relpartbound for relation 18159 while doing
pg_upgrade from v11 to v11/master.

-- user-defined operator class in partition key
CREATE FUNCTION my_int4_sort(int4,int4) RETURNS int LANGUAGE sql
  AS $$ SELECT CASE WHEN $1 = $2 THEN 0 WHEN $1 > $2 THEN 1 ELSE -1 END; $$;
CREATE OPERATOR CLASS test_int4_ops FOR TYPE int4 USING btree AS
  OPERATOR 1 < (int4,int4), OPERATOR 2 <= (int4,int4),
  OPERATOR 3 = (int4,int4), OPERATOR 4 >= (int4,int4),
  OPERATOR 5 > (int4,int4), FUNCTION 1 my_int4_sort(int4,int4);
CREATE TABLE partkey_t (a int4) PARTITION BY RANGE (a test_int4_ops);
CREATE TABLE partkey_t_1 PARTITION OF partkey_t FOR VALUES FROM (0) TO
(1000);
INSERT INTO partkey_t VALUES (100);
INSERT INTO partkey_t VALUES (200);

--ran pg_upgrade failed with below error.
pg_restore: creating TABLE "public.partitioned"
pg_restore: creating TABLE "public.partkey_t"
pg_restore: creating TABLE "public.partkey_t_1"
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC entry 485; 1259 18159 TABLE
partkey_t_1 edb
pg_restore: [archiver (db)] could not execute query: ERROR:  null
relpartbound for relation 18159
CONTEXT:  SQL function "my_int4_sort"
Command was:
-- For binary upgrade, must preserve pg_type oid
SELECT
pg_catalog.binary_upgrade_set_next_pg_type_oid('18161'::pg_catalog.oid);


-- For binary upgrade, must preserve pg_type array oid
SELECT
pg_catalog.binary_upgrade_set_next_array_pg_type_oid('18160'::pg_catalog.oid);


-- For binary upgrade, must preserve pg_class oids
SELECT
pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('18159'::pg_catalog.oid);

CREATE TABLE "public"."partkey_t_1" (
"a" integer
);

-- For binary upgrade, recreate inherited column.
UPDATE pg_catalog.pg_attribute
SET attislocal = false
WHERE attname = 'a'
  AND attrelid = '"public"."partkey_t_1"'::pg_catalog.regclass;

-- For binary upgrade, set up inheritance and partitioning this way.
ALTER TABLE ONLY "public"."partkey_t" ATTACH PARTITION
"public"."partkey_t_1" FOR VALUES FROM (0) TO (1000);

-- For binary upgrade, set heap's relfrozenxid and relminmxid
UPDATE pg_catalog.pg_class
SET relfrozenxid = '1915', relminmxid = '1'
WHERE oid = '"public"."partkey_t_1"'::pg_catalog.regclass;

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: Multiple primary key on partition table?

2018-10-01 Thread Rajkumar Raghuwanshi
On Tue, Sep 18, 2018 at 11:20 AM amul sul  wrote:

> On Mon, Sep 17, 2018 at 9:06 PM amul sul  wrote:
> >
> > Nice catch Rajkumar.
> >
> > In index_check_primary_key(), relationHasPrimaryKey() called only for
> the an
> > alter command but I think we need to call in this case as well, like
> this:
> >
> > diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
> > index 7eb3e35166..c8714395fe 100644
> > --- a/src/backend/catalog/index.c
> > +++ b/src/backend/catalog/index.c
> > @@ -223,7 +223,7 @@ index_check_primary_key(Relation heapRel,
> >  * and CREATE INDEX doesn't have a way to say PRIMARY KEY, so it's no
> >  * problem either.
> >  */
> > -   if (is_alter_table &&
> > +   if ((is_alter_table || heapRel->rd_rel->relispartition) &&
> > relationHasPrimaryKey(heapRel))
> > {
> > ereport(ERROR,
> >
> > Thoughts?
> >
>
> Here is the complete patch proposes the aforesaid fix with regression test.
>
Thanks, This worked for me.


>
> Regards,
> Amul
>


Re: Getting ERROR: could not open file "base/13164/t3_16388" with partition table with ON COMMIT

2018-09-17 Thread Rajkumar Raghuwanshi
On Fri, Sep 14, 2018 at 7:23 AM, Amit Langote  wrote:

> On 2018/09/13 23:13, Tom Lane wrote:
> > Amit Langote  writes:
> >> On 2018/09/13 1:14, Tom Lane wrote:
> >>> That seems excessively restrictive.  Anything that has storage (e.g.
> >>> matviews) ought to be truncatable, no?
> >
> >> Not by heap_truncate it seems.  The header comment of heap_truncate says
> >> that it concerns itself only with ON COMMIT truncation of temporary
> tables:
> >
> > Ah.  Well, in that case I'm OK with just a simple test for
> > RELKIND_RELATION, but I definitely feel that it should be inside
> > heap_truncate.  Callers don't need to know about the limited scope
> > of what that does.
>
> I guess you meant inside heap_truncate_one_rel.  I updated the patch that
> way, but I wonder if we shouldn't also allow other relkinds that have
> storage which RelationTruncate et al can technically deal with.
>
Verified. This patch fixed issue.


Multiple primary key on partition table?

2018-09-17 Thread Rajkumar Raghuwanshi
Hi,

I am able to create multiple primary key on partition table by executing
below statement.

[edb@localhost bin]$ ./psql postgres
psql (11beta3)
Type "help" for help.

postgres=# CREATE TABLE t1 (a int PRIMARY KEY,b int) PARTITION BY RANGE (a);
CREATE TABLE
postgres=# CREATE TABLE t1_p1 PARTITION OF t1 (b PRIMARY KEY) FOR VALUES
FROM (MINVALUE) TO (MAXVALUE);
CREATE TABLE
postgres=# \d+ t1_p1
                   Table "public.t1_p1"
 Column |  Type   | Collation | Nullable | Default | Storage | Stats target
| Description
+-+---+--+-+-+--+-
 a  | integer |   | not null | | plain   |
|
 b  | integer |   | not null | | plain   |
|
Partition of: t1 FOR VALUES FROM (MINVALUE) TO (MAXVALUE)
Partition constraint: (a IS NOT NULL)
Indexes:
"t1_p1_pkey" PRIMARY KEY, btree (a)
"t1_p1_pkey1" PRIMARY KEY, btree (b)

Is this fine to allow it?
eventually it cause to pg_upgrade failed with below error.

pg_restore: creating TABLE "public.t1"
pg_restore: creating TABLE "public.t1_p1"
pg_restore: INFO:  partition constraint for table "t1_p1" is implied by
existing constraints
pg_restore: creating CONSTRAINT "public.t1 t1_pkey"
pg_restore: creating CONSTRAINT "public.t1_p1 t1_p1_pkey"
pg_restore: creating CONSTRAINT "public.t1_p1 t1_p1_pkey1"
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC entry 2920; 2606 16395
CONSTRAINT t1_p1 t1_p1_pkey1 edb
pg_restore: [archiver (db)] could not execute query: ERROR:  multiple
primary keys for table "t1_p1" are not allowed
Command was:
-- For binary upgrade, must preserve pg_class oids
SELECT
pg_catalog.binary_upgrade_set_next_index_pg_class_oid('16394'::pg_catalog.oid);

ALTER TABLE ONLY "public"."t1_p1"
ADD CONSTRAINT "t1_p1_pkey1" PRIMARY KEY ("b");

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Getting ERROR: could not open file "base/13164/t3_16388" with partition table with ON COMMIT

2018-09-12 Thread Rajkumar Raghuwanshi
Hi,

I am getting below error while creating temp root partition table with on
commit. getting same error from v10 onwards.

[edb@localhost bin]$ ./psql postgres
psql (10.5)
Type "help" for help.

postgres=# CREATE TEMP TABLE test ( c1 varchar, c2 int) PARTITION BY RANGE
(c1) ON COMMIT DELETE ROWS;
ERROR:  could not open file "base/13164/t3_16388": No such file or directory

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


cache lookup failed for constraint when alter table referred by partition table

2018-09-07 Thread Rajkumar Raghuwanshi
Hi,

I am getting cache lookup failed for constraint error on master and 11beta3
with below test case.

[edb@localhost bin]$ ./psql postgres
psql (11beta3)
Type "help" for help.

postgres=# CREATE TABLE non_part (a INT,PRIMARY KEY(a));
CREATE TABLE
postgres=# CREATE TABLE part (a INT REFERENCES non_part(a)) PARTITION BY
RANGE(a);
CREATE TABLE
postgres=# CREATE TABLE part_p1 PARTITION OF part FOR VALUES FROM
(MINVALUE) TO (MAXVALUE);
CREATE TABLE
postgres=# ALTER TABLE non_part ALTER COLUMN a TYPE bigint;
ERROR:  cache lookup failed for constraint 16398

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


negative bitmapset member not allowed Error with partition pruning

2018-07-26 Thread Rajkumar Raghuwanshi
Hi,

I am getting "ERROR:  negative bitmapset member not allowed" when
enable_partition_pruning set to true with below test case.

[edb@localhost bin]$ ./psql postgres
psql (12devel)
Type "help" for help.

postgres=# SET enable_partition_pruning TO on;
SET
postgres=# CREATE TABLE part (a INT, b INT) PARTITION BY LIST(a);
CREATE TABLE
postgres=# CREATE TABLE part_p1 PARTITION OF part FOR VALUES IN
(-2,-1,0,1,2);
CREATE TABLE
postgres=# CREATE TABLE part_p2 PARTITION OF part DEFAULT PARTITION BY
RANGE(a);
CREATE TABLE
postgres=# CREATE TABLE part_p2_p1 PARTITION OF part_p2 DEFAULT;
CREATE TABLE
postgres=# INSERT INTO part VALUES
(-1,-1),(1,1),(2,NULL),(NULL,-2),(NULL,NULL);
INSERT 0 5
postgres=# EXPLAIN (COSTS OFF)

postgres-# SELECT tableoid::regclass as part, a, b FROM part WHERE a IS
NULL ORDER BY 1, 2, 3;
ERROR:  negative bitmapset member not allowed
postgres=# SET enable_partition_pruning TO off;
SET
postgres=# EXPLAIN (COSTS OFF)
SELECT tableoid::regclass as part, a, b FROM part WHERE a IS NULL ORDER BY
1, 2, 3;
   QUERY PLAN

 Sort
   Sort Key: ((part_p1.tableoid)::regclass), part_p1.a, part_p1.b
   ->  Append
 ->  Seq Scan on part_p1
   Filter: (a IS NULL)
 ->  Seq Scan on part_p2_p1
   Filter: (a IS NULL)
(7 rows)

postgres=#


Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: Test patch for partitionwise join with partitioned tables containing default partition

2018-07-05 Thread Rajkumar Raghuwanshi
Thanks for commit Jeff. Thanks for review Ashutosh, Thomas.

commit 4513d3a4be0bb7d0141f8b7eaf669a55c08e41b0
Author: Jeff Davis 
Date:   Thu Jul 5 18:56:12 2018 -0700

Add test for partitionwise join involving default partition.

Author: Rajkumar Raghuwanshi
Reviewed-by: Ashutosh Bapat
Discussion:
https://postgr.es/m/CAKcux6ky5YeZAY74qSh-ayPZZEQchz092g71iXXbC0%2BE3xoscA%40mail.gmail.com
Discussion:
https://postgr.es/m/CAKcux6kOQ85Xtzxu3tM1mR7Vk%3D7Z2e4rG7dL1iMZqPgLMpxQYg%40mail.gmail.com

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation

On Wed, Jun 6, 2018 at 12:03 PM, Rajkumar Raghuwanshi <
rajkumar.raghuwan...@enterprisedb.com> wrote:

> On Wed, Jun 6, 2018 at 11:32 AM, Ashutosh Bapat <
> ashutosh.ba...@enterprisedb.com> wrote:
>
>> Thanks Rajkumar for starting a new thread. Please update the
>> commitfest entry as well.
>>
> I have attached new thread in commitfest and detached the old one.
>
> Thanks & Regards,
> Rajkumar Raghuwanshi
> QMG, EnterpriseDB Corporation
>


Re: Server crashed with "TRAP: unrecognized TOAST vartag("1", File: "heaptuple.c", Line: 1490)"

2018-06-29 Thread Rajkumar Raghuwanshi
On Fri, Jun 29, 2018 at 4:30 AM, Alvaro Herrera 
wrote:

> On 2018-Jun-28, Andres Freund wrote:
>
> > On 2018-06-28 22:35:11 +0530, Rajkumar Raghuwanshi wrote:
> > > On Thu, Jun 28, 2018 at 9:45 PM, Andres Freund 
> wrote:
> > > > On 2018-06-28 16:08:31 +0530, Rajkumar Raghuwanshi wrote:
> > > > > I am getting server crash with below test case, logfile message
> and core
> > > > > dump details in the mail.
>
> > Alvaro, sounds like it could be an open item assigned to you?
>
> You're right.  The attached patch seems to fix it.
>
Thanks for patch, It fixed the issue.


>
> I think I should add Rajkumar's test case too, though, so I'm not
> pushing this evening.
>
> --
> Álvaro Herrerahttps://www.2ndQuadrant.com/
> PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
>


Re: Server crashed with "TRAP: unrecognized TOAST vartag("1", File: "heaptuple.c", Line: 1490)"

2018-06-28 Thread Rajkumar Raghuwanshi
On Thu, Jun 28, 2018 at 9:45 PM, Andres Freund  wrote:

> Hi,
>
> On 2018-06-28 16:08:31 +0530, Rajkumar Raghuwanshi wrote:
> > I am getting server crash with below test case, logfile message and core
> > dump details in the mail.
>
> Which versions are affected here? Is this reproducible in postgres 10 or
> just the current beta?
>

This affected current beta only as this is not reproducible in postgres 10
because index create on
partitioned table is not supported in v10.


Server crashed with "TRAP: unrecognized TOAST vartag("1", File: "heaptuple.c", Line: 1490)"

2018-06-28 Thread Rajkumar Raghuwanshi
384, lockmode=8,
stmt=0x1f81460) at tablecmds.c:3312
#19 0x008cea8e in ProcessUtilitySlow (pstate=0x1f81348,
pstmt=0x1eb95a0, queryString=0x1eb8798 "ALTER TABLE part_tbl ALTER COLUMN c
TYPE numeric;",
context=PROCESS_UTILITY_TOPLEVEL, params=0x0, queryEnv=0x0,
dest=0x1eb9890, completionTag=0x7ffea4337fd0 "") at utility.c:1117
#20 0x008ce50e in standard_ProcessUtility (pstmt=0x1eb95a0,
queryString=0x1eb8798 "ALTER TABLE part_tbl ALTER COLUMN c TYPE numeric;",
context=PROCESS_UTILITY_TOPLEVEL, params=0x0, queryEnv=0x0,
dest=0x1eb9890, completionTag=0x7ffea4337fd0 "") at utility.c:920
#21 0x008cd530 in ProcessUtility (pstmt=0x1eb95a0,
queryString=0x1eb8798 "ALTER TABLE part_tbl ALTER COLUMN c TYPE numeric;",
context=PROCESS_UTILITY_TOPLEVEL,
params=0x0, queryEnv=0x0, dest=0x1eb9890, completionTag=0x7ffea4337fd0
"") at utility.c:360
#22 0x008cc4de in PortalRunUtility (portal=0x1f1dc28,
pstmt=0x1eb95a0, isTopLevel=true, setHoldSnapshot=false, dest=0x1eb9890,
completionTag=0x7ffea4337fd0 "")
at pquery.c:1178
#23 0x008cc706 in PortalRunMulti (portal=0x1f1dc28,
isTopLevel=true, setHoldSnapshot=false, dest=0x1eb9890, altdest=0x1eb9890,
completionTag=0x7ffea4337fd0 "")
at pquery.c:1324
#24 0x008cbbf3 in PortalRun (portal=0x1f1dc28,
count=9223372036854775807, isTopLevel=true, run_once=true, dest=0x1eb9890,
altdest=0x1eb9890,
completionTag=0x7ffea4337fd0 "") at pquery.c:799
#25 0x008c5afc in exec_simple_query (query_string=0x1eb8798 "ALTER
TABLE part_tbl ALTER COLUMN c TYPE numeric;") at postgres.c:1122
#26 0x008c9dd2 in PostgresMain (argc=1, argv=0x1ee22a0,
dbname=0x1ee2100 "postgres", username=0x1eb5298 "edb") at postgres.c:4153
#27 0x00827467 in BackendRun (port=0x1eda060) at postmaster.c:4361
Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: alter index WITH ( storage_parameter = value [, ... ] ) for partition index.

2018-06-28 Thread Rajkumar Raghuwanshi
On Thu, Jun 28, 2018 at 12:07 PM, Michael Paquier 
wrote:

> On Thu, Jun 28, 2018 at 11:51:23AM +0530, Rajkumar Raghuwanshi wrote:
> > postgres=# select pgstatindex('part_pk');
> > ERROR:  relation "part_pk" is not a btree index
>
> This error message is intentional.  Please see bef5fcc and its related
> thread:
> https://www.postgresql.org/message-id/CAH2-WzkOKptQiE51Bh4_
> xeehhabwhkzkgtkizrfmgekfuur...@mail.gmail.com


Thanks, sorry I missed that thread.

>
>


Re: alter index WITH ( storage_parameter = value [, ... ] ) for partition index.

2018-06-28 Thread Rajkumar Raghuwanshi
Another case where I got an error saying a partition table index is not an
index is given below.

postgres=# create table part(a int, constraint part_pk primary key(a))
PARTITION BY RANGE(a);
CREATE TABLE
postgres=# create table part_p1 partition of part for values from
(minvalue) to (0);
CREATE TABLE
postgres=# create table part_p2 partition of part for values from (0) to
(maxvalue);
CREATE TABLE
postgres=# create EXTENSION if not exists pgstattuple;
CREATE EXTENSION
postgres=# select pgstatindex('part_p1_pkey');
 pgstatindex
--
 (3,0,8192,0,0,0,0,0,NaN,NaN)
(1 row)

postgres=# select pgstatindex('part_pk');
ERROR:  relation "part_pk" is not a btree index
postgres=#

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation

On Wed, Jun 27, 2018 at 3:12 PM, Rajkumar Raghuwanshi <
rajkumar.raghuwan...@enterprisedb.com> wrote:

> Hi,
>
> I have created partition table index with some storage_parameter like
> example given below, I am not able to reset/modify it from partition table.
> Is this fine.
>
> postgres=# create table part(a int) PARTITION BY RANGE(a);
> CREATE TABLE
> postgres=# create table part_p partition of part for values from
> (minvalue) to (maxvalue);
> CREATE TABLE
> postgres=# create index part_idx on part(a) with (fillfactor = '14');
> CREATE INDEX
> postgres=# \d part
> Table "public.part"
>  Column |  Type   | Collation | Nullable | Default
> +-+---+--+-
>  a  | integer |   |  |
> Partition key: RANGE (a)
> Indexes:
> "part_idx" btree (a) WITH (fillfactor='14')
> Number of partitions: 1 (Use \d+ to list them.)
>
> postgres=# \d part_p
>Table "public.part_p"
>  Column |  Type   | Collation | Nullable | Default
> +-+---+--+-
>  a  | integer |   |  |
> Partition of: part FOR VALUES FROM (MINVALUE) TO (MAXVALUE)
> Indexes:
> "part_p_a_idx" btree (a) WITH (fillfactor='14')
>
>
> *postgres=# alter index part_idx reset (fillfactor);ERROR:  "part_idx" is
> not a table, view, materialized view, or index*
> postgres=# alter index part_p_a_idx reset (fillfactor);
> ALTER INDEX
> postgres=# \d+ part
>Table "public.part"
>  Column |  Type   | Collation | Nullable | Default | Storage | Stats
> target | Description
> +-+---+--+-+
> -+--+-
>  a  | integer |   |  | | plain
> |  |
> Partition key: RANGE (a)
> Indexes:
>* "part_idx" btree (a) WITH (fillfactor='14')*
> Partitions: part_p FOR VALUES FROM (MINVALUE) TO (MAXVALUE)
>
> postgres=# \d part_p
>Table "public.part_p"
>  Column |  Type   | Collation | Nullable | Default
> +-+---+--+-
>  a  | integer |   |  |
> Partition of: part FOR VALUES FROM (MINVALUE) TO (MAXVALUE)
> Indexes:
> "part_p_a_idx" btree (a)
>
> Thanks & Regards,
> Rajkumar Raghuwanshi
> QMG, EnterpriseDB Corporation
>


unexpected relkind: 73 ERROR with partition table index

2018-06-26 Thread Rajkumar Raghuwanshi
Hi,

I am getting "ERROR:  unexpected relkind: 73" when trying to rename
partition table index with below test case.

create user u1 superuser;
create user u2 nosuperuser login;
\c postgres u1

create table public.part(c1 int, c2 int) partition by range(c1);
create table public.part_p1 partition of public.part for values from
(minvalue) to (0);
create table public.part_p2 partition of public.part for values from (0) to
(maxvalue);
create index part_idx on public.part(c1);

create table public.nopart (c1 int, c2 int);
create index nopart_idx on public.nopart(c1);

--switch to nonsuperuser
\c postgres u2

postgres=> --rename the index owned by another user --non partition table
postgres=> ALTER INDEX nopart_idx RENAME TO nopart_idx_renamed;
ERROR:  must be owner of index nopart_idx
postgres=>
postgres=> --rename the index owned by another user --partition table
postgres=> ALTER INDEX part_idx RENAME TO part_idx_renamed;
ERROR:  unexpected relkind: 73


Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: partition table and stddev() /variance() behaviour

2018-06-22 Thread Rajkumar Raghuwanshi
Thanks for the commit. I have verified the reported case; it is fixed now.

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation

On Fri, Jun 22, 2018 at 8:38 AM, David Rowley 
wrote:

> On 22 June 2018 at 03:30, Tom Lane  wrote:
> >> I think some coverage of the numerical aggregates is a good idea, so
> >> I've added some in the attached. I managed to get a parallel plan
> >> going with a query to onek, which is pretty cheap to execute. I didn't
> >> touch the bool aggregates. Maybe I should have done that too..?
> >
> > This sort of blunderbuss testing was exactly what I *didn't* want to do.
> > Not only is this adding about 20x as many cycles as we need (at least for
> > this specific numeric_poly_combine issue), but I'm quite afraid that the
> > float4 and/or float8 cases will show low-order-digit irreproducibility
> > in the buildfarm.
>
> okay. My sniper rifle was locked away for the evening. I decided it
> was best to sleep before any careful aiming was required.
>
> I see you've done the deed already. Thanks.
>
> --
>  David Rowley   http://www.2ndQuadrant.com/
>  PostgreSQL Development, 24x7 Support, Training & Services
>


Re: ERROR: ORDER/GROUP BY expression not found in targetlist

2018-06-22 Thread Rajkumar Raghuwanshi
Thanks for the commit. I have verified the reported case; it is fixed now.

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation

On Thu, Jun 21, 2018 at 1:54 AM, Tom Lane  wrote:

> I wrote:
> > Thanks for the report.  I traced through this, and the problem seems to
> > be that split_pathtarget_at_srfs() is neglecting to attach sortgroupref
> > labeling to the extra PathTargets it constructs.  I don't remember
> > whether that code is my fault or Andres', but I'll take a look at
> > fixing it.
>
> Here's a proposed patch for that.
>
> regards, tom lane
>
>


Re: server crashed with TRAP: FailedAssertion("!(!parallel_aware || pathnode->path.parallel_safe)"

2018-06-22 Thread Rajkumar Raghuwanshi
Thanks for the commit. I have verified the reported case; it is fixed now.

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation

On Thu, Jun 21, 2018 at 7:21 PM, Tom Lane  wrote:

> Amit Kapila  writes:
> > On Thu, Jun 21, 2018 at 11:51 AM, Amit Khandekar 
> wrote:
> >> After list_concat, the subpaths no longer has only non-partial paths,
> >> which it is supposed to have. So it anyways should not be used in the
> >> subsequent code in that function. So I think the following change
> >> should be good.
> >> -   foreach(l, subpaths)
> >> +   foreach(l, pathnode->subpaths)
>
> Patch LGTM.
>
> > Thanks, Tom, would you like to go-ahead and commit this change as this
> > is suggested by you or would you like me to take care of this or do
> > you want to wait for Robert's input?
>
> Please push --- I'm busy getting ready to leave for vacation ...
>
> regards, tom lane
>


Re: Server crashed with TRAP: FailedAssertion("!(parallel_workers > 0)" when partitionwise_aggregate true.

2018-06-22 Thread Rajkumar Raghuwanshi
On Fri, Jun 22, 2018 at 11:15 AM, Jeevan Chalke <
jeevan.cha...@enterprisedb.com> wrote:

> Hi,
>
> Off-list Ashutosh Bapat has suggested using a flag instead of counting
> number of
> dummy rels and then manipulating on it. That will be simple and smoother.
>
> I agree with his suggestion and updated my patch accordingly.
>
I have applied the patch and checked the reported issue. The patch applied
cleanly and the issue is not reproducible any more.

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


partition table and stddev() /variance() behaviour

2018-06-21 Thread Rajkumar Raghuwanshi
Hi,

I am getting different output for stddev/variance functions with partition
tables.


CREATE TABLE part (c1 INT,c2 INT) PARTITION BY RANGE (c1);
CREATE TABLE part_p1 PARTITION OF part FOR VALUES FROM (1) TO (3);
CREATE TABLE part_p2 PARTITION OF part FOR VALUES FROM (3) TO (5);

INSERT INTO part VALUES (1,5),(2,15),(3,3),(4,17);

postgres=# SET parallel_setup_cost=0;
SET
postgres=# EXPLAIN SELECT COUNT(c1),STDDEV(c2),VARIANCE(c2) FROM part;
 QUERY
PLAN

 Finalize Aggregate  (cost=70.36..70.37 rows=1 width=72)
   ->  Gather  (cost=70.12..70.33 rows=2 width=72)
 Workers Planned: 2
 ->  Partial Aggregate  (cost=70.12..70.13 rows=1 width=72)
   ->  Parallel Append  (cost=0.00..56.00 rows=1882 width=8)
 ->  Parallel Seq Scan on part_p1  (cost=0.00..23.29
rows=1329 width=8)
 ->  Parallel Seq Scan on part_p2  (cost=0.00..23.29
rows=1329 width=8)
(7 rows)

postgres=# SELECT COUNT(c1),STDDEV(c2),VARIANCE(c2) FROM part;





* count | stddev | variance ---++-- 4 |  0
|0(1 row)*postgres=#
postgres=# RESET parallel_setup_cost;
RESET
postgres=# EXPLAIN SELECT COUNT(c1),STDDEV(c2),VARIANCE(c2) FROM part;
  QUERY PLAN
---
 Aggregate  (cost=121.71..121.72 rows=1 width=72)
   ->  Append  (cost=0.00..87.80 rows=4520 width=8)
 ->  Seq Scan on part_p1  (cost=0.00..32.60 rows=2260 width=8)
 ->  Seq Scan on part_p2  (cost=0.00..32.60 rows=2260 width=8)
(4 rows)

postgres=# SELECT COUNT(c1),STDDEV(c2),VARIANCE(c2) FROM part;




* count |   stddev   |  variance
---++- 4 |
7.0237691685684926 | 49.(1 row)*
Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


ERROR: ORDER/GROUP BY expression not found in targetlist

2018-06-20 Thread Rajkumar Raghuwanshi
Hi,

The below test case is failing with ERROR:  ORDER/GROUP BY expression not found
in targetlist. This issue is reproducible from PG v10 onward.

postgres=# CREATE TABLE test(c1 int, c2 text, c3 text);
CREATE TABLE
postgres=# INSERT INTO test SELECT i % 10, i % 250, to_char(i % 4,
'FM000') FROM GENERATE_SERIES(1,10,1)i;
INSERT 0 10
postgres=# ANALYZE test;
ANALYZE
postgres=# EXPLAIN (verbose) SELECT c1, generate_series(1,1), count(*) FROM
test GROUP BY 1 HAVING count(*) > 1;
  QUERY
PLAN
---
 ProjectSet  (cost=2291.00..2306.15 rows=3000 width=16)
   Output: c1, generate_series(1, 1), (count(*))
   ->  HashAggregate  (cost=2291.00..2291.12 rows=3 width=12)
 Output: c1, count(*)
 Group Key: test.c1
 Filter: (count(*) > 1)
 ->  Seq Scan on public.test  (cost=0.00..1541.00 rows=10
width=4)
   Output: c1, c2, c3
(8 rows)

postgres=# SET min_parallel_table_scan_size=0;
SET
postgres=# EXPLAIN (verbose) SELECT c1, generate_series(1,1), count(*) FROM
test GROUP BY 1 HAVING count(*) > 1;
ERROR:  ORDER/GROUP BY expression not found in targetlist

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Server crashed with TRAP: FailedAssertion("!(parallel_workers > 0)" when partitionwise_aggregate true.

2018-06-18 Thread Rajkumar Raghuwanshi
7  0x007cb344 in grouping_planner (root=0x1d6ff08,
inheritance_update=false, tuple_fraction=0) at planner.c:2037
#8  0x007c94e6 in subquery_planner (glob=0x1d6fe70,
parse=0x1d2a658, parent_root=0x0, hasRecursion=false, tuple_fraction=0) at
planner.c:966
#9  0x007c80a3 in standard_planner (parse=0x1d2a658,
cursorOptions=256, boundParams=0x0) at planner.c:405
#10 0x007c7dcb in planner (parse=0x1d2a658, cursorOptions=256,
boundParams=0x0) at planner.c:263
#11 0x008c4576 in pg_plan_query (querytree=0x1d2a658,
cursorOptions=256, boundParams=0x0) at postgres.c:809
#12 0x0064a1d0 in ExplainOneQuery (query=0x1d2a658,
cursorOptions=256, into=0x0, es=0x1d24460,
queryString=0x1c68798 "EXPLAIN (COSTS OFF)\nSELECT
AVG(t2.c1),SUM(t1.c1) FROM part t1 INNER JOIN part t2 ON (t1.c1 = t2.c1)
GROUP BY t1.c1, t2.c1 HAVING SUM(t1.c1) % 125 = 0 ORDER BY 1,2;",
params=0x0, queryEnv=0x0) at explain.c:365
#13 0x00649ed2 in ExplainQuery (pstate=0x1c8be28, stmt=0x1d34b08,
queryString=0x1c68798 "EXPLAIN (COSTS OFF)\nSELECT
AVG(t2.c1),SUM(t1.c1) FROM part t1 INNER JOIN part t2 ON (t1.c1 = t2.c1)
GROUP BY t1.c1, t2.c1 HAVING SUM(t1.c1) % 125 = 0 ORDER BY 1,2;",
params=0x0, queryEnv=0x0, dest=0x1c8bd90) at explain.c:254
#14 0x008ccd99 in standard_ProcessUtility (pstmt=0x1d34bd8,
queryString=0x1c68798 "EXPLAIN (COSTS OFF)\nSELECT
AVG(t2.c1),SUM(t1.c1) FROM part t1 INNER JOIN part t2 ON (t1.c1 = t2.c1)
GROUP BY t1.c1, t2.c1 HAVING SUM(t1.c1) % 125 = 0 ORDER BY 1,2;",
context=PROCESS_UTILITY_TOPLEVEL, params=0x0, queryEnv=0x0, dest=0x1c8bd90,
completionTag=0x7ffceb18e450 "") at utility.c:672
#15 0x008cc520 in ProcessUtility (pstmt=0x1d34bd8,
queryString=0x1c68798 "EXPLAIN (COSTS OFF)\nSELECT
AVG(t2.c1),SUM(t1.c1) FROM part t1 INNER JOIN part t2 ON (t1.c1 = t2.c1)
GROUP BY t1.c1, t2.c1 HAVING SUM(t1.c1) % 125 = 0 ORDER BY 1,2;",
context=PROCESS_UTILITY_TOPLEVEL, params=0x0, queryEnv=0x0, dest=0x1c8bd90,
completionTag=0x7ffceb18e450 "") at utility.c:360
#16 0x008cb4ce in PortalRunUtility (portal=0x1ccdc28,
pstmt=0x1d34bd8, isTopLevel=true, setHoldSnapshot=true, dest=0x1c8bd90,
completionTag=0x7ffceb18e450 "")
at pquery.c:1178
#17 0x008cb1c5 in FillPortalStore (portal=0x1ccdc28,
isTopLevel=true) at pquery.c:1038
#18 0x008caaf6 in PortalRun (portal=0x1ccdc28,
count=9223372036854775807, isTopLevel=true, run_once=true, dest=0x1d6d9e8,
altdest=0x1d6d9e8,
completionTag=0x7ffceb18e650 "") at pquery.c:768
#19 0x008c4aef in exec_simple_query (
query_string=0x1c68798 "EXPLAIN (COSTS OFF)\nSELECT
AVG(t2.c1),SUM(t1.c1) FROM part t1 INNER JOIN part t2 ON (t1.c1 = t2.c1)
GROUP BY t1.c1, t2.c1 HAVING SUM(t1.c1) % 125 = 0 ORDER BY 1,2;") at
postgres.c:1122
#20 0x008c8dbf in PostgresMain (argc=1, argv=0x1c922a0,
dbname=0x1c92100 "postgres", username=0x1c65298 "edb") at postgres.c:4153
#21 0x00826703 in BackendRun (port=0x1c8a060) at postmaster.c:4361
#22 0x00825e71 in BackendStartup (port=0x1c8a060) at
postmaster.c:4033
#23 0x00822253 in ServerLoop () at postmaster.c:1706
#24 0x00821b85 in PostmasterMain (argc=3, argv=0x1c631f0) at
postmaster.c:1379
#25 0x00748d64 in main (argc=3, argv=0x1c631f0) at main.c:228

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Getting "ERROR: did not find all requested child rels in append_rel_list" when enable_partition_pruning is on

2018-06-15 Thread Rajkumar Raghuwanshi
Hi,

I am getting "ERROR:  did not find all requested child rels in
append_rel_list" when enable_partition_pruning is on for below test case.

CREATE TABLE test(c1 int, c2 int) PARTITION BY RANGE(c1);
CREATE TABLE test_p1 PARTITION OF test FOR VALUES FROM (minvalue) TO (0);
CREATE TABLE test_p2 PARTITION OF test FOR VALUES FROM  (0) TO (maxvalue);
select * from (select * from test a union all select *  from test b) ss
where (c1 >= c2);

postgres=# set enable_partition_pruning to off;
SET
postgres=# select * from (select * from test a union all select *  from
test b) ss where (c1 >= c2);
 c1 | c2
+
(0 rows)

postgres=#
postgres=# set enable_partition_pruning to on;
SET
postgres=# select * from (select * from test a union all select *  from
test b) ss where (c1 >= c2);
ERROR:  did not find all requested child rels in append_rel_list


Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


server crashed with TRAP: FailedAssertion("!(!parallel_aware || pathnode->path.parallel_safe)"

2018-06-14 Thread Rajkumar Raghuwanshi
Hi,

I am getting a server crash for below test case.

postgres=# CREATE TABLE test( c1 int, c2 int, c3 text) partition by
range(c1);
CREATE TABLE
postgres=# create table test_p1 partition of test for values from
(minvalue) to (0);
CREATE TABLE
postgres=# create table test_p2 partition of test for values from (0) to
(maxvalue);
CREATE TABLE
postgres=#
postgres=# select (select max((select t1.c2 from test t1 where t1.c1 =
t2.c1))) from test t2;
server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
The connection to the server was lost. Attempting reset: Failed.
!>

--*logfile*
TRAP: FailedAssertion("!(!parallel_aware || pathnode->path.parallel_safe)",
File: "pathnode.c", Line: 1288)
2018-06-13 12:48:41.577 IST [52050] LOG:  server process (PID 52061) was
terminated by signal 6: Aborted
2018-06-13 12:48:41.577 IST [52050] DETAIL:  Failed process was running:
select (select max((select t1.c2 from test t1 where t1.c1 = t2.c1))) from
test t2;

*--core file *
Core was generated by `postgres: edb postgres [local]
SELECT   '.
Program terminated with signal 6, Aborted.
#0  0x003dd2632495 in raise (sig=6) at
../nptl/sysdeps/unix/sysv/linux/raise.c:64
64  return INLINE_SYSCALL (tgkill, 3, pid, selftid, sig);
Missing separate debuginfos, use: debuginfo-install
keyutils-libs-1.4-5.el6.x86_64 krb5-libs-1.10.3-65.el6.x86_64
libcom_err-1.41.12-23.el6.x86_64 libselinux-2.0.94-7.el6.x86_64
openssl-1.0.1e-57.el6.x86_64 zlib-1.2.3-29.el6.x86_64
(gdb) bt
#0  0x003dd2632495 in raise (sig=6) at
../nptl/sysdeps/unix/sysv/linux/raise.c:64
#1  0x003dd2633c75 in abort () at abort.c:92
#2  0x00a32782 in ExceptionalCondition (conditionName=0xc23718
"!(!parallel_aware || pathnode->path.parallel_safe)", errorType=0xc23411
"FailedAssertion",
fileName=0xc2357d "pathnode.c", lineNumber=1288) at assert.c:54
#3  0x007f1a3d in create_append_path (root=0x27a5930,
rel=0x27c25f0, subpaths=0x27c4e60, partial_subpaths=0x0,
required_outer=0x0, parallel_workers=2,
parallel_aware=true, partitioned_rels=0x27c36f0, rows=-1) at
pathnode.c:1288
#4  0x00797d40 in add_paths_to_append_rel (root=0x27a5930,
rel=0x27c25f0, live_childrels=0x27c4958) at allpaths.c:1700
#5  0x007d3392 in apply_scanjoin_target_to_paths (root=0x27a5930,
rel=0x27c25f0, scanjoin_targets=0x27c4558,
scanjoin_targets_contain_srfs=0x0,
scanjoin_target_parallel_safe=false, tlist_same_exprs=true) at
planner.c:6962
#6  0x007cb218 in grouping_planner (root=0x27a5930,
inheritance_update=false, tuple_fraction=0) at planner.c:2014
#7  0x007c943a in subquery_planner (glob=0x27855e0,
parse=0x26c7250, parent_root=0x0, hasRecursion=false, tuple_fraction=0) at
planner.c:966
#8  0x007c7ff7 in standard_planner (parse=0x26c7250,
cursorOptions=256, boundParams=0x0) at planner.c:405
#9  0x007c7d1f in planner (parse=0x26c7250, cursorOptions=256,
boundParams=0x0) at planner.c:263
#10 0x008c461e in pg_plan_query (querytree=0x26c7250,
cursorOptions=256, boundParams=0x0) at postgres.c:809
#11 0x008c4751 in pg_plan_queries (querytrees=0x27a58f8,
cursorOptions=256, boundParams=0x0) at postgres.c:875
#12 0x008c4a26 in exec_simple_query (query_string=0x26c5798 "select
(select max((select t1.c2 from test t1 where t1.c1 = t2.c1))) from test
t2;") at postgres.c:1050
#13 0x008c8e67 in PostgresMain (argc=1, argv=0x26ef2a0,
dbname=0x26ef100 "postgres", username=0x26c2298 "edb") at postgres.c:4153
#14 0x00826657 in BackendRun (port=0x26e7060) at postmaster.c:4361
#15 0x00825dc5 in BackendStartup (port=0x26e7060) at
postmaster.c:4033
#16 0x008221a7 in ServerLoop () at postmaster.c:1706
#17 0x00821ad9 in PostmasterMain (argc=3, argv=0x26c01f0) at
postmaster.c:1379
#18 0x000000748cb8 in main (argc=3, argv=0x26c01f0) at main.c:228

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


ntile() throws ERROR when hashagg is false

2018-06-14 Thread Rajkumar Raghuwanshi
Hi

ntile() throws ERROR when hashagg is false, test case given below.

postgres=# create table foo (a int, b int, c text);
CREATE TABLE
postgres=# insert into foo select i%20, i%30, to_char(i%12, 'FM') from
generate_series(0, 36) i;
INSERT 0 37
postgres=# explain select ntile(a) OVER () from foo GROUP BY a;
QUERY PLAN
---
 WindowAgg  (cost=25.00..29.50 rows=200 width=8)
   ->  HashAggregate  (cost=25.00..27.00 rows=200 width=4)
 Group Key: a
 ->  Seq Scan on foo  (cost=0.00..22.00 rows=1200 width=4)
(4 rows)

postgres=# select ntile(a) OVER () from foo GROUP BY a;
 ntile
---
 1
 1
 2
 2
 3
 3
 4
 4
 5
 5
 6
 6
 7
 7
 8
 8
 9
 9
10
11
(20 rows)

postgres=# set enable_hashagg to false;
SET
postgres=# explain select ntile(a) OVER () from foo GROUP BY a;
   QUERY PLAN
-
 WindowAgg  (cost=83.37..91.87 rows=200 width=8)
   ->  Group  (cost=83.37..89.37 rows=200 width=4)
 Group Key: a
 ->  Sort  (cost=83.37..86.37 rows=1200 width=4)
   Sort Key: a
   ->  Seq Scan on foo  (cost=0.00..22.00 rows=1200 width=4)
(6 rows)
postgres=# select ntile(a) OVER () from foo GROUP BY a;
ERROR:  argument of ntile must be greater than zero

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Server crashed with dense_rank on partition table.

2018-06-12 Thread Rajkumar Raghuwanshi
n (estate=0x1368c68,
planstate=0x1368e80, use_parallel_mode=false, operation=CMD_SELECT,
sendTuples=true, numberTuples=0,
direction=ForwardScanDirection, dest=0x137a928, execute_once=true) at
execMain.c:1726
#24 0x006dc34b in standard_ExecutorRun (queryDesc=0x1354318,
direction=ForwardScanDirection, count=0, execute_once=true) at
execMain.c:363
#25 0x006dc167 in ExecutorRun (queryDesc=0x1354318,
direction=ForwardScanDirection, count=0, execute_once=true) at
execMain.c:306
#26 0x008cadd2 in PortalRunSelect (portal=0x12f0c28, forward=true,
count=0, dest=0x137a928) at pquery.c:932
#27 0x008caa60 in PortalRun (portal=0x12f0c28,
count=9223372036854775807, isTopLevel=true, run_once=true, dest=0x137a928,
altdest=0x137a928,
completionTag=0x7ffe988f43a0 "") at pquery.c:773
#28 0x008c4a37 in exec_simple_query (query_string=0x128b798 "SELECT
dense_rank(b) WITHIN GROUP (ORDER BY a) FROM pagg_tab GROUP BY b ORDER BY
1;") at postgres.c:1122
#29 0x008c8d07 in PostgresMain (argc=1, argv=0x12b52a0,
dbname=0x12b5100 "postgres", username=0x1288298 "edb") at postgres.c:4153
#30 0x008264f7 in BackendRun (port=0x12ad060) at postmaster.c:4361
#31 0x00825c65 in BackendStartup (port=0x12ad060) at
postmaster.c:4033
#32 0x00822047 in ServerLoop () at postmaster.c:1706
#33 0x00821979 in PostmasterMain (argc=3, argv=0x12861f0) at
postmaster.c:1379
#34 0x00748bc4 in main (argc=3, argv=0x12861f0) at main.c:228

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: Test patch for partitionwise join with partitioned tables containing default partition

2018-06-06 Thread Rajkumar Raghuwanshi
On Wed, Jun 6, 2018 at 11:32 AM, Ashutosh Bapat <
ashutosh.ba...@enterprisedb.com> wrote:

> Thanks Rajkumar for starting a new thread. Please update the
> commitfest entry as well.
>
I have attached new thread in commitfest and detached the old one.

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: [HACKERS] Partition-wise join for join between (declaratively) partitioned tables

2018-06-06 Thread Rajkumar Raghuwanshi
On Wed, Jun 6, 2018 at 9:21 AM, Ashutosh Bapat <
ashutosh.ba...@enterprisedb.com> wrote:

> On Wed, Jun 6, 2018 at 8:11 AM, Thomas Munro
>  wrote:
> > On Mon, Mar 5, 2018 at 8:13 PM, Rajkumar Raghuwanshi
> >  wrote:
> >> On Wed, Feb 7, 2018 at 2:00 PM, Ashutosh Bapat
> >> Changed partition-wise statement to partitionwise.
> >> Attached re-based patch.
> >>
> >>> The patch looks good to me. I don't think we can reduce it further.
> >>> But we need some tests to test PWJ with default partitions. Marking
> >>> this as ready for committer.
> >
> > Hi Rajkumar,
> >
> > partition_join ... FAILED
> >
>
Thanks Thomas for patch review.

That made my heart stop for fraction of a second. I thought, something
> happened which caused partition_join test fail in master. But then I
> realised you are talking about Rajkumar's patch and test in that
> patch. I think it's better to start a separate thread discussing his
> patch, before I loose my heart ;)

Yeah, that would be better.

here is the new thread with updated patch.
https://www.postgresql.org/message-id/CAKcux6ky5YeZAY74qSh-ayPZZEQchz092g71iXXbC0%2BE3xoscA%40mail.gmail.com

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Test patch for partitionwise join with partitioned tables containing default partition

2018-06-05 Thread Rajkumar Raghuwanshi
Hi,

As of now partition_join.sql is not having test cases covering cases
where partition table have default partition, attaching a small test
case patch to cover those.

Here is a link of previous discussion :
https://www.postgresql.org/message-id/CAKcux6%3DLO-
XK9G0yLe634%2B0SP2UOn5ksVnmF-OntTBOEEaUGTg%40mail.gmail.com

As found by Thomas, the regression test currently fails with the v4 patch
because a
redundant Result node has been removed from a query plan. Here is the
updated
v5 patch fixing this.

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation
diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out
index b983f9c..8b3798e 100644
--- a/src/test/regress/expected/partition_join.out
+++ b/src/test/regress/expected/partition_join.out
@@ -1328,6 +1328,76 @@ SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM pht1 t1, ph
  273. | 273. | 548. | 0005 | 0005 | A0005
 (6 rows)
 
+-- test default partition behavior for range
+ALTER TABLE prt1 DETACH PARTITION prt1_p3;
+ALTER TABLE prt1 ATTACH PARTITION prt1_p3 DEFAULT;
+ANALYZE prt1;
+ALTER TABLE prt2 DETACH PARTITION prt2_p3;
+ALTER TABLE prt2 ATTACH PARTITION prt2_p3 DEFAULT;
+ANALYZE prt2;
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b;
+QUERY PLAN
+--
+ Sort
+   Sort Key: t1.a
+   ->  Append
+ ->  Hash Join
+   Hash Cond: (t2.b = t1.a)
+   ->  Seq Scan on prt2_p1 t2
+   ->  Hash
+ ->  Seq Scan on prt1_p1 t1
+   Filter: (b = 0)
+ ->  Hash Join
+   Hash Cond: (t2_1.b = t1_1.a)
+   ->  Seq Scan on prt2_p2 t2_1
+   ->  Hash
+ ->  Seq Scan on prt1_p2 t1_1
+   Filter: (b = 0)
+ ->  Hash Join
+   Hash Cond: (t2_2.b = t1_2.a)
+   ->  Seq Scan on prt2_p3 t2_2
+   ->  Hash
+ ->  Seq Scan on prt1_p3 t1_2
+   Filter: (b = 0)
+(21 rows)
+
+-- test default partition behavior for list
+ALTER TABLE plt1 DETACH PARTITION plt1_p3;
+ALTER TABLE plt1 ATTACH PARTITION plt1_p3 DEFAULT;
+ANALYZE plt1;
+ALTER TABLE plt2 DETACH PARTITION plt2_p3;
+ALTER TABLE plt2 ATTACH PARTITION plt2_p3 DEFAULT;
+ANALYZE plt2;
+EXPLAIN (COSTS OFF)
+SELECT avg(t1.a), avg(t2.b), t1.c, t2.c FROM plt1 t1 RIGHT JOIN plt2 t2 ON t1.c = t2.c WHERE t1.a % 25 = 0 GROUP BY t1.c, t2.c ORDER BY t1.c, t2.c;
+   QUERY PLAN   
+
+ Sort
+   Sort Key: t1.c
+   ->  HashAggregate
+ Group Key: t1.c, t2.c
+ ->  Append
+   ->  Hash Join
+ Hash Cond: (t2.c = t1.c)
+ ->  Seq Scan on plt2_p1 t2
+ ->  Hash
+   ->  Seq Scan on plt1_p1 t1
+ Filter: ((a % 25) = 0)
+   ->  Hash Join
+ Hash Cond: (t2_1.c = t1_1.c)
+ ->  Seq Scan on plt2_p2 t2_1
+ ->  Hash
+   ->  Seq Scan on plt1_p2 t1_1
+ Filter: ((a % 25) = 0)
+   ->  Hash Join
+ Hash Cond: (t2_2.c = t1_2.c)
+ ->  Seq Scan on plt2_p3 t2_2
+ ->  Hash
+   ->  Seq Scan on plt1_p3 t1_2
+ Filter: ((a % 25) = 0)
+(23 rows)
+
 --
 -- multiple levels of partitioning
 --
@@ -1857,3 +1927,30 @@ SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 FULL JOIN prt1 t2 ON (t1.c = t2.c);
->  Seq Scan on prt1_n_p2 t1_1
 (10 rows)
 
+-- partitionwise join can not be applied if only one of joining table has
+-- default partition
+ALTER TABLE prt2 DETACH PARTITION prt2_p3;
+ALTER TABLE prt2 ATTACH PARTITION prt2_p3 FOR VALUES FROM (500) TO (600);
+ANALYZE prt2;
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b;
+QUERY PLAN
+--
+ Sort
+   Sort Key: t1.a
+   ->  Hash Join
+ Hash Cond: (t2.b = t1.a)
+ ->  Append
+   ->  Seq Scan on prt2_p1 t2
+   ->  Seq Scan on prt2_p2 t2_1
+   ->  Seq Scan on prt2_p3 t2_2
+ ->  Hash
+   ->  Append
+ ->  Seq Scan on prt1_p1 t1
+   Filter: (b = 0)
+ ->  Seq Scan on prt1_p2 t1_1
+   Filte

Re: New committers announced at PGCon 2018

2018-06-03 Thread Rajkumar Raghuwanshi
Congratulations Everyone :)

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation

On Sun, Jun 3, 2018 at 10:55 PM, MauMau  wrote:

> From: Tom Lane
> The core team is pleased to announce the appointment of seven
> new Postgres committers:
>
>
> Congratulations to 7 cool generals!  Let's help new committers by
> doing good review and test, and let's aim for committers.
>
> FYI, I had a quick look at how many years the new committers have
> contributed to the community.  I used the following archive search
> with the search term "From: ".  I'm afraid this is an
> approximation, because the maximum hit count is 1,000.  There must
> have been long histories...
>
> https://www.postgresql.org/search/?m=1=pgsql-hackers
>
>
> Etsuro Fujita
> 7 years, 2011-09-12
> https://www.postgresql.org/message-id/4e6de18b.4010...@lab.ntt.co.jp
>
> Peter Geoghegan
> 8 years, 2010-02-11
> https://www.postgresql.org/message-id/db471ace1002110833v708f7f60xd07e
> 9534cdb3c...@mail.gmail.com
>
> Amit Kapila
> 7 years, 2011-09-07
> https://www.postgresql.org/message-id/DEA262E072764EBD82E0916334414EE3
> @china.huawei.com
>
> Alexander Korotkov
> 8 years, 2010-05-12
> https://www.postgresql.org/message-id/1273690962-sup-2...@alvh.no-ip.o
> rg
>
> Thomas Munro
> 7 years, 2011-11-06
> https://www.postgresql.org/message-id/CADLWmXXSBRt-4kKFFbR4jUdVskZEF4O
> b5l_asq+e7qx1p1d...@mail.gmail.com
>
> Michael Paquier
> 9 years, 2009-08-07
> https://www.postgresql.org/message-id/c64c5f8b0908062031k3ff48428j824a
> 9a46f2818...@mail.gmail.com
>
> Tomas Vondra
> 11 years, 2007-01-25
> https://www.postgresql.org/message-id/20070125155424.gg64...@nasby.net
>
>
> Regards
> MauMau
>
>
>
>


Re: parallel append vs. simple UNION ALL

2018-03-16 Thread Rajkumar Raghuwanshi
On Fri, Mar 9, 2018 at 1:04 AM, Robert Haas <robertmh...@gmail.com> wrote:

> Great.  Committed 0001.  Are you planning any further testing of this
> patch series?


Sorry I missed the mail.
Yes, I have further tested patches and find no more issues.

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: [HACKERS] Partition-wise join for join between (declaratively) partitioned tables

2018-03-04 Thread Rajkumar Raghuwanshi
On Wed, Feb 7, 2018 at 2:00 PM, Ashutosh Bapat <
ashutosh.ba...@enterprisedb.com> wrote:

> On Fri, Dec 22, 2017 at 3:00 PM, Rajkumar Raghuwanshi
> <rajkumar.raghuwan...@enterprisedb.com> wrote:
> > updated test patch attached.
>
Changed partition-wise statement to partitionwise.
Attached re-based patch.


> The patch looks good to me. I don't think we can reduce it further.
> But we need some tests to test PWJ with default partitions. Marking
> this as ready for committer.
>
Thanks Ashutosh.
diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out
index 4fccd9a..7bbdc36 100644
--- a/src/test/regress/expected/partition_join.out
+++ b/src/test/regress/expected/partition_join.out
@@ -1338,6 +1338,77 @@ SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM pht1 t1, ph
  273. | 273. | 548. | 0005 | 0005 | A0005
 (6 rows)
 
+-- test default partition behavior for range
+ALTER TABLE prt1 DETACH PARTITION prt1_p3;
+ALTER TABLE prt1 ATTACH PARTITION prt1_p3 DEFAULT;
+ANALYZE prt1;
+ALTER TABLE prt2 DETACH PARTITION prt2_p3;
+ALTER TABLE prt2 ATTACH PARTITION prt2_p3 DEFAULT;
+ANALYZE prt2;
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b;
+QUERY PLAN
+--
+ Sort
+   Sort Key: t1.a
+   ->  Append
+ ->  Hash Join
+   Hash Cond: (t2.b = t1.a)
+   ->  Seq Scan on prt2_p1 t2
+   ->  Hash
+ ->  Seq Scan on prt1_p1 t1
+   Filter: (b = 0)
+ ->  Hash Join
+   Hash Cond: (t2_1.b = t1_1.a)
+   ->  Seq Scan on prt2_p2 t2_1
+   ->  Hash
+ ->  Seq Scan on prt1_p2 t1_1
+   Filter: (b = 0)
+ ->  Hash Join
+   Hash Cond: (t2_2.b = t1_2.a)
+   ->  Seq Scan on prt2_p3 t2_2
+   ->  Hash
+ ->  Seq Scan on prt1_p3 t1_2
+   Filter: (b = 0)
+(21 rows)
+
+-- test default partition behavior for list
+ALTER TABLE plt1 DETACH PARTITION plt1_p3;
+ALTER TABLE plt1 ATTACH PARTITION plt1_p3 DEFAULT;
+ANALYZE plt1;
+ALTER TABLE plt2 DETACH PARTITION plt2_p3;
+ALTER TABLE plt2 ATTACH PARTITION plt2_p3 DEFAULT;
+ANALYZE plt2;
+EXPLAIN (COSTS OFF)
+SELECT avg(t1.a), avg(t2.b), t1.c, t2.c FROM plt1 t1 RIGHT JOIN plt2 t2 ON t1.c = t2.c WHERE t1.a % 25 = 0 GROUP BY t1.c, t2.c ORDER BY t1.c, t2.c;
+  QUERY PLAN  
+--
+ Sort
+   Sort Key: t1.c
+   ->  HashAggregate
+ Group Key: t1.c, t2.c
+ ->  Result
+   ->  Append
+ ->  Hash Join
+   Hash Cond: (t2.c = t1.c)
+   ->  Seq Scan on plt2_p1 t2
+   ->  Hash
+ ->  Seq Scan on plt1_p1 t1
+   Filter: ((a % 25) = 0)
+ ->  Hash Join
+   Hash Cond: (t2_1.c = t1_1.c)
+   ->  Seq Scan on plt2_p2 t2_1
+   ->  Hash
+ ->  Seq Scan on plt1_p2 t1_1
+   Filter: ((a % 25) = 0)
+ ->  Hash Join
+   Hash Cond: (t2_2.c = t1_2.c)
+   ->  Seq Scan on plt2_p3 t2_2
+   ->  Hash
+ ->  Seq Scan on plt1_p3 t1_2
+   Filter: ((a % 25) = 0)
+(24 rows)
+
 --
 -- multiple levels of partitioning
 --
@@ -1869,3 +1940,30 @@ SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 FULL JOIN prt1 t2 ON (t1.c = t2.c);
->  Seq Scan on prt1_n_p2 t1_1
 (10 rows)
 
+-- partitionwise join can not be applied if only one of joining table has
+-- default partition
+ALTER TABLE prt2 DETACH PARTITION prt2_p3;
+ALTER TABLE prt2 ATTACH PARTITION prt2_p3 FOR VALUES FROM (500) TO (600);
+ANALYZE prt2;
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b;
+QUERY PLAN
+--
+ Sort
+   Sort Key: t1.a
+   ->  Hash Join
+ Hash Cond: (t2.b = t1.a)
+ ->  Append
+   ->  Seq Scan on prt2_p1 t2
+   ->  Seq Scan on prt2_p2 t2_1
+   ->  Seq Scan on prt2_p3 t2_2
+ ->  Hash
+   ->  Append
+ ->  Seq Scan on prt1_p1 t1
+  

Re: parallel append vs. simple UNION ALL

2018-03-01 Thread Rajkumar Raghuwanshi
2018-03-01 18:38:38.726 IST [51712] LOG:  invalid record length at
0/1669488: wanted 24, got 0
2018-03-01 18:38:38.726 IST [51712] LOG:  redo done at 0/1669420
2018-03-01 18:38:38.726 IST [51712] LOG:  last completed transaction was at
log time 2018-03-01 18:38:36.53573+05:30
2018-03-01 18:38:38.744 IST [50071] LOG:  database system is ready to
accept connections

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: server crash in nodeAppend.c

2018-02-28 Thread Rajkumar Raghuwanshi
On Wed, Feb 28, 2018 at 9:29 PM, Robert Haas <robertmh...@gmail.com> wrote:

> Nice test case.  I pushed commit
> ce1663cdcdbd9bf15c81570277f70571b3727dd3, including your test case, to
> fix this.


Thanks Robert for fix and commit. I have reverified commit, this is working
fine now.

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: [HACKERS] Restrict concurrent update/delete with UPDATE of partition key

2018-02-27 Thread Rajkumar Raghuwanshi
On Wed, Feb 14, 2018 at 5:44 PM, Amit Kapila <amit.kapil...@gmail.com>
wrote:

> +# Concurrency error from GetTupleForTrigger
> +# Concurrency error from ExecLockRows
>
> I think you don't need to mention above sentences in spec files.
> Apart from that, your patch looks good to me.  I have marked it as
> Ready For Committer.
>

I too have tested this feature with isolation framework and this look good
to me.

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: parallel append vs. simple UNION ALL

2018-02-27 Thread Rajkumar Raghuwanshi
On Sat, Feb 24, 2018 at 2:55 AM, Robert Haas <robertmh...@gmail.com> wrote:

> 0001 is pretty much the same as the subquery-smarts.patch file I
> attached to the previous email.  I don't see much reason not to go
> ahead and commit this, although it could use a test case.  It makes
> the simple/flattened case work.  After some study I think that the
> gather-parameter handling is correct, although if somebody felt like
> reviewing that portion especially I wouldn't say no.
>

I have applied 0001 on pg-head, and while playing with regression tests, it
crashed with below test case.

postgres=# SET min_parallel_table_scan_size=0;
SET
postgres=# SELECT * FROM information_schema.foreign_data_wrapper_options
ORDER BY 1, 2, 3;
server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
The connection to the server was lost. Attempting reset: Failed.

--logfile
2018-02-26 22:06:07.331 IST [43508] LOG:  database system is ready to
accept connections
TRAP: FailedAssertion("!(subpath->parallel_safe)", File: "pathnode.c",
Line: 1813)
2018-02-26 22:06:42.345 IST [43508] LOG:  server process (PID 43519) was
terminated by signal 6: Aborted
2018-02-26 22:06:42.345 IST [43508] DETAIL:  Failed process was running:
SELECT * FROM information_schema.foreign_data_wrapper_options ORDER BY 1,
2, 3;

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: [HACKERS] Runtime Partition Pruning

2018-02-21 Thread Rajkumar Raghuwanshi
rocnode.c:446
#17 0x0070c376 in ExecProcNode (node=0x2697808) at
../../../src/include/executor/executor.h:239
#18 0x0070c70e in ExecNestLoop (pstate=0x262c0a0) at
nodeNestloop.c:160
#19 0x006deb3a in ExecProcNodeFirst (node=0x262c0a0) at
execProcnode.c:446
#20 0x006fb9ee in ExecProcNode (node=0x262c0a0) at
../../../src/include/executor/executor.h:239
#21 0x006fbcc4 in ExecHashJoinImpl (pstate=0x262bec8, parallel=0
'\000') at nodeHashjoin.c:262
#22 0x006fc3fd in ExecHashJoin (pstate=0x262bec8) at
nodeHashjoin.c:565
#23 0x006deb3a in ExecProcNodeFirst (node=0x262bec8) at
execProcnode.c:446
#24 0x006ea5bd in ExecProcNode (node=0x262bec8) at
../../../src/include/executor/executor.h:239
#25 0x006eaab0 in fetch_input_tuple (aggstate=0x262ba18) at
nodeAgg.c:406
#26 0x006ecd40 in agg_retrieve_direct (aggstate=0x262ba18) at
nodeAgg.c:1736
#27 0x006ec932 in ExecAgg (pstate=0x262ba18) at nodeAgg.c:1551
#28 0x006deb3a in ExecProcNodeFirst (node=0x262ba18) at
execProcnode.c:446
#29 0x006d59cd in ExecProcNode (node=0x262ba18) at
../../../src/include/executor/executor.h:239
#30 0x006d8326 in ExecutePlan (estate=0x262b7c8,
planstate=0x262ba18, use_parallel_mode=0 '\000', operation=CMD_SELECT,
sendTuples=1 '\001', numberTuples=0,
direction=ForwardScanDirection, dest=0x7f6cf676c7f0, execute_once=1
'\001') at execMain.c:1721
#31 0x006d5f9f in standard_ExecutorRun (queryDesc=0x258aa98,
direction=ForwardScanDirection, count=0, execute_once=1 '\001') at
execMain.c:361
#32 0x006d5dbb in ExecutorRun (queryDesc=0x258aa98,
direction=ForwardScanDirection, count=0, execute_once=1 '\001') at
execMain.c:304
#33 0x008b588b in PortalRunSelect (portal=0x25caa58, forward=1
'\001', count=0, dest=0x7f6cf676c7f0) at pquery.c:932
#34 0x008b5519 in PortalRun (portal=0x25caa58,
count=9223372036854775807, isTopLevel=1 '\001', run_once=1 '\001',
dest=0x7f6cf676c7f0, altdest=0x7f6cf676c7f0,
completionTag=0x7ffe0f75e5e0 "") at pquery.c:773
#35 0x008af540 in exec_simple_query (
query_string=0x2565728 "select count(*) from prt1 x where (x.a,x.b) in
(select t1.a,t2.b from prt1 t1,prt2 t2 where t1.a=t2.b) \nand (x.c) in
(select t3.c from plt1 t3,plt2 t4 where t3.c=t4.c);") at postgres.c:1120
#36 0x008b37d4 in PostgresMain (argc=1, argv=0x25910e0,
dbname=0x2590f40 "postgres", username=0x2562228 "edb") at postgres.c:4144
#37 0x00812afa in BackendRun (port=0x2588ea0) at postmaster.c:4412
#38 0x00000081226e in BackendStartup (port=0x2588ea0) at
postmaster.c:4084
*/

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: [HACKERS] Runtime Partition Pruning

2018-02-20 Thread Rajkumar Raghuwanshi
On Sat, Feb 17, 2018 at 2:27 PM, David Rowley <david.row...@2ndquadrant.com>
wrote:

> Hi,
>
> I've attached an updated patch, now at v10. v9 was short lived due to
> the evolution of Amit's which which this based on.
>
> This version is based on Amit's v27 of faster partition pruning [1]
> which can be applied atop of ad7dbee36.
>

Hi,

I have applied v10 patch on Amit's v27 over head ad7dbee36. I got "ERROR:
partition missing from Append subplans" with the patch. on head and only
with Amit's patches query is working fine, Please find test case below.

CREATE TABLE part ( c1 INT2, c2 DATE) PARTITION BY RANGE (c1);
CREATE TABLE part_p1 PARTITION OF part FOR VALUES FROM (0) TO (141)
PARTITION BY RANGE(c2);
CREATE TABLE part_p11 PARTITION OF part_p1 FOR VALUES FROM ('1/1/1997') TO
('2/1/1999');
CREATE TABLE part_p12 PARTITION OF part_p1 FOR VALUES FROM ('2/1/1999') TO
('2/1/2000');
CREATE TABLE part_p2 PARTITION OF part FOR VALUES FROM (141) TO (211)
PARTITION BY RANGE(c2);
CREATE TABLE part_p21 PARTITION OF part_p2 FOR VALUES FROM ('1/1/2000') TO
('2/1/2001');
CREATE TABLE part_p22 PARTITION OF part_p2 FOR VALUES FROM ('2/1/2001') TO
('2/1/2006');

INSERT INTO part VALUES (100,'1/1/1999');
INSERT INTO part VALUES (110,'1/1/1998');
INSERT INTO part VALUES (130,'1/1/2000');
INSERT INTO part VALUES (170,'1/1/2000');
INSERT INTO part VALUES (180,'1/1/2001');
INSERT INTO part VALUES (190,'1/1/2006');
INSERT INTO part VALUES (200,'1/1/2000');
INSERT INTO part VALUES (210,'1/1/2002');

postgres=# PREPARE RTP AS SELECT * FROM PART WHERE c2 BETWEEN '1/1/1998'
AND '1/1/1999';
PREPARE
postgres=# EXPLAIN execute RTP;
 QUERY
PLAN
-
 Append  (cost=0.00..46.00 rows=12 width=6)
   ->  Seq Scan on part_p11  (cost=0.00..46.00 rows=12 width=6)
 Filter: ((c2 >= '1998-01-01'::date) AND (c2 <= '1999-01-01'::date))
(3 rows)

postgres=# execute RTP;
*ERROR:  partition missing from Append subplans*

deallocate RTP;
DROP TABLE part;

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Expression errors with "FOR UPDATE" and postgres_fdw with partition wise join enabled.

2018-02-12 Thread Rajkumar Raghuwanshi
Hi,

I am getting "ERROR:  unexpected expression in subquery output" and
"ERROR:  variable not found in subplan target lists" errors, for "FOR
UPDATE" with postgres_fdw. (when set enable_partition_wise_join to true);

Attached patch have queries which are throwing mentioned error on running
make check in contrib/postgres_fdw folder.

An independent test case to reproduce this is given below.

SET enable_partition_wise_join TO true;

CREATE EXTENSION postgres_fdw;
CREATE SERVER fdw_server FOREIGN DATA WRAPPER postgres_fdw OPTIONS (dbname
'postgres', port '5432', use_remote_estimate 'true');
CREATE USER MAPPING FOR PUBLIC SERVER fdw_server;

CREATE TABLE pt1 ( c1 int NOT NULL, c2 int NOT NULL, c3 text)PARTITION BY
RANGE(c1);
CREATE TABLE pt1p1 (like pt1);
CREATE TABLE pt1p2 (like pt1);

CREATE TABLE pt2 (c1 int NOT NULL, c2 int NOT NULL, c3 text) PARTITION BY
RANGE(c1);
CREATE TABLE pt2p1 (like pt2);
CREATE TABLE pt2p2 (like pt2);

INSERT INTO pt1p1 SELECT id, id + 1,'AAA' || to_char(id, 'FM000') FROM
generate_series(-100, -1) id;
INSERT INTO pt1p2 SELECT id, id + 1,'AAA' || to_char(id, 'FM000') FROM
generate_series(1, 99) id;
INSERT INTO pt2p1 SELECT id, id + 1,'AAA' || to_char(id, 'FM000') FROM
generate_series(-100, -1) id;
INSERT INTO pt2p2 SELECT id, id + 1,'AAA' || to_char(id, 'FM000') FROM
generate_series(1, 99) id;

CREATE FOREIGN TABLE ft1p1 PARTITION OF pt1 FOR VALUES FROM (MINVALUE) TO
(0) SERVER fdw_server OPTIONS (table_name 'pt1p1');
CREATE FOREIGN TABLE ft1p2 PARTITION OF pt1 FOR VALUES FROM (0) TO
(MAXVALUE) SERVER fdw_server OPTIONS (table_name 'pt1p2');
CREATE FOREIGN TABLE ft2p1 PARTITION OF pt2 FOR VALUES FROM (MINVALUE) TO
(0) SERVER fdw_server OPTIONS (table_name 'pt2p1');
CREATE FOREIGN TABLE ft2p2 PARTITION OF pt2 FOR VALUES FROM (0) TO
(MAXVALUE) SERVER fdw_server OPTIONS (table_name 'pt2p2');

ANALYZE pt1;
ANALYZE pt2;
ANALYZE ft1p1;
ANALYZE ft1p2;
ANALYZE ft2p1;
ANALYZE ft2p2;

EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM pt1  WHERE c1 = 50) t1 INNER
JOIN (SELECT t2.c1, t3.c1 FROM (SELECT c1 FROM pt1  WHERE c1 between 50 and
60) t2 FULL JOIN (SELECT c1 FROM pt2 WHERE c1 between 50 and 60) t3 ON
(t2.c1 = t3.c1) WHERE t2.c1 IS NULL OR t2.c1 IS NOT NULL) ss(a, b) ON
(TRUE) ORDER BY t1.c1, ss.a, ss.b FOR UPDATE OF t1;
ERROR:  unexpected expression in subquery output

EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM pt1) t1 INNER JOIN (SELECT
t2.c1, t3.c1 FROM (SELECT c1 FROM pt1) t2 FULL JOIN (SELECT c1 FROM pt1) t3
ON (t2.c1 = t3.c1)) ss(a, b) ON (TRUE) ORDER BY t1.c1, ss.a, ss.b FOR
UPDATE OF t1;
ERROR:  variable not found in subplan target lists

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index 68fdfdc..5466007 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -1913,4 +1913,9 @@ EXPLAIN (COSTS OFF)
 SELECT t1.a,t1.b FROM fprt1 t1, LATERAL (SELECT t2.a, t2.b FROM fprt2 t2 WHERE t1.a = t2.b AND t1.b = t2.a) q WHERE t1.a%25 = 0 ORDER BY 1,2;
 SELECT t1.a,t1.b FROM fprt1 t1, LATERAL (SELECT t2.a, t2.b FROM fprt2 t2 WHERE t1.a = t2.b AND t1.b = t2.a) q WHERE t1.a%25 = 0 ORDER BY 1,2;
 
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.a, ss.c1, ss.c2 FROM (SELECT a FROM fprt1) t1 INNER JOIN (SELECT t2.a, t3.a FROM (SELECT a FROM fprt1  WHERE a between 50 and 60) t2 FULL JOIN (SELECT a FROM fprt1) t3 ON (t2.a = t3.a)) ss(c1, c2) ON (TRUE) ORDER BY t1.a, ss.c1, ss.c2 FOR UPDATE OF t1;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.a, ss.c1, ss.c2 FROM (SELECT a FROM fprt1) t1 INNER JOIN (SELECT t2.a, t3.a FROM (SELECT a FROM fprt1) t2 FULL JOIN (SELECT a FROM fprt1) t3 ON (t2.a = t3.a)) ss(c1, c2) ON (TRUE) ORDER BY t1.a, ss.c1, ss.c2 FOR UPDATE OF t1;
 RESET enable_partition_wise_join;


Re: Query running for very long time (server hanged) with parallel append

2018-02-05 Thread Rajkumar Raghuwanshi
On Mon, Feb 5, 2018 at 3:29 PM, Amit Khandekar <amitdkhan...@gmail.com>
wrote:

>
> Attached is a patch that fixes this issue on the above lines.
>

Patch applied cleanly and work fine for me. mentioned issue is not
reproducible now.

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Multi-level hierarchy with parallel append can lead to an extra subplan.

2018-01-09 Thread Rajkumar Raghuwanshi
Hi,

I am getting extra subplan when using parallel append with multi-level
hierarchy,  leading to data corruption.
Please see below test case.

-- set below parameters to encourage use of parallel plans
SET parallel_setup_cost=0;
SET parallel_tuple_cost=0;
SET min_parallel_table_scan_size=0;
SET max_parallel_workers_per_gather=4;

--create below data set
CREATE TABLE RM38941_inherit (c1 INTEGER PRIMARY KEY,c2 INTEGER,c3
CHAR(10));
INSERT INTO RM38941_inherit SELECT i, i % 125, to_char(i % 4, 'FM')
FROM generate_series(0, 499,2) i;

CREATE TABLE RM38941_inherit_t1 () INHERITS (RM38941_inherit);
INSERT INTO RM38941_inherit_t1 SELECT i, i % 125, to_char(i % 4, 'FM')
FROM generate_series(0, 499,3) i;

CREATE TABLE RM38941_inherit_t2 () INHERITS (RM38941_inherit);
INSERT INTO RM38941_inherit_t2 SELECT i, i % 125, to_char(i % 4, 'FM')
FROM generate_series(0, 499,5) i;

CREATE TABLE RM38941_union_t1 (c1 INTEGER PRIMARY KEY,c2 INTEGER,c3
CHAR(10));
INSERT INTO RM38941_union_t1 SELECT i, i % 125, to_char(i % 4, 'FM')
FROM generate_series(0, 499,2) i;

CREATE TABLE RM38941_union_t2 (c1 INTEGER PRIMARY KEY,c2 INTEGER,c3
CHAR(10));
INSERT INTO RM38941_union_t2 SELECT i, i % 125, to_char(i % 4, 'FM')
FROM generate_series(0, 499,4) i;

ALTER TABLE RM38941_union_t1 SET (parallel_workers = 0);
ALTER TABLE RM38941_inherit_t1 SET (parallel_workers = 0);

--with parallel_append
SET enable_parallel_append=on;
postgres=# EXPLAIN (COSTS OFF)
postgres-# SELECT AVG(c1),SUM(c2) FROM (SELECT c1,c2 FROM RM38941_union_t2
UNION ALL SELECT c1,c2 FROM RM38941_inherit UNION ALL SELECT c1,c2 FROM
RM38941_union_t1)UA;
  QUERY PLAN
---
 Finalize Aggregate
   ->  Gather
 Workers Planned: 3
 ->  Partial Aggregate
   ->  Parallel Append
 ->  Seq Scan on rm38941_inherit_t1
 ->  Seq Scan on rm38941_union_t1
 ->  Parallel Seq Scan on rm38941_union_t2
 ->  Parallel Seq Scan on rm38941_inherit
 ->  Parallel Seq Scan on rm38941_inherit_t2
 ->  Parallel Append
   ->  Seq Scan on rm38941_inherit_t1
   ->  Parallel Seq Scan on rm38941_inherit
   ->  Parallel Seq Scan on rm38941_inherit_t2
(14 rows)

postgres=# SELECT AVG(c1),SUM(c2) FROM (SELECT c1,c2 FROM RM38941_union_t2
UNION ALL SELECT c1,c2 FROM RM38941_inherit UNION ALL SELECT c1,c2 FROM
RM38941_union_t1)UA;
 avg  |  sum
--+---
 248.6983676366217175 | 86916
(1 row)


--without parallel_append
SET enable_parallel_append=off;
postgres=# EXPLAIN (COSTS OFF)
postgres-# SELECT AVG(c1),SUM(c2) FROM (SELECT c1,c2 FROM RM38941_union_t2
UNION ALL SELECT c1,c2 FROM RM38941_inherit UNION ALL SELECT c1,c2 FROM
RM38941_union_t1)UA;
 QUERY PLAN

 Aggregate
   ->  Append
 ->  Seq Scan on rm38941_union_t2
 ->  Seq Scan on rm38941_inherit
 ->  Seq Scan on rm38941_inherit_t1
 ->  Seq Scan on rm38941_inherit_t2
 ->  Seq Scan on rm38941_union_t1
(7 rows)

postgres=# SELECT AVG(c1),SUM(c2) FROM (SELECT c1,c2 FROM RM38941_union_t2
UNION ALL SELECT c1,c2 FROM RM38941_inherit UNION ALL SELECT c1,c2 FROM
RM38941_union_t1)UA;
 avg  |  sum
--+---
 248.6917040358744395 | 55083
(1 row)

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation


Re: [HACKERS] Partition-wise join for join between (declaratively) partitioned tables

2017-12-22 Thread Rajkumar Raghuwanshi
On Wed, Dec 20, 2017 at 5:21 PM, Ashutosh Bapat <
ashutosh.ba...@enterprisedb.com> wrote:

> Thanks. Here are some comments
>
> Thanks Ashutosh for review and suggestions.


> +-- test default partition behavior for range
> +ALTER TABLE prt1 DETACH PARTITION prt1_p3;
> +ALTER TABLE prt1 ATTACH PARTITION prt1_p3 DEFAULT;
> +ALTER TABLE prt2 DETACH PARTITION prt2_p3;
> +ALTER TABLE prt2 ATTACH PARTITION prt2_p3 DEFAULT;
>
> I think we need an ANALYZE here in case the statistics gets updated while
> DETACH and ATTACH is going on. Other testcases also need to be updated with
> ANALYZE, including the negative one.
>
Done.


>
> +-- partition-wise join can not be applied if the only one of joining
> table have
>
> Correction: ... if only one of the joining tables has ...
>
Done.


> Please add the patch to the next commitfest so that it's not
> forgotten.

Done.
Added to CF: https://commitfest.postgresql.org/16/1426/


> I think we can get rid of the multi-level partition-wise
> testcase as well. Also, since we are re-attaching existing partition
> tables as default partitions, we don't need to check the output as
> well; just plan should be enough.
>
Ok. Done.

updated test patch attached.
diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out
index 27ab852..d4c875a 100644
--- a/src/test/regress/expected/partition_join.out
+++ b/src/test/regress/expected/partition_join.out
@@ -1337,6 +1337,74 @@ SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM pht1 t1, ph
  574. | 574.5000 | 1148. | 0011 | 0011 | A0011
 (12 rows)
 
+-- test default partition behavior for range
+ALTER TABLE prt1 DETACH PARTITION prt1_p3;
+ALTER TABLE prt1 ATTACH PARTITION prt1_p3 DEFAULT;
+ANALYZE prt1;
+ALTER TABLE prt2 DETACH PARTITION prt2_p3;
+ALTER TABLE prt2 ATTACH PARTITION prt2_p3 DEFAULT;
+ANALYZE prt2;
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b;
+QUERY PLAN
+--
+ Sort
+   Sort Key: t1.a
+   ->  Append
+ ->  Hash Join
+   Hash Cond: (t2.b = t1.a)
+   ->  Seq Scan on prt2_p1 t2
+   ->  Hash
+ ->  Seq Scan on prt1_p1 t1
+   Filter: (b = 0)
+ ->  Hash Join
+   Hash Cond: (t2_1.b = t1_1.a)
+   ->  Seq Scan on prt2_p2 t2_1
+   ->  Hash
+ ->  Seq Scan on prt1_p2 t1_1
+   Filter: (b = 0)
+ ->  Hash Join
+   Hash Cond: (t2_2.b = t1_2.a)
+   ->  Seq Scan on prt2_p3 t2_2
+   ->  Hash
+ ->  Seq Scan on prt1_p3 t1_2
+   Filter: (b = 0)
+(21 rows)
+
+-- test default partition behavior for list
+ALTER TABLE plt1 DETACH PARTITION plt1_p3;
+ALTER TABLE plt1 ATTACH PARTITION plt1_p3 DEFAULT;
+ANALYZE plt1;
+ALTER TABLE plt2 DETACH PARTITION plt2_p3;
+ALTER TABLE plt2 ATTACH PARTITION plt2_p3 DEFAULT;
+ANALYZE plt2;
+EXPLAIN (COSTS OFF)
+SELECT avg(t1.a), avg(t2.b), t1.c, t2.c FROM plt1 t1 RIGHT JOIN plt2 t2 ON t1.c = t2.c GROUP BY t1.c, t2.c ORDER BY t1.c, t2.c;
+  QUERY PLAN  
+--
+ Sort
+   Sort Key: t1.c, t2.c
+   ->  HashAggregate
+ Group Key: t1.c, t2.c
+ ->  Result
+   ->  Append
+ ->  Hash Right Join
+   Hash Cond: (t1.c = t2.c)
+   ->  Seq Scan on plt1_p1 t1
+   ->  Hash
+ ->  Seq Scan on plt2_p1 t2
+ ->  Hash Right Join
+   Hash Cond: (t1_1.c = t2_1.c)
+   ->  Seq Scan on plt1_p2 t1_1
+   ->  Hash
+ ->  Seq Scan on plt2_p2 t2_1
+ ->  Hash Right Join
+   Hash Cond: (t1_2.c = t2_2.c)
+   ->  Seq Scan on plt1_p3 t1_2
+   ->  Hash
+ ->  Seq Scan on plt2_p3 t2_2
+(21 rows)
+
 --
 -- multiple levels of partitioning
 --
@@ -1868,3 +1936,30 @@ SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 FULL JOIN prt1 t2 ON (t1.c = t2.c);
->  Seq Scan on prt1_n_p2 t1_1
 (10 rows)
 
+-- partition-wise join can not be applied if only one of joining table has
+-- default partition
+ALTER TABLE prt2 DETACH PARTITION prt2_p3;
+ALTER TABLE prt2 ATTACH PARTITION prt2_p3 FOR VALUES FROM (500) TO (600);
+ANALYZE prt2;
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b;
+QUERY PLAN

Re: [HACKERS] Partition-wise join for join between (declaratively) partitioned tables

2017-12-04 Thread Rajkumar Raghuwanshi
On Tue, Dec 5, 2017 at 11:04 AM, Rajkumar Raghuwanshi
<rajkumar.raghuwan...@enterprisedb.com> wrote:
> On Mon, Dec 4, 2017 at 7:34 AM, Ashutosh Bapat
> <ashutosh.ba...@enterprisedb.com> wrote:
>> I agree, the patch looks longer than expected. I think, it's important
>> to have some testcases to test partition-wise join with default
>> partitions. I think we need at least one test for range default
>> partitions, one test for list partitioning, one for multi-level
>> partitioning and one negative testcase with default partition missing
>> from one side of join.
>>
>> May be we could reduce the number of SQL commands and queries in the
>> patch by adding default partition to every table that participates in
>> partition-wise join (leave the tables participating in negative tests
>> aside.). But that's going to increase the size of EXPLAIN outputs and
>> query results. The negative test may simply drop the default partition
>> from one of the tables.
>>
>> For every table being tested, the patch adds two ALTER TABLE commands,
>> one for detaching an existing partition and then attach the same as
>> default partition. Alternative to that is just add a new default
>> partition without detaching and existing partition. But then the
>> default partition needs to populated with some data, which requires 1
>> INSERT statement at least. That doesn't reduce the size of patch, but
>> increases the output of query and EXPLAIN plan.
>>
>> May be in case of multi-level partitioning test, we don't need to add
>> DEFAULT in every partitioned relation; adding to one of them would be
>> enough. May be add it to the parent, but that too can be avoided. That
>> would reduce the size of patch a bit.
>
> Thanks Ashutosh for suggestions.
>
> I have reduced test cases as suggested. Attaching updated patch.
>
Sorry Attached wrong patch.

attaching correct patch now.
diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out
index 27ab852..a60ba7f 100644
--- a/src/test/regress/expected/partition_join.out
+++ b/src/test/regress/expected/partition_join.out
@@ -1337,6 +1337,96 @@ SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM pht1 t1, ph
  574. | 574.5000 | 1148. | 0011 | 0011 | A0011
 (12 rows)
 
+-- test default partition behavior for range
+ALTER TABLE prt1 DETACH PARTITION prt1_p3;
+ALTER TABLE prt1 ATTACH PARTITION prt1_p3 DEFAULT;
+ALTER TABLE prt2 DETACH PARTITION prt2_p3;
+ALTER TABLE prt2 ATTACH PARTITION prt2_p3 DEFAULT;
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b;
+QUERY PLAN
+--
+ Sort
+   Sort Key: t1.a
+   ->  Append
+ ->  Hash Join
+   Hash Cond: (t2.b = t1.a)
+   ->  Seq Scan on prt2_p1 t2
+   ->  Hash
+ ->  Seq Scan on prt1_p1 t1
+   Filter: (b = 0)
+ ->  Hash Join
+   Hash Cond: (t2_1.b = t1_1.a)
+   ->  Seq Scan on prt2_p2 t2_1
+   ->  Hash
+ ->  Seq Scan on prt1_p2 t1_1
+   Filter: (b = 0)
+ ->  Hash Join
+   Hash Cond: (t2_2.b = t1_2.a)
+   ->  Seq Scan on prt2_p3 t2_2
+   ->  Hash
+ ->  Seq Scan on prt1_p3 t1_2
+   Filter: (b = 0)
+(21 rows)
+
+SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b;
+  a  |  c   |  b  |  c   
+-+--+-+--
+   0 |  |   0 | 
+ 150 | 0150 | 150 | 0150
+ 300 | 0300 | 300 | 0300
+ 450 | 0450 | 450 | 0450
+(4 rows)
+
+-- test default partition behavior for list
+ALTER TABLE plt1 DETACH PARTITION plt1_p3;
+ALTER TABLE plt1 ATTACH PARTITION plt1_p3 DEFAULT;
+ALTER TABLE plt2 DETACH PARTITION plt2_p3;
+ALTER TABLE plt2 ATTACH PARTITION plt2_p3 DEFAULT;
+EXPLAIN (COSTS OFF)
+SELECT avg(t1.a), avg(t2.b), t1.c, t2.c FROM plt1 t1 RIGHT JOIN plt2 t2 ON t1.c = t2.c GROUP BY t1.c, t2.c ORDER BY t1.c, t2.c;
+  QUERY PLAN  
+--
+ Sort
+   Sort Key: t1.c, t2.c
+   ->  HashAggregate
+ Group Key: t1.c, t2.c
+ ->  Result
+   ->  Append
+ ->  Hash Right Join
+   Hash Cond: (t1.c = t2.c)
+   ->  Seq Scan on plt1_p1 t1
+   ->  Hash
+ ->  Seq Scan on plt2_p1 t2
+ ->  Hash Rig

Re: [HACKERS] Partition-wise join for join between (declaratively) partitioned tables

2017-12-04 Thread Rajkumar Raghuwanshi
On Mon, Dec 4, 2017 at 7:34 AM, Ashutosh Bapat
<ashutosh.ba...@enterprisedb.com> wrote:
> I agree, the patch looks longer than expected. I think, it's important
> to have some testcases to test partition-wise join with default
> partitions. I think we need at least one test for range default
> partitions, one test for list partitioning, one for multi-level
> partitioning and one negative testcase with default partition missing
> from one side of join.
>
> May be we could reduce the number of SQL commands and queries in the
> patch by adding default partition to every table that participates in
> partition-wise join (leave the tables participating in negative tests
> aside.). But that's going to increase the size of EXPLAIN outputs and
> query results. The negative test may simply drop the default partition
> from one of the tables.
>
> For every table being tested, the patch adds two ALTER TABLE commands,
> one for detaching an existing partition and then attach the same as
> default partition. Alternative to that is just add a new default
> partition without detaching and existing partition. But then the
> default partition needs to populated with some data, which requires 1
> INSERT statement at least. That doesn't reduce the size of patch, but
> increases the output of query and EXPLAIN plan.
>
> May be in case of multi-level partitioning test, we don't need to add
> DEFAULT in every partitioned relation; adding to one of them would be
> enough. May be add it to the parent, but that too can be avoided. That
> would reduce the size of patch a bit.

Thanks Ashutosh for suggestions.

I have reduced test cases as suggested. Attaching updated patch.

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation
diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out
index 27ab852..f83166b 100644
--- a/src/test/regress/expected/partition_join.out
+++ b/src/test/regress/expected/partition_join.out
@@ -1337,6 +1337,96 @@ SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM pht1 t1, ph
  574. | 574.5000 | 1148. | 0011 | 0011 | A0011
 (12 rows)
 
+-- test default partition behavior for range
+ALTER TABLE prt1 DETACH PARTITION prt1_p3;
+ALTER TABLE prt1 ATTACH PARTITION prt1_p3 DEFAULT;
+ALTER TABLE prt2 DETACH PARTITION prt2_p3;
+ALTER TABLE prt2 ATTACH PARTITION prt2_p3 DEFAULT;
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b;
+QUERY PLAN
+--
+ Sort
+   Sort Key: t1.a
+   ->  Append
+ ->  Hash Join
+   Hash Cond: (t2.b = t1.a)
+   ->  Seq Scan on prt2_p1 t2
+   ->  Hash
+ ->  Seq Scan on prt1_p1 t1
+   Filter: (b = 0)
+ ->  Hash Join
+   Hash Cond: (t2_1.b = t1_1.a)
+   ->  Seq Scan on prt2_p2 t2_1
+   ->  Hash
+ ->  Seq Scan on prt1_p2 t1_1
+   Filter: (b = 0)
+ ->  Hash Join
+   Hash Cond: (t2_2.b = t1_2.a)
+   ->  Seq Scan on prt2_p3 t2_2
+   ->  Hash
+ ->  Seq Scan on prt1_p3 t1_2
+   Filter: (b = 0)
+(21 rows)
+
+SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b;
+  a  |  c   |  b  |  c   
+-+--+-+--
+   0 |  |   0 | 
+ 150 | 0150 | 150 | 0150
+ 300 | 0300 | 300 | 0300
+ 450 | 0450 | 450 | 0450
+(4 rows)
+
+-- test default partition behavior for list
+ALTER TABLE plt1 DETACH PARTITION plt1_p3;
+ALTER TABLE plt1 ATTACH PARTITION plt1_p3 DEFAULT;
+ALTER TABLE plt2 DETACH PARTITION plt2_p3;
+ALTER TABLE plt2 ATTACH PARTITION plt2_p3 DEFAULT;
+EXPLAIN (COSTS OFF)
+SELECT avg(t1.a), avg(t2.b), t1.c, t2.c FROM plt1 t1 RIGHT JOIN plt2 t2 ON t1.c = t2.c GROUP BY t1.c, t2.c ORDER BY t1.c, t2.c;
+  QUERY PLAN  
+--
+ Sort
+   Sort Key: t1.c, t2.c
+   ->  HashAggregate
+ Group Key: t1.c, t2.c
+ ->  Result
+   ->  Append
+ ->  Hash Right Join
+   Hash Cond: (t1.c = t2.c)
+   ->  Seq Scan on plt1_p1 t1
+   ->  Hash
+ ->  Seq Scan on plt2_p1 t2
+ ->  Hash Right Join
+   Hash Cond: (t1_1.c = t2_1.c)
+   ->  Seq Scan on plt1_p2 t1_1
+   ->  Hash
+ ->  Seq Scan on plt2_p2 

Re: [HACKERS] Partition-wise aggregation/grouping

2017-11-27 Thread Rajkumar Raghuwanshi
On Thu, Nov 23, 2017 at 6:38 PM, Jeevan Chalke
<jeevan.cha...@enterprisedb.com> wrote:
> Let me know if I missed any comment to be fixed.

Hi,

I have applied v8 patches on commit id 8735978e7aebfbc499843630131c18d1f7346c79,
and getting below observation, please take a look.

Observation:
"when joining a foreign partition table with local partition table
getting wrong output
with partition_wise_join enabled, same is working fine on PG-head
without aggregates patch."

Test-case:
CREATE EXTENSION postgres_fdw;
CREATE SERVER pwj_server FOREIGN DATA WRAPPER postgres_fdw OPTIONS
(dbname 'postgres',port '5432',use_remote_estimate 'true');
CREATE USER MAPPING FOR PUBLIC SERVER pwj_server;

CREATE TABLE fplt1 (a int,  c text) PARTITION BY LIST(c);
CREATE TABLE fplt1_p1 (a int,  c text);
CREATE TABLE fplt1_p2 (a int,  c text);
CREATE FOREIGN TABLE ftplt1_p1 PARTITION OF fplt1 FOR VALUES IN
('', '0001', '0002', '0003') SERVER pwj_server OPTIONS (TABLE_NAME
'fplt1_p1');
CREATE FOREIGN TABLE ftplt1_p2 PARTITION OF fplt1 FOR VALUES IN
('0004', '0005', '0006', '0007') SERVER pwj_server OPTIONS (TABLE_NAME
'fplt1_p2');
INSERT INTO fplt1_p1 SELECT i, to_char(i%8, 'FM') FROM
generate_series(0, 199, 2) i;
INSERT INTO fplt1_p2 SELECT i, to_char(i%8, 'FM') FROM
generate_series(200, 398, 2) i;

CREATE TABLE lplt2 (a int,  c text) PARTITION BY LIST(c);
CREATE TABLE lplt2_p1 PARTITION OF lplt2 FOR VALUES IN ('',
'0001', '0002', '0003');
CREATE TABLE lplt2_p2 PARTITION OF lplt2 FOR VALUES IN ('0004',
'0005', '0006', '0007');
INSERT INTO lplt2 SELECT i, to_char(i%8, 'FM') FROM
generate_series(0, 398, 3) i;

SELECT t1.c, t2.c,count(*) FROM fplt1 t1 JOIN lplt2 t2 ON (t1.c = t2.c
and t1.a = t2.a)  WHERE t1.a % 25 = 0 GROUP BY 1,2 ORDER BY t1.c,
t2.c;
  c   |  c   | count
--+--+---
  |  | 1
 0004 | 0004 | 1
 0006 | 0006 | 1
(3 rows)

SET enable_partition_wise_join = on;
SELECT t1.c, t2.c,count(*) FROM fplt1 t1 JOIN lplt2 t2 ON (t1.c = t2.c
and t1.a = t2.a)  WHERE t1.a % 25 = 0 GROUP BY 1,2 ORDER BY t1.c,
t2.c;
  c   |  c   | count
--+--+---
  |  | 1
 0004 | 0004 | 1
(2 rows)


Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation



Re: [HACKERS] Parallel Append implementation

2017-11-24 Thread Rajkumar Raghuwanshi
On Thu, Nov 23, 2017 at 2:22 PM, amul sul <sula...@gmail.com> wrote:
> Look like it is the same crash what v20 claim to be fixed, indeed I
> missed to add fix[1] in v20 patch, sorry about that. Attached updated
> patch includes aforementioned fix.

Hi,

I have applied latest v21 patch, it got crashed when enabled
partition-wise-join,
same query is working fine with and without partition-wise-join
enabled on PG-head.
please take a look.

SET enable_partition_wise_join TO true;

CREATE TABLE pt1 (a int, b int, c text, d int) PARTITION BY LIST(c);
CREATE TABLE pt1_p1 PARTITION OF pt1 FOR VALUES IN ('', '0001',
'0002', '0003');
CREATE TABLE pt1_p2 PARTITION OF pt1 FOR VALUES IN ('0004', '0005',
'0006', '0007');
CREATE TABLE pt1_p3 PARTITION OF pt1 FOR VALUES IN ('0008', '0009',
'0010', '0011');
INSERT INTO pt1 SELECT i % 20, i % 30, to_char(i % 12, 'FM'), i %
30 FROM generate_series(0, 9) i;
ANALYZE pt1;

CREATE TABLE pt2 (a int, b int, c text, d int) PARTITION BY LIST(c);
CREATE TABLE pt2_p1 PARTITION OF pt2 FOR VALUES IN ('', '0001',
'0002', '0003');
CREATE TABLE pt2_p2 PARTITION OF pt2 FOR VALUES IN ('0004', '0005',
'0006', '0007');
CREATE TABLE pt2_p3 PARTITION OF pt2 FOR VALUES IN ('0008', '0009',
'0010', '0011');
INSERT INTO pt2 SELECT i % 20, i % 30, to_char(i % 12, 'FM'), i %
30 FROM generate_series(0, 9) i;
ANALYZE pt2;

EXPLAIN ANALYZE
SELECT t1.c, sum(t2.a), COUNT(*) FROM pt1 t1 FULL JOIN pt2 t2 ON t1.c
= t2.c GROUP BY t1.c ORDER BY 1, 2, 3;
WARNING:  terminating connection because of crash of another server process
DETAIL:  The postmaster has commanded this server process to roll back
the current transaction and exit, because another server process
exited abnormally and possibly corrupted shared memory.
HINT:  In a moment you should be able to reconnect to the database and
repeat your command.
server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
The connection to the server was lost. Attempting reset: Failed.
!>

stack-trace is given below.

Core was generated by `postgres: parallel worker for PID 73935
 '.
Program terminated with signal 11, Segmentation fault.
#0  0x006dc4b3 in ExecProcNode (node=0x7f7f7f7f7f7f7f7e) at
../../../src/include/executor/executor.h:238
238             if (node->chgParam != NULL) /* something changed? */
Missing separate debuginfos, use: debuginfo-install
keyutils-libs-1.4-5.el6.x86_64 krb5-libs-1.10.3-65.el6.x86_64
libcom_err-1.41.12-23.el6.x86_64 libselinux-2.0.94-7.el6.x86_64
openssl-1.0.1e-57.el6.x86_64 zlib-1.2.3-29.el6.x86_64
(gdb) bt
#0  0x006dc4b3 in ExecProcNode (node=0x7f7f7f7f7f7f7f7e) at
../../../src/include/executor/executor.h:238
#1  0x006dc72e in ExecAppend (pstate=0x26cd6e0) at nodeAppend.c:207
#2  0x006d1e7c in ExecProcNodeInstr (node=0x26cd6e0) at
execProcnode.c:446
#3  0x006dcee5 in ExecProcNode (node=0x26cd6e0) at
../../../src/include/executor/executor.h:241
#4  0x006dd38c in fetch_input_tuple (aggstate=0x26cd7f8) at
nodeAgg.c:699
#5  0x006e02eb in agg_fill_hash_table (aggstate=0x26cd7f8) at
nodeAgg.c:2536
#6  0x006dfb2b in ExecAgg (pstate=0x26cd7f8) at nodeAgg.c:2148
#7  0x006d1e7c in ExecProcNodeInstr (node=0x26cd7f8) at
execProcnode.c:446
#8  0x006d1e4d in ExecProcNodeFirst (node=0x26cd7f8) at
execProcnode.c:430
#9  0x006c9439 in ExecProcNode (node=0x26cd7f8) at
../../../src/include/executor/executor.h:241
#10 0x006cbd73 in ExecutePlan (estate=0x26ccda0,
planstate=0x26cd7f8, use_parallel_mode=0 '\000', operation=CMD_SELECT,
sendTuples=1 '\001', numberTuples=0,
direction=ForwardScanDirection, dest=0x26b2ce0, execute_once=1
'\001') at execMain.c:1718
#11 0x006c9a12 in standard_ExecutorRun (queryDesc=0x26d7fa0,
direction=ForwardScanDirection, count=0, execute_once=1 '\001') at
execMain.c:361
#12 0x006c982e in ExecutorRun (queryDesc=0x26d7fa0,
direction=ForwardScanDirection, count=0, execute_once=1 '\001') at
execMain.c:304
#13 0x006d096c in ParallelQueryMain (seg=0x26322a8,
toc=0x7fda24d46000) at execParallel.c:1271
#14 0x0053272d in ParallelWorkerMain (main_arg=1203628635) at
parallel.c:1149
#15 0x007e8c99 in StartBackgroundWorker () at bgworker.c:841
#16 0x007fc029 in do_start_bgworker (rw=0x2656d00) at postmaster.c:5741
#17 0x007fc36b in maybe_start_bgworkers () at postmaster.c:5945
#18 0x007fb3fa in sigusr1_handler (postgres_signal_arg=10) at
postmaster.c:5134
#19 <signal handler called>
#20 0x003dd26e1603 in __select_nocancel () at
../sysdeps/unix/syscall-template.S:82
#21 0x007f6bee in ServerLoop () at postmaster.c:1721
#22 0x007f63dd in PostmasterMain (argc=3, argv=0x2630180) at
postmaster.c:1365
#23 0x0072cb40 in main (argc=3, argv=0x2630180) at main.c:228

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation



Re: [HACKERS] path toward faster partition pruning

2017-11-21 Thread Rajkumar Raghuwanshi
On Fri, Nov 17, 2017 at 3:31 PM, Amit Langote <langote_amit...@lab.ntt.co.jp
> wrote:

>
> Support for hash partitioning and tests for the same.  Also, since
> update/delete on partitioned tables still depend on constraint exclusion
> for pruning, fix things such that get_relation_constraints includes
> partition constraints in its result only for non-select queries (for
> selects we have the new pruning code).  Other bug fixes.
>
> Hi Amit,

I have applied attached patch set on commit
11e264517dff7a911d9e6494de86049cab42cde3 and try to test for hash
partition. I got a server crash with below test.

CREATE TABLE hp_tbl (a int, b int, c int) PARTITION BY HASH (a);
CREATE TABLE hp_tbl_p1 PARTITION OF hp_tbl FOR VALUES WITH (modulus 4,
remainder 0) PARTITION BY HASH (b);
CREATE TABLE hp_tbl_p1_p1 PARTITION OF hp_tbl_p1 FOR VALUES WITH (modulus
4, remainder 0) PARTITION BY HASH (c);
CREATE TABLE hp_tbl_p1_p1_p1 PARTITION OF hp_tbl_p1_p1 FOR VALUES WITH
(modulus 4, remainder 0);
CREATE TABLE hp_tbl_p2 PARTITION OF hp_tbl FOR VALUES WITH (modulus 4,
remainder 1) PARTITION BY HASH (b);
CREATE TABLE hp_tbl_p2_p1 PARTITION OF hp_tbl_p2 FOR VALUES WITH (modulus
4, remainder 1);
CREATE TABLE hp_tbl_p2_p2 PARTITION OF hp_tbl_p2 FOR VALUES WITH (modulus
4, remainder 2);
insert into  hp_tbl select i,i,i from generate_series(0,10)i where i not
in(2,4,6,7,10);

explain select * from hp_tbl where a = 2;
server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
The connection to the server was lost. Attempting reset: Failed.
!>

Thanks & Regards,
Rajkumar Raghuwanshi
QMG, EnterpriseDB Corporation