mirror of https://github.com/postgres/postgres.git (synced 2025-10-31 00:03:57 -04:00)
Test instrumentation of Hash nodes with parallel query.

Commit 8526bcb2df76d5171b4f4d6dc7a97560a73a5eff fixed bugs related to both
Sort and Hash, but only added a test case for Sort.  This adds a test case
for Hash to match.

Thomas Munro

Discussion: http://postgr.es/m/CAEepm=2-LRnfwUBZDqQt+XAcd0af_ykNyyVvP3h1uB1AQ=e-eA@mail.gmail.com
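The new queries check for multi-batch behaviour through a hash_join_batches()
helper that is defined earlier in the same regression test file: it runs the
query under EXPLAIN (ANALYZE, FORMAT JSON) and reports the Hash node's batch
counts.  What follows is only an illustrative sketch of how such a helper can
be built, not the test suite's exact definition; the find_hash_node name and
the original result column are assumptions made here, since only the final
column is visible in the diff below.

-- Sketch only: recursively search an EXPLAIN (FORMAT JSON) plan tree for the
-- Hash node.  (find_hash_node is a hypothetical name used for illustration.)
create or replace function find_hash_node(node json) returns json
language plpgsql as
$fn$
declare
  found json;
  child json;
begin
  if node->>'Node Type' = 'Hash' then
    return node;
  end if;
  if node->'Plans' is not null then
    for child in select json_array_elements(node->'Plans')
    loop
      found := find_hash_node(child);
      if found is not null then
        return found;
      end if;
    end loop;
  end if;
  return null;
end;
$fn$;

-- Sketch only: run the query under EXPLAIN ANALYZE and return the Hash node's
-- "Original Hash Batches" and "Hash Batches" instrumentation fields.
create or replace function hash_join_batches(query text)
returns table (original int, final int)
language plpgsql as
$fn$
declare
  whole_plan json;
  hash_node json;
begin
  for whole_plan in
    execute 'explain (analyze, format ''json'') ' || query
  loop
    hash_node := find_hash_node(json_extract_path(whole_plan, '0', 'Plan'));
    original := (hash_node->>'Original Hash Batches')::int;
    final := (hash_node->>'Hash Batches')::int;
    return next;
  end loop;
end;
$fn$;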
parent 8526bcb2df
commit 7d3583ad9a
@@ -6187,6 +6187,112 @@ $$);
         1 |     1
(1 row)

rollback to settings;
-- Exercise rescans.  We'll turn off parallel_leader_participation so
-- that we can check that instrumentation comes back correctly.
create table foo as select generate_series(1, 3) as id, 'xxxxx'::text as t;
alter table foo set (parallel_workers = 0);
create table bar as select generate_series(1, 5000) as id, 'xxxxx'::text as t;
alter table bar set (parallel_workers = 2);
-- multi-batch with rescan, parallel-oblivious
savepoint settings;
set parallel_leader_participation = off;
set min_parallel_table_scan_size = 0;
set parallel_setup_cost = 0;
set parallel_tuple_cost = 0;
set max_parallel_workers_per_gather = 2;
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '64kB';
explain (costs off)
  select count(*) from foo
    left join (select b1.id, b1.t from bar b1 join bar b2 using (id)) ss
    on foo.id < ss.id + 1 and foo.id > ss.id - 1;
                                QUERY PLAN                                
--------------------------------------------------------------------------
 Aggregate
   ->  Nested Loop Left Join
         Join Filter: ((foo.id < (b1.id + 1)) AND (foo.id > (b1.id - 1)))
         ->  Seq Scan on foo
         ->  Gather
               Workers Planned: 2
               ->  Hash Join
                     Hash Cond: (b1.id = b2.id)
                     ->  Parallel Seq Scan on bar b1
                     ->  Hash
                           ->  Seq Scan on bar b2
(11 rows)

select count(*) from foo
  left join (select b1.id, b1.t from bar b1 join bar b2 using (id)) ss
  on foo.id < ss.id + 1 and foo.id > ss.id - 1;
 count 
-------
     3
(1 row)

select final > 1 as multibatch
  from hash_join_batches(
$$
  select count(*) from foo
    left join (select b1.id, b1.t from bar b1 join bar b2 using (id)) ss
    on foo.id < ss.id + 1 and foo.id > ss.id - 1;
$$);
 multibatch 
------------
 t
(1 row)

rollback to settings;
-- single-batch with rescan, parallel-oblivious
savepoint settings;
set parallel_leader_participation = off;
set min_parallel_table_scan_size = 0;
set parallel_setup_cost = 0;
set parallel_tuple_cost = 0;
set max_parallel_workers_per_gather = 2;
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '4MB';
explain (costs off)
  select count(*) from foo
    left join (select b1.id, b1.t from bar b1 join bar b2 using (id)) ss
    on foo.id < ss.id + 1 and foo.id > ss.id - 1;
                                QUERY PLAN                                
--------------------------------------------------------------------------
 Aggregate
   ->  Nested Loop Left Join
         Join Filter: ((foo.id < (b1.id + 1)) AND (foo.id > (b1.id - 1)))
         ->  Seq Scan on foo
         ->  Gather
               Workers Planned: 2
               ->  Hash Join
                     Hash Cond: (b1.id = b2.id)
                     ->  Parallel Seq Scan on bar b1
                     ->  Hash
                           ->  Seq Scan on bar b2
(11 rows)

select count(*) from foo
  left join (select b1.id, b1.t from bar b1 join bar b2 using (id)) ss
  on foo.id < ss.id + 1 and foo.id > ss.id - 1;
 count 
-------
     3
(1 row)

select final > 1 as multibatch
  from hash_join_batches(
$$
  select count(*) from foo
    left join (select b1.id, b1.t from bar b1 join bar b2 using (id)) ss
    on foo.id < ss.id + 1 and foo.id > ss.id - 1;
$$);
 multibatch 
------------
 f
(1 row)

rollback to settings;
-- A full outer join where every record is matched.
-- non-parallel

@@ -2170,6 +2170,66 @@ $$
$$);
rollback to settings;

-- Exercise rescans.  We'll turn off parallel_leader_participation so
-- that we can check that instrumentation comes back correctly.

create table foo as select generate_series(1, 3) as id, 'xxxxx'::text as t;
alter table foo set (parallel_workers = 0);
create table bar as select generate_series(1, 5000) as id, 'xxxxx'::text as t;
alter table bar set (parallel_workers = 2);

-- multi-batch with rescan, parallel-oblivious
savepoint settings;
set parallel_leader_participation = off;
set min_parallel_table_scan_size = 0;
set parallel_setup_cost = 0;
set parallel_tuple_cost = 0;
set max_parallel_workers_per_gather = 2;
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '64kB';
explain (costs off)
  select count(*) from foo
    left join (select b1.id, b1.t from bar b1 join bar b2 using (id)) ss
    on foo.id < ss.id + 1 and foo.id > ss.id - 1;
select count(*) from foo
  left join (select b1.id, b1.t from bar b1 join bar b2 using (id)) ss
  on foo.id < ss.id + 1 and foo.id > ss.id - 1;
select final > 1 as multibatch
  from hash_join_batches(
$$
  select count(*) from foo
    left join (select b1.id, b1.t from bar b1 join bar b2 using (id)) ss
    on foo.id < ss.id + 1 and foo.id > ss.id - 1;
$$);
rollback to settings;

-- single-batch with rescan, parallel-oblivious
savepoint settings;
set parallel_leader_participation = off;
set min_parallel_table_scan_size = 0;
set parallel_setup_cost = 0;
set parallel_tuple_cost = 0;
set max_parallel_workers_per_gather = 2;
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '4MB';
explain (costs off)
  select count(*) from foo
    left join (select b1.id, b1.t from bar b1 join bar b2 using (id)) ss
    on foo.id < ss.id + 1 and foo.id > ss.id - 1;
select count(*) from foo
  left join (select b1.id, b1.t from bar b1 join bar b2 using (id)) ss
  on foo.id < ss.id + 1 and foo.id > ss.id - 1;
select final > 1 as multibatch
  from hash_join_batches(
$$
  select count(*) from foo
    left join (select b1.id, b1.t from bar b1 join bar b2 using (id)) ss
    on foo.id < ss.id + 1 and foo.id > ss.id - 1;
$$);
rollback to settings;

-- A full outer join where every record is matched.

-- non-parallel
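For anyone replaying these tests by hand, the multi-batch versus single-batch
distinction is also visible directly in text-mode EXPLAIN ANALYZE output,
where the Hash node prints its bucket and batch counts.  A rough sketch,
assuming the foo and bar tables created above and the same planner settings
(the exact Buckets/Batches/Memory figures will vary by build and machine):

begin;
set local parallel_leader_participation = off;
set local min_parallel_table_scan_size = 0;
set local parallel_setup_cost = 0;
set local parallel_tuple_cost = 0;
set local max_parallel_workers_per_gather = 2;
set local enable_material = off;
set local enable_mergejoin = off;
-- A small work_mem forces the inner side of the hash join to split into
-- multiple batches; '4MB' keeps it to a single batch.
set local work_mem = '64kB';
explain (analyze, costs off)
  select count(*) from foo
    left join (select b1.id, b1.t from bar b1 join bar b2 using (id)) ss
    on foo.id < ss.id + 1 and foo.id > ss.id - 1;
-- The Hash node's instrumentation appears as a line of the form
--   Buckets: ...  Batches: ...  Memory Usage: ...kB
-- and, with parallel_leader_participation off, should come back from the
-- workers rather than the leader.
rollback;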