mirror of https://github.com/postgres/postgres.git
	Clarify the logic in a few places in the new balanced merge code.
In selectnewtape(), use 'nOutputTapes' rather than 'nOutputRuns' in the
check for whether to start a new tape or to append a new run to an
existing tape. Until 'maxTapes' is reached, nOutputTapes is always equal
to nOutputRuns, so this doesn't change the logic, but it seems more
logical to compare the number of tapes with the number of tapes. Also,
maxTapes is currently never modified after the merging begins, but
written this way, the code would still work if it were. (Although the
nOutputRuns == nOutputTapes assertion would need to be removed, and
using nOutputRuns % nOutputTapes to distribute the runs evenly across
the tapes wouldn't do a good job anymore.)

Similarly in mergeruns(), change to USEMEM(state->tape_buffer_mem) to
account for the memory used for tape buffers. It's equal to availMem
currently, but tape_buffer_mem is more direct and future-proof. For
example, if we changed the logic to allocate only half of the remaining
memory to tape buffers, USEMEM(state->tape_buffer_mem) would still be
correct.

Coverity complained about these. Hopefully this patch helps it to
understand the logic better.

Thanks to Tom Lane for the initial analysis.
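As an illustration of the round-robin behavior described above (a hedged
sketch, not the actual selectnewtape() code; the variable names merely
mirror the fields mentioned in this message), runs beyond maxTapes land
on tape nOutputRuns % nOutputTapes, which spreads them evenly only as
long as nOutputTapes stops growing once maxTapes is reached:

	#include <stdio.h>

	int
	main(void)
	{
		int			maxTapes = 4;	/* illustrative limit, not a real value */
		int			nOutputTapes = 0;
		int			nOutputRuns;

		for (nOutputRuns = 0; nOutputRuns < 10; nOutputRuns++)
		{
			int			destTape;

			if (nOutputTapes < maxTapes)
				destTape = nOutputTapes++;	/* start a new tape for this run */
			else
				destTape = nOutputRuns % nOutputTapes;	/* round robin */

			printf("run %d -> tape %d\n", nOutputRuns, destTape);
		}
		return 0;
	}

With maxTapes fixed at 4, runs 4..9 map to tapes 0, 1, 2, 3, 0, 1. If
maxTapes (and hence nOutputTapes) could grow mid-merge, the modulo
mapping would shift and the distribution would no longer be even, which
is the caveat in the parenthesis above.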
parent b4ada4e19f
commit 166f94377c
@@ -2773,13 +2773,19 @@ inittapestate(Tuplesortstate *state, int maxTapes)
 static void
 selectnewtape(Tuplesortstate *state)
 {
-	if (state->nOutputRuns < state->maxTapes)
+	/*
+	 * At the beginning of each merge pass, nOutputTapes and nOutputRuns are
+	 * both zero.  On each call, we create a new output tape to hold the next
+	 * run, until maxTapes is reached.  After that, we assign new runs to the
+	 * existing tapes in a round robin fashion.
+	 */
+	if (state->nOutputTapes < state->maxTapes)
 	{
 		/* Create a new tape to hold the next run */
 		Assert(state->outputTapes[state->nOutputRuns] == NULL);
 		Assert(state->nOutputRuns == state->nOutputTapes);
 		state->destTape = LogicalTapeCreate(state->tapeset);
-		state->outputTapes[state->nOutputRuns] = state->destTape;
+		state->outputTapes[state->nOutputTapes] = state->destTape;
 		state->nOutputTapes++;
 		state->nOutputRuns++;
 	}
@@ -2908,7 +2914,7 @@ mergeruns(Tuplesortstate *state)
 	 * divide this memory between the input and output tapes in the pass.
 	 */
 	state->tape_buffer_mem = state->availMem;
-	USEMEM(state, state->availMem);
+	USEMEM(state, state->tape_buffer_mem);
 #ifdef TRACE_SORT
 	if (trace_sort)
 		elog(LOG, "worker %d using %zu KB of memory for tape buffers",
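To make the memory-accounting point concrete, here is a minimal sketch,
assuming USEMEM simply deducts the given amount from availMem, as the
macro in tuplesort.c does; the SketchState struct and the halving policy
are hypothetical illustrations, not PostgreSQL code:

	#include <stdio.h>

	/*
	 * Illustrative stand-in for Tuplesortstate; only the two fields that
	 * matter for the accounting argument are modeled.
	 */
	typedef struct
	{
		long		availMem;			/* memory still available */
		long		tape_buffer_mem;	/* memory reserved for tape buffers */
	} SketchState;

	/* Assumed to match tuplesort.c's USEMEM: deduct from availMem. */
	#define USEMEM(state, amt)	((state)->availMem -= (amt))

	int
	main(void)
	{
		SketchState st = {1024 * 1024, 0};

		/*
		 * Hypothetical future policy: give only half of the remaining
		 * memory to tape buffers.  USEMEM(&st, st.tape_buffer_mem) deducts
		 * exactly what was reserved; USEMEM(&st, st.availMem) would
		 * wrongly deduct everything.
		 */
		st.tape_buffer_mem = st.availMem / 2;
		USEMEM(&st, st.tape_buffer_mem);

		printf("reserved %ld, remaining %ld\n",
			   st.tape_buffer_mem, st.availMem);
		return 0;
	}

Under today's policy (tape_buffer_mem = availMem) both forms deduct the
same amount, which is why the change is a no-op now; under the halved
policy, only the tape_buffer_mem form stays correct.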