mirror of
				https://github.com/postgres/postgres.git
				synced 2025-10-29 00:05:29 -04:00 
			
		
		
		
	Post-pgindent cleanup
Make slightly better decisions about indentation than what pgindent is capable of. Mostly breaking out long function calls into one line per argument, with a few other minor adjustments. No functional changes — all whitespace. pgindent ran cleanly (didn't change anything) afterward. Passes all regressions.
This commit is contained in:
		
							parent
							
								
									dedf7e9919
								
							
						
					
					
						commit
						551938ae22
					
				| @ -1300,7 +1300,8 @@ hstore_to_json_loose(PG_FUNCTION_ARGS) | ||||
| 			 * digit as numeric - could be a zip code or similar | ||||
| 			 */ | ||||
| 			if (src->len > 0 && | ||||
| 			!(src->data[0] == '0' && isdigit((unsigned char) src->data[1])) && | ||||
| 				!(src->data[0] == '0' && | ||||
| 				  isdigit((unsigned char) src->data[1])) && | ||||
| 				strspn(src->data, "+-0123456789Ee.") == src->len) | ||||
| 			{ | ||||
| 				/*
 | ||||
|  | ||||
| @ -441,6 +441,8 @@ print_rel_infos(RelInfoArr *rel_arr) | ||||
| 
 | ||||
| 	for (relnum = 0; relnum < rel_arr->nrels; relnum++) | ||||
| 		pg_log(PG_VERBOSE, "relname: %s.%s: reloid: %u reltblspace: %s\n", | ||||
| 			   rel_arr->rels[relnum].nspname, rel_arr->rels[relnum].relname, | ||||
| 			 rel_arr->rels[relnum].reloid, rel_arr->rels[relnum].tablespace); | ||||
| 			   rel_arr->rels[relnum].nspname, | ||||
| 			   rel_arr->rels[relnum].relname, | ||||
| 			   rel_arr->rels[relnum].reloid, | ||||
| 			   rel_arr->rels[relnum].tablespace); | ||||
| } | ||||
|  | ||||
| @ -341,10 +341,13 @@ create_new_objects(void) | ||||
| 		 * pg_dump only produces its output at the end, so there is little | ||||
| 		 * parallelism if using the pipe. | ||||
| 		 */ | ||||
| 		parallel_exec_prog(log_file_name, NULL, | ||||
| 		parallel_exec_prog(log_file_name, | ||||
| 						   NULL, | ||||
| 						   "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"", | ||||
| 						 new_cluster.bindir, cluster_conn_opts(&new_cluster), | ||||
| 						   old_db->db_name, sql_file_name); | ||||
| 						   new_cluster.bindir, | ||||
| 						   cluster_conn_opts(&new_cluster), | ||||
| 						   old_db->db_name, | ||||
| 						   sql_file_name); | ||||
| 	} | ||||
| 
 | ||||
| 	/* reap all children */ | ||||
|  | ||||
| @ -53,8 +53,11 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, | ||||
| 									  new_pgdata, old_pgdata); | ||||
| 
 | ||||
| 		for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) | ||||
| 			parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, | ||||
| 								new_pgdata, os_info.old_tablespaces[tblnum]); | ||||
| 			parallel_transfer_all_new_dbs(old_db_arr, | ||||
| 										  new_db_arr, | ||||
| 										  old_pgdata, | ||||
| 										  new_pgdata, | ||||
| 										  os_info.old_tablespaces[tblnum]); | ||||
| 		/* reap all children */ | ||||
| 		while (reap_child(true) == true) | ||||
| 			; | ||||
| @ -230,12 +233,20 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map, | ||||
| 		else | ||||
| 			snprintf(extent_suffix, sizeof(extent_suffix), ".%d", segno); | ||||
| 
 | ||||
| 		snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s", map->old_tablespace, | ||||
| 		   map->old_tablespace_suffix, map->old_db_oid, map->old_relfilenode, | ||||
| 				 type_suffix, extent_suffix); | ||||
| 		snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s", map->new_tablespace, | ||||
| 		   map->new_tablespace_suffix, map->new_db_oid, map->new_relfilenode, | ||||
| 				 type_suffix, extent_suffix); | ||||
| 		snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s", | ||||
| 				 map->old_tablespace, | ||||
| 				 map->old_tablespace_suffix, | ||||
| 				 map->old_db_oid, | ||||
| 				 map->old_relfilenode, | ||||
| 				 type_suffix, | ||||
| 				 extent_suffix); | ||||
| 		snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s", | ||||
| 				 map->new_tablespace, | ||||
| 				 map->new_tablespace_suffix, | ||||
| 				 map->new_db_oid, | ||||
| 				 map->new_relfilenode, | ||||
| 				 type_suffix, | ||||
| 				 extent_suffix); | ||||
| 
 | ||||
| 		/* Is it an extent, fsm, or vm file? */ | ||||
| 		if (type_suffix[0] != '\0' || segno != 0) | ||||
|  | ||||
| @ -999,8 +999,12 @@ top: | ||||
| 							 * this in a special way (see below). | ||||
| 							 */ | ||||
| 							fprintf(logfile, "%ld %d %.0f %.0f %.0f %.0f\n", | ||||
| 							  agg->start_time, agg->cnt, agg->sum, agg->sum2, | ||||
| 									agg->min_duration, agg->max_duration); | ||||
| 									agg->start_time, | ||||
| 									agg->cnt, | ||||
| 									agg->sum, | ||||
| 									agg->sum2, | ||||
| 									agg->min_duration, | ||||
| 									agg->max_duration); | ||||
| 
 | ||||
| 							/* move to the next inteval */ | ||||
| 							agg->start_time = agg->start_time + agg_interval; | ||||
| @ -1625,7 +1629,6 @@ init(bool is_no_vacuum) | ||||
| 			/* have we reached the next interval (or end)? */ | ||||
| 			if ((j == scale * naccounts) || (elapsed_sec >= log_interval * LOG_STEP_SECONDS)) | ||||
| 			{ | ||||
| 
 | ||||
| 				fprintf(stderr, INT64_FORMAT " of " INT64_FORMAT " tuples (%d%%) done (elapsed %.2f s, remaining %.2f s).\n", | ||||
| 						j, (int64) naccounts * scale, | ||||
| 						(int) (((int64) j * 100) / (naccounts * scale)), elapsed_sec, remaining_sec); | ||||
|  | ||||
| @ -610,9 +610,14 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup, | ||||
| 		newtup = gistgetadjusted(indexrel, idxtuple, itup, giststate); | ||||
| 		if (newtup) | ||||
| 		{ | ||||
| 			blkno = gistbufferinginserttuples(buildstate, buffer, level, | ||||
| 											  &newtup, 1, childoffnum, | ||||
| 									InvalidBlockNumber, InvalidOffsetNumber); | ||||
| 			blkno = gistbufferinginserttuples(buildstate, | ||||
| 											  buffer, | ||||
| 											  level, | ||||
| 											  &newtup, | ||||
| 											  1, | ||||
| 											  childoffnum, | ||||
| 											  InvalidBlockNumber, | ||||
| 											  InvalidOffsetNumber); | ||||
| 			/* gistbufferinginserttuples() released the buffer */ | ||||
| 		} | ||||
| 		else | ||||
|  | ||||
| @ -3182,8 +3182,9 @@ l2: | ||||
| 				 * we weren't looking, start over. | ||||
| 				 */ | ||||
| 				if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) || | ||||
| 				!TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data), | ||||
| 									 xwait)) | ||||
| 					!TransactionIdEquals( | ||||
| 									HeapTupleHeaderGetRawXmax(oldtup.t_data), | ||||
| 										 xwait)) | ||||
| 					goto l2; | ||||
| 
 | ||||
| 				can_continue = true; | ||||
| @ -3201,8 +3202,9 @@ l2: | ||||
| 				 * this point. Check for xmax change, and start over if so. | ||||
| 				 */ | ||||
| 				if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) || | ||||
| 				!TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data), | ||||
| 									 xwait)) | ||||
| 					!TransactionIdEquals( | ||||
| 									HeapTupleHeaderGetRawXmax(oldtup.t_data), | ||||
| 										 xwait)) | ||||
| 					goto l2; | ||||
| 
 | ||||
| 				/* Otherwise check if it committed or aborted */ | ||||
| @ -4183,8 +4185,9 @@ l3: | ||||
| 
 | ||||
| 				/* if the xmax changed in the meantime, start over */ | ||||
| 				if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) || | ||||
| 				!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data), | ||||
| 									 xwait)) | ||||
| 					!TransactionIdEquals( | ||||
| 									HeapTupleHeaderGetRawXmax(tuple->t_data), | ||||
| 										 xwait)) | ||||
| 					goto l3; | ||||
| 				/* otherwise, we're good */ | ||||
| 				require_sleep = false; | ||||
| @ -4246,8 +4249,9 @@ l3: | ||||
| 				 * for xmax change, and start over if so. | ||||
| 				 */ | ||||
| 				if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) || | ||||
| 				!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data), | ||||
| 									 xwait)) | ||||
| 					!TransactionIdEquals( | ||||
| 									HeapTupleHeaderGetRawXmax(tuple->t_data), | ||||
| 										 xwait)) | ||||
| 					goto l3; | ||||
| 
 | ||||
| 				/*
 | ||||
| @ -4300,8 +4304,9 @@ l3: | ||||
| 				 * this point.	Check for xmax change, and start over if so. | ||||
| 				 */ | ||||
| 				if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) || | ||||
| 				!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data), | ||||
| 									 xwait)) | ||||
| 					!TransactionIdEquals( | ||||
| 									HeapTupleHeaderGetRawXmax(tuple->t_data), | ||||
| 										 xwait)) | ||||
| 					goto l3; | ||||
| 
 | ||||
| 				/*
 | ||||
|  | ||||
| @ -432,8 +432,8 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status) | ||||
| 	/*
 | ||||
| 	 * Determine which of the members of the MultiXactId are still of | ||||
| 	 * interest. This is any running transaction, and also any transaction | ||||
| 	 * that grabbed something stronger than just a lock and was committed. | ||||
| 	 * (An update that aborted is of no interest here.) | ||||
| 	 * that grabbed something stronger than just a lock and was committed. (An | ||||
| 	 * update that aborted is of no interest here.) | ||||
| 	 * | ||||
| 	 * (Removing dead members is just an optimization, but a useful one. Note | ||||
| 	 * we have the same race condition here as above: j could be 0 at the end | ||||
| @ -1349,7 +1349,9 @@ mXactCacheGetById(MultiXactId multi, MultiXactMember **members) | ||||
| 			memcpy(ptr, entry->members, size); | ||||
| 
 | ||||
| 			debug_elog3(DEBUG2, "CacheGet: found %s", | ||||
| 					 mxid_to_string(multi, entry->nmembers, entry->members)); | ||||
| 						mxid_to_string(multi, | ||||
| 									   entry->nmembers, | ||||
| 									   entry->members)); | ||||
| 			return entry->nmembers; | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| @ -1546,7 +1546,8 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch) | ||||
| 		 */ | ||||
| 		if (LogwrtResult.Write >= XLogCtl->xlblocks[curridx]) | ||||
| 			elog(PANIC, "xlog write request %X/%X is past end of log %X/%X", | ||||
| 			(uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write, | ||||
| 				 (uint32) (LogwrtResult.Write >> 32), | ||||
| 				 (uint32) LogwrtResult.Write, | ||||
| 				 (uint32) (XLogCtl->xlblocks[curridx] >> 32), | ||||
| 				 (uint32) XLogCtl->xlblocks[curridx]); | ||||
| 
 | ||||
| @ -7381,7 +7382,8 @@ CreateRestartPoint(int flags) | ||||
| 	{ | ||||
| 		ereport(DEBUG2, | ||||
| 				(errmsg("skipping restartpoint, already performed at %X/%X", | ||||
| 		(uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo))); | ||||
| 						(uint32) (lastCheckPoint.redo >> 32), | ||||
| 						(uint32) lastCheckPoint.redo))); | ||||
| 
 | ||||
| 		UpdateMinRecoveryPoint(InvalidXLogRecPtr, true); | ||||
| 		if (flags & CHECKPOINT_IS_SHUTDOWN) | ||||
|  | ||||
| @ -705,7 +705,8 @@ EventTriggerDDLCommandStart(Node *parsetree) | ||||
| 		return; | ||||
| 
 | ||||
| 	runlist = EventTriggerCommonSetup(parsetree, | ||||
| 									EVT_DDLCommandStart, "ddl_command_start", | ||||
| 									  EVT_DDLCommandStart, | ||||
| 									  "ddl_command_start", | ||||
| 									  &trigdata); | ||||
| 	if (runlist == NIL) | ||||
| 		return; | ||||
|  | ||||
| @ -9985,7 +9985,8 @@ AlterTableNamespaceInternal(Relation rel, Oid oldNspOid, Oid nspOid, | ||||
| void | ||||
| AlterRelationNamespaceInternal(Relation classRel, Oid relOid, | ||||
| 							   Oid oldNspOid, Oid newNspOid, | ||||
| 							 bool hasDependEntry, ObjectAddresses *objsMoved) | ||||
| 							   bool hasDependEntry, | ||||
| 							   ObjectAddresses *objsMoved) | ||||
| { | ||||
| 	HeapTuple	classTup; | ||||
| 	Form_pg_class classForm; | ||||
| @ -10024,8 +10025,11 @@ AlterRelationNamespaceInternal(Relation classRel, Oid relOid, | ||||
| 
 | ||||
| 		/* Update dependency on schema if caller said so */ | ||||
| 		if (hasDependEntry && | ||||
| 			changeDependencyFor(RelationRelationId, relOid, | ||||
| 							 NamespaceRelationId, oldNspOid, newNspOid) != 1) | ||||
| 			changeDependencyFor(RelationRelationId, | ||||
| 								relOid, | ||||
| 								NamespaceRelationId, | ||||
| 								oldNspOid, | ||||
| 								newNspOid) != 1) | ||||
| 			elog(ERROR, "failed to change schema dependency for relation \"%s\"", | ||||
| 				 NameStr(classForm->relname)); | ||||
| 
 | ||||
|  | ||||
| @ -2210,8 +2210,11 @@ ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo, | ||||
| 
 | ||||
| 	if (trigdesc && trigdesc->trig_delete_after_row) | ||||
| 	{ | ||||
| 		HeapTuple	trigtuple = GetTupleForTrigger(estate, NULL, relinfo, | ||||
| 												 tupleid, LockTupleExclusive, | ||||
| 		HeapTuple	trigtuple = GetTupleForTrigger(estate, | ||||
| 												   NULL, | ||||
| 												   relinfo, | ||||
| 												   tupleid, | ||||
| 												   LockTupleExclusive, | ||||
| 												   NULL); | ||||
| 
 | ||||
| 		AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE, | ||||
| @ -2449,8 +2452,11 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo, | ||||
| 
 | ||||
| 	if (trigdesc && trigdesc->trig_update_after_row) | ||||
| 	{ | ||||
| 		HeapTuple	trigtuple = GetTupleForTrigger(estate, NULL, relinfo, | ||||
| 												 tupleid, LockTupleExclusive, | ||||
| 		HeapTuple	trigtuple = GetTupleForTrigger(estate, | ||||
| 												   NULL, | ||||
| 												   relinfo, | ||||
| 												   tupleid, | ||||
| 												   LockTupleExclusive, | ||||
| 												   NULL); | ||||
| 
 | ||||
| 		AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE, | ||||
|  | ||||
| @ -948,7 +948,6 @@ PGTYPEStimestamp_defmt_asc(char *str, const char *fmt, timestamp * d) | ||||
| int | ||||
| PGTYPEStimestamp_add_interval(timestamp * tin, interval * span, timestamp * tout) | ||||
| { | ||||
| 
 | ||||
| 	if (TIMESTAMP_NOT_FINITE(*tin)) | ||||
| 		*tout = *tin; | ||||
| 
 | ||||
|  | ||||
		Loading…
	
	
			
			x
			
			
		
	
		Reference in New Issue
	
	Block a user