Compare commits

..

No commits in common. "fd7d7b719137b5c427681a50c0a0ac2d745b68bd" and "b45242fd30ffa6e1e7f490cc400ecbd966880f41" have entirely different histories.

54 changed files with 792 additions and 1796 deletions

View File

@ -24,7 +24,7 @@ tests += {
'tests': [ 'tests': [
't/001_basic.pl', 't/001_basic.pl',
], ],
'env': {'GZIP_PROGRAM': gzip.found() ? gzip.full_path() : '', 'env': {'GZIP_PROGRAM': gzip.found() ? gzip.path() : '',
'TAR': tar.found() ? tar.full_path() : '' }, 'TAR': tar.found() ? tar.path() : '' },
}, },
} }

View File

@ -34,7 +34,7 @@ tests += {
'sql': [ 'sql': [
'dblink', 'dblink',
], ],
'regress_args': ['--dlpath', meson.project_build_root() / 'src/test/regress'], 'regress_args': ['--dlpath', meson.build_root() / 'src/test/regress'],
}, },
'tap': { 'tap': {
'tests': [ 'tests': [

View File

@ -39,7 +39,7 @@ tests += {
'postgres_fdw', 'postgres_fdw',
'query_cancel', 'query_cancel',
], ],
'regress_args': ['--dlpath', meson.project_build_root() / 'src/test/regress'], 'regress_args': ['--dlpath', meson.build_root() / 'src/test/regress'],
}, },
'tap': { 'tap': {
'tests': [ 'tests': [

View File

@ -65,7 +65,7 @@
</para> </para>
<para> <para>
The minimum required version of <application>Meson</application> is 0.57.2. The minimum required version of <application>Meson</application> is 0.54.
</para> </para>
</listitem> </listitem>

View File

@ -37,12 +37,12 @@
<para> <para>
This module provides the <function>pg_buffercache_pages()</function> This module provides the <function>pg_buffercache_pages()</function>
function (wrapped in the <structname>pg_buffercache</structname> view), the function (wrapped in the <structname>pg_buffercache</structname> view),
<function>pg_buffercache_numa_pages()</function> function (wrapped in the <function>pg_buffercache_numa_pages()</function> function (wrapped in the
<structname>pg_buffercache_numa</structname> view), the <structname>pg_buffercache_numa</structname> view), the
<function>pg_buffercache_summary()</function> function, the <function>pg_buffercache_summary()</function> function, the
<function>pg_buffercache_usage_counts()</function> function, the <function>pg_buffercache_usage_counts()</function> function, the
<function>pg_buffercache_evict()</function> function, the <function>pg_buffercache_evict()</function>, the
<function>pg_buffercache_evict_relation()</function> function and the <function>pg_buffercache_evict_relation()</function> function and the
<function>pg_buffercache_evict_all()</function> function. <function>pg_buffercache_evict_all()</function> function.
</para> </para>
@ -55,7 +55,7 @@
</para> </para>
<para> <para>
The <function>pg_buffercache_numa_pages()</function> function provides The <function>pg_buffercache_numa_pages()</function> provides
<acronym>NUMA</acronym> node mappings for shared buffer entries. This <acronym>NUMA</acronym> node mappings for shared buffer entries. This
information is not part of <function>pg_buffercache_pages()</function> information is not part of <function>pg_buffercache_pages()</function>
itself, as it is much slower to retrieve. itself, as it is much slower to retrieve.

View File

@ -11,11 +11,10 @@ project('postgresql',
version: '19devel', version: '19devel',
license: 'PostgreSQL', license: 'PostgreSQL',
# We want < 0.62 for python 3.6 compatibility on old platforms. # We want < 0.56 for python 3.5 compatibility on old platforms. EPEL for
# RHEL 8 has 0.58. < 0.57 would require various additional # RHEL 7 has 0.55. < 0.54 would require replacing some uses of the fs
# backward-compatibility conditionals. # module, < 0.53 all uses of fs. So far there's no need to go to >=0.56.
# Meson 0.57.0 and 0.57.1 are buggy, therefore >=0.57.2. meson_version: '>=0.54',
meson_version: '>=0.57.2',
default_options: [ default_options: [
'warning_level=1', #-Wall equivalent 'warning_level=1', #-Wall equivalent
'b_pch=false', 'b_pch=false',
@ -1289,7 +1288,7 @@ pyopt = get_option('plpython')
python3_dep = not_found_dep python3_dep = not_found_dep
if not pyopt.disabled() if not pyopt.disabled()
pm = import('python') pm = import('python')
python3_inst = pm.find_installation(python.full_path(), required: pyopt) python3_inst = pm.find_installation(python.path(), required: pyopt)
if python3_inst.found() if python3_inst.found()
python3_dep = python3_inst.dependency(embed: true, required: pyopt) python3_dep = python3_inst.dependency(embed: true, required: pyopt)
# Remove this check after we depend on Meson >= 1.1.0 # Remove this check after we depend on Meson >= 1.1.0
@ -3151,13 +3150,13 @@ gen_kwlist_cmd = [
### ###
if host_system == 'windows' if host_system == 'windows'
pg_ico = meson.project_source_root() / 'src' / 'port' / 'win32.ico' pg_ico = meson.source_root() / 'src' / 'port' / 'win32.ico'
win32ver_rc = files('src/port/win32ver.rc') win32ver_rc = files('src/port/win32ver.rc')
rcgen = find_program('src/tools/rcgen', native: true) rcgen = find_program('src/tools/rcgen', native: true)
rcgen_base_args = [ rcgen_base_args = [
'--srcdir', '@SOURCE_DIR@', '--srcdir', '@SOURCE_DIR@',
'--builddir', meson.project_build_root(), '--builddir', meson.build_root(),
'--rcout', '@OUTPUT0@', '--rcout', '@OUTPUT0@',
'--out', '@OUTPUT1@', '--out', '@OUTPUT1@',
'--input', '@INPUT@', '--input', '@INPUT@',
@ -3166,11 +3165,11 @@ if host_system == 'windows'
if cc.get_argument_syntax() == 'msvc' if cc.get_argument_syntax() == 'msvc'
rc = find_program('rc', required: true) rc = find_program('rc', required: true)
rcgen_base_args += ['--rc', rc.full_path()] rcgen_base_args += ['--rc', rc.path()]
rcgen_outputs = ['@BASENAME@.rc', '@BASENAME@.res'] rcgen_outputs = ['@BASENAME@.rc', '@BASENAME@.res']
else else
windres = find_program('windres', required: true) windres = find_program('windres', required: true)
rcgen_base_args += ['--windres', windres.full_path()] rcgen_base_args += ['--windres', windres.path()]
rcgen_outputs = ['@BASENAME@.rc', '@BASENAME@.obj'] rcgen_outputs = ['@BASENAME@.rc', '@BASENAME@.obj']
endif endif
@ -3403,7 +3402,7 @@ foreach t1 : configure_files
potentially_conflicting_files += meson.current_build_dir() / t potentially_conflicting_files += meson.current_build_dir() / t
endforeach endforeach
foreach sub, fnames : generated_sources_ac foreach sub, fnames : generated_sources_ac
sub = meson.project_build_root() / sub sub = meson.build_root() / sub
foreach fname : fnames foreach fname : fnames
potentially_conflicting_files += sub / fname potentially_conflicting_files += sub / fname
endforeach endforeach
@ -3503,7 +3502,7 @@ run_target('install-test-files',
############################################################### ###############################################################
# DESTDIR for the installation we'll run tests in # DESTDIR for the installation we'll run tests in
test_install_destdir = meson.project_build_root() / 'tmp_install/' test_install_destdir = meson.build_root() / 'tmp_install/'
# DESTDIR + prefix appropriately munged # DESTDIR + prefix appropriately munged
if build_system != 'windows' if build_system != 'windows'
@ -3546,7 +3545,7 @@ test('install_test_files',
is_parallel: false, is_parallel: false,
suite: ['setup']) suite: ['setup'])
test_result_dir = meson.project_build_root() / 'testrun' test_result_dir = meson.build_root() / 'testrun'
# XXX: pg_regress doesn't assign unique ports on windows. To avoid the # XXX: pg_regress doesn't assign unique ports on windows. To avoid the
@ -3557,12 +3556,12 @@ testport = 40000
test_env = environment() test_env = environment()
test_initdb_template = meson.project_build_root() / 'tmp_install' / 'initdb-template' test_initdb_template = meson.build_root() / 'tmp_install' / 'initdb-template'
test_env.set('PG_REGRESS', pg_regress.full_path()) test_env.set('PG_REGRESS', pg_regress.full_path())
test_env.set('REGRESS_SHLIB', regress_module.full_path()) test_env.set('REGRESS_SHLIB', regress_module.full_path())
test_env.set('INITDB_TEMPLATE', test_initdb_template) test_env.set('INITDB_TEMPLATE', test_initdb_template)
# for Cluster.pm's portlock logic # for Cluster.pm's portlock logic
test_env.set('top_builddir', meson.project_build_root()) test_env.set('top_builddir', meson.build_root())
# Add the temporary installation to the library search path on platforms where # Add the temporary installation to the library search path on platforms where
# that works (everything but windows, basically). On windows everything # that works (everything but windows, basically). On windows everything
@ -3606,20 +3605,26 @@ sys.exit(sp.returncode)
# Test Generation # Test Generation
############################################################### ###############################################################
# Define a 'tmp_install' test setup (the default) that excludes tests # When using a meson version understanding exclude_suites, define a
# running against a pre-existing install and a 'running' setup that # 'tmp_install' test setup (the default) that excludes tests running against a
# conflicts with creation of the temporary installation and tap tests # pre-existing install and a 'running' setup that conflicts with creation of
# (which don't support running against a running server). # the temporary installation and tap tests (which don't support running
# against a running server).
running_suites = [] running_suites = []
install_suites = [] install_suites = []
if meson.version().version_compare('>=0.57')
runningcheck = true
else
runningcheck = false
endif
testwrap = files('src/tools/testwrap') testwrap = files('src/tools/testwrap')
foreach test_dir : tests foreach test_dir : tests
testwrap_base = [ testwrap_base = [
testwrap, testwrap,
'--basedir', meson.project_build_root(), '--basedir', meson.build_root(),
'--srcdir', test_dir['sd'], '--srcdir', test_dir['sd'],
# Some test suites are not run by default but can be run if selected by the # Some test suites are not run by default but can be run if selected by the
# user via variable PG_TEST_EXTRA. Pass configuration time value of # user via variable PG_TEST_EXTRA. Pass configuration time value of
@ -3709,7 +3714,7 @@ foreach test_dir : tests
install_suites += test_group install_suites += test_group
# some tests can't support running against running DB # some tests can't support running against running DB
if t.get('runningcheck', true) if runningcheck and t.get('runningcheck', true)
test(test_group_running / kind, test(test_group_running / kind,
python, python,
args: [ args: [
@ -3736,8 +3741,8 @@ foreach test_dir : tests
endif endif
test_command = [ test_command = [
perl.full_path(), perl.path(),
'-I', meson.project_source_root() / 'src/test/perl', '-I', meson.source_root() / 'src/test/perl',
'-I', test_dir['sd'], '-I', test_dir['sd'],
] ]
@ -3792,11 +3797,13 @@ foreach test_dir : tests
endforeach # directories with tests endforeach # directories with tests
# repeat condition so meson realizes version dependency # repeat condition so meson realizes version dependency
add_test_setup('tmp_install', if meson.version().version_compare('>=0.57')
add_test_setup('tmp_install',
is_default: true, is_default: true,
exclude_suites: running_suites) exclude_suites: running_suites)
add_test_setup('running', add_test_setup('running',
exclude_suites: ['setup'] + install_suites) exclude_suites: ['setup'] + install_suites)
endif
@ -3853,7 +3860,7 @@ tar_gz = custom_target('tar.gz',
'--format', 'tar.gz', '--format', 'tar.gz',
'-9', '-9',
'--prefix', distdir + '/', '--prefix', distdir + '/',
'-o', join_paths(meson.project_build_root(), '@OUTPUT@'), '-o', join_paths(meson.build_root(), '@OUTPUT@'),
pg_git_revision], pg_git_revision],
output: distdir + '.tar.gz', output: distdir + '.tar.gz',
) )
@ -3863,11 +3870,11 @@ if bzip2.found()
build_always_stale: true, build_always_stale: true,
command: [git, '-C', '@SOURCE_ROOT@', command: [git, '-C', '@SOURCE_ROOT@',
'-c', 'core.autocrlf=false', '-c', 'core.autocrlf=false',
'-c', 'tar.tar.bz2.command="@0@" -c'.format(bzip2.full_path()), '-c', 'tar.tar.bz2.command="@0@" -c'.format(bzip2.path()),
'archive', 'archive',
'--format', 'tar.bz2', '--format', 'tar.bz2',
'--prefix', distdir + '/', '--prefix', distdir + '/',
'-o', join_paths(meson.project_build_root(), '@OUTPUT@'), '-o', join_paths(meson.build_root(), '@OUTPUT@'),
pg_git_revision], pg_git_revision],
output: distdir + '.tar.bz2', output: distdir + '.tar.bz2',
) )
@ -3884,7 +3891,10 @@ alias_target('pgdist', [tar_gz, tar_bz2])
# But not if we are in a subproject, in case the parent project wants to # But not if we are in a subproject, in case the parent project wants to
# create a dist using the standard Meson command. # create a dist using the standard Meson command.
if not meson.is_subproject() if not meson.is_subproject()
# We can only pass the identifier perl here when we depend on >= 0.55
if meson.version().version_compare('>=0.55')
meson.add_dist_script(perl, '-e', 'exit 1') meson.add_dist_script(perl, '-e', 'exit 1')
endif
endif endif
@ -3893,7 +3903,9 @@ endif
# The End, The End, My Friend # The End, The End, My Friend
############################################################### ###############################################################
summary( if meson.version().version_compare('>=0.57')
summary(
{ {
'data block size': '@0@ kB'.format(cdata.get('BLCKSZ') / 1024), 'data block size': '@0@ kB'.format(cdata.get('BLCKSZ') / 1024),
'WAL block size': '@0@ kB'.format(cdata.get('XLOG_BLCKSZ') / 1024), 'WAL block size': '@0@ kB'.format(cdata.get('XLOG_BLCKSZ') / 1024),
@ -3902,26 +3914,26 @@ summary(
'@0@ GB'.format(get_option('segsize')), '@0@ GB'.format(get_option('segsize')),
}, },
section: 'Data layout', section: 'Data layout',
) )
summary( summary(
{ {
'host system': '@0@ @1@'.format(host_system, host_cpu), 'host system': '@0@ @1@'.format(host_system, host_cpu),
'build system': '@0@ @1@'.format(build_machine.system(), 'build system': '@0@ @1@'.format(build_machine.system(),
build_machine.cpu_family()), build_machine.cpu_family()),
}, },
section: 'System', section: 'System',
) )
summary( summary(
{ {
'linker': '@0@'.format(cc.get_linker_id()), 'linker': '@0@'.format(cc.get_linker_id()),
'C compiler': '@0@ @1@'.format(cc.get_id(), cc.version()), 'C compiler': '@0@ @1@'.format(cc.get_id(), cc.version()),
}, },
section: 'Compiler', section: 'Compiler',
) )
summary( summary(
{ {
'CPP FLAGS': ' '.join(cppflags), 'CPP FLAGS': ' '.join(cppflags),
'C FLAGS, functional': ' '.join(cflags), 'C FLAGS, functional': ' '.join(cflags),
@ -3931,9 +3943,9 @@ summary(
'LD FLAGS': ' '.join(ldflags + get_option('c_link_args')), 'LD FLAGS': ' '.join(ldflags + get_option('c_link_args')),
}, },
section: 'Compiler Flags', section: 'Compiler Flags',
) )
if llvm.found() if llvm.found()
summary( summary(
{ {
'C++ compiler': '@0@ @1@'.format(cpp.get_id(), cpp.version()), 'C++ compiler': '@0@ @1@'.format(cpp.get_id(), cpp.version()),
@ -3949,18 +3961,18 @@ if llvm.found()
}, },
section: 'Compiler Flags', section: 'Compiler Flags',
) )
endif endif
summary( summary(
{ {
'bison': '@0@ @1@'.format(bison.full_path(), bison_version), 'bison': '@0@ @1@'.format(bison.full_path(), bison_version),
'dtrace': dtrace, 'dtrace': dtrace,
'flex': '@0@ @1@'.format(flex.full_path(), flex_version), 'flex': '@0@ @1@'.format(flex.full_path(), flex_version),
}, },
section: 'Programs', section: 'Programs',
) )
summary( summary(
{ {
'bonjour': bonjour, 'bonjour': bonjour,
'bsd_auth': bsd_auth, 'bsd_auth': bsd_auth,
@ -3991,4 +4003,6 @@ summary(
}, },
section: 'External libraries', section: 'External libraries',
list_sep: ' ', list_sep: ' ',
) )
endif

View File

@ -431,7 +431,7 @@ static void find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis);
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
BlockNumber blkno, Page page, BlockNumber blkno, Page page,
bool sharelock, Buffer vmbuffer); bool sharelock, Buffer vmbuffer);
static int lazy_scan_prune(LVRelState *vacrel, Buffer buf, static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
BlockNumber blkno, Page page, BlockNumber blkno, Page page,
Buffer vmbuffer, bool all_visible_according_to_vm, Buffer vmbuffer, bool all_visible_according_to_vm,
bool *has_lpdead_items, bool *vm_page_frozen); bool *has_lpdead_items, bool *vm_page_frozen);
@ -1245,7 +1245,6 @@ lazy_scan_heap(LVRelState *vacrel)
Buffer buf; Buffer buf;
Page page; Page page;
uint8 blk_info = 0; uint8 blk_info = 0;
int ndeleted = 0;
bool has_lpdead_items; bool has_lpdead_items;
void *per_buffer_data = NULL; void *per_buffer_data = NULL;
bool vm_page_frozen = false; bool vm_page_frozen = false;
@ -1388,7 +1387,7 @@ lazy_scan_heap(LVRelState *vacrel)
* line pointers previously marked LP_DEAD. * line pointers previously marked LP_DEAD.
*/ */
if (got_cleanup_lock) if (got_cleanup_lock)
ndeleted = lazy_scan_prune(vacrel, buf, blkno, page, lazy_scan_prune(vacrel, buf, blkno, page,
vmbuffer, vmbuffer,
blk_info & VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM, blk_info & VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM,
&has_lpdead_items, &vm_page_frozen); &has_lpdead_items, &vm_page_frozen);
@ -1482,7 +1481,7 @@ lazy_scan_heap(LVRelState *vacrel)
* table has indexes. There will only be newly-freed space if we * table has indexes. There will only be newly-freed space if we
* held the cleanup lock and lazy_scan_prune() was called. * held the cleanup lock and lazy_scan_prune() was called.
*/ */
if (got_cleanup_lock && vacrel->nindexes == 0 && ndeleted > 0 && if (got_cleanup_lock && vacrel->nindexes == 0 && has_lpdead_items &&
blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES) blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
{ {
FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
@ -1937,10 +1936,8 @@ cmpOffsetNumbers(const void *a, const void *b)
* *vm_page_frozen is set to true if the page is newly set all-frozen in the * *vm_page_frozen is set to true if the page is newly set all-frozen in the
* VM. The caller currently only uses this for determining whether an eagerly * VM. The caller currently only uses this for determining whether an eagerly
* scanned page was successfully set all-frozen. * scanned page was successfully set all-frozen.
*
* Returns the number of tuples deleted from the page during HOT pruning.
*/ */
static int static void
lazy_scan_prune(LVRelState *vacrel, lazy_scan_prune(LVRelState *vacrel,
Buffer buf, Buffer buf,
BlockNumber blkno, BlockNumber blkno,
@ -2211,8 +2208,6 @@ lazy_scan_prune(LVRelState *vacrel,
*vm_page_frozen = true; *vm_page_frozen = true;
} }
} }
return presult.ndeleted;
} }
/* /*

View File

@ -16,7 +16,6 @@
#include "postgres.h" #include "postgres.h"
#include "access/nbtree.h" #include "access/nbtree.h"
#include "common/int.h"
#include "lib/qunique.h" #include "lib/qunique.h"
#include "utils/array.h" #include "utils/array.h"
#include "utils/lsyscache.h" #include "utils/lsyscache.h"
@ -57,8 +56,6 @@ static void _bt_skiparray_strat_decrement(IndexScanDesc scan, ScanKey arraysk,
BTArrayKeyInfo *array); BTArrayKeyInfo *array);
static void _bt_skiparray_strat_increment(IndexScanDesc scan, ScanKey arraysk, static void _bt_skiparray_strat_increment(IndexScanDesc scan, ScanKey arraysk,
BTArrayKeyInfo *array); BTArrayKeyInfo *array);
static void _bt_unmark_keys(IndexScanDesc scan, int *keyDataMap);
static int _bt_reorder_array_cmp(const void *a, const void *b);
static ScanKey _bt_preprocess_array_keys(IndexScanDesc scan, int *new_numberOfKeys); static ScanKey _bt_preprocess_array_keys(IndexScanDesc scan, int *new_numberOfKeys);
static void _bt_preprocess_array_keys_final(IndexScanDesc scan, int *keyDataMap); static void _bt_preprocess_array_keys_final(IndexScanDesc scan, int *keyDataMap);
static int _bt_num_array_keys(IndexScanDesc scan, Oid *skip_eq_ops_out, static int _bt_num_array_keys(IndexScanDesc scan, Oid *skip_eq_ops_out,
@ -99,7 +96,7 @@ static int _bt_compare_array_elements(const void *a, const void *b, void *arg);
* incomplete sets of cross-type operators, we may fail to detect redundant * incomplete sets of cross-type operators, we may fail to detect redundant
* or contradictory keys, but we can survive that.) * or contradictory keys, but we can survive that.)
* *
* Required output keys are sorted by index attribute. Presently we expect * The output keys must be sorted by index attribute. Presently we expect
* (but verify) that the input keys are already so sorted --- this is done * (but verify) that the input keys are already so sorted --- this is done
* by match_clauses_to_index() in indxpath.c. Some reordering of the keys * by match_clauses_to_index() in indxpath.c. Some reordering of the keys
* within each attribute may be done as a byproduct of the processing here. * within each attribute may be done as a byproduct of the processing here.
@ -130,36 +127,29 @@ static int _bt_compare_array_elements(const void *a, const void *b, void *arg);
* This has the potential to be much more efficient than a full index scan * This has the potential to be much more efficient than a full index scan
* (though it behaves like a full scan when there's many distinct "x" values). * (though it behaves like a full scan when there's many distinct "x" values).
* *
* Typically, redundant keys are eliminated: we keep only the tightest * If possible, redundant keys are eliminated: we keep only the tightest
* >/>= bound and the tightest </<= bound, and if there's an = key then * >/>= bound and the tightest </<= bound, and if there's an = key then
* that's the only one returned. (So, we return either a single = key, * that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we * or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator, * cannot compare two keys for lack of a suitable cross-type operator,
* we cannot eliminate either key. * we cannot eliminate either. If there are two such keys of the same
* operator strategy, the second one is just pushed into the output array
* without further processing here. We may also emit both >/>= or both
* </<= keys if we can't compare them. The logic about required keys still
* works if we don't eliminate redundant keys.
* *
* When all redundant keys could not be eliminated, we'll output a key array * Note that one reason we need direction-sensitive required-key flags is
* that can more or less be treated as if it had no redundant keys. Suppose * precisely that we may not be able to eliminate redundant keys. Suppose
* we have "x > 4::int AND x > 10::bigint AND x < 70", and we are unable to * we have "x > 4::int AND x > 10::bigint", and we are unable to determine
* determine which > key is more restrictive for lack of a suitable cross-type * which key is more restrictive for lack of a suitable cross-type operator.
* operator. We'll arbitrarily pick one of the > keys; the other > key won't * _bt_first will arbitrarily pick one of the keys to do the initial
* be marked required. Obviously, the scan will be less efficient if we * positioning with. If it picks x > 4, then the x > 10 condition will fail
* choose x > 4 over x > 10 -- but it can still largely proceed as if there * until we reach index entries > 10; but we can't stop the scan just because
* was only a single > condition. "x > 10" will be placed at the end of the * x > 10 is failing. On the other hand, if we are scanning backwards, then
* so->keyData[] output array. It'll always be evaluated last, after the keys * failure of either key is indeed enough to stop the scan. (In general, when
* that could be marked required in the usual way (after "x > 4 AND x < 70"). * inequality keys are present, the initial-positioning code only promises to
* This can sometimes result in so->keyData[] keys that aren't even in index * position before the first possible match, not exactly at the first match,
* attribute order (if the qual involves multiple attributes). The scan's * for a forward scan; or after the last match for a backward scan.)
* required keys will still be in attribute order, though, so it can't matter.
*
* This scheme ensures that _bt_first always uses the same set of keys at the
* start of a forwards scan as those _bt_checkkeys uses to determine when to
* end a similar backwards scan (and vice-versa). _bt_advance_array_keys
* depends on this: it expects to be able to reliably predict what the next
* _bt_first call will do by testing whether _bt_checkkeys' routines report
* that the final tuple on the page is past the end of matches for the scan's
* keys with the scan direction flipped. If it is (if continuescan=false),
* then it follows that calling _bt_first will, at a minimum, relocate the
* scan to the very next leaf page (in the current scan direction).
* *
* As a byproduct of this work, we can detect contradictory quals such * As a byproduct of this work, we can detect contradictory quals such
* as "x = 1 AND x > 2". If we see that, we return so->qual_ok = false, * as "x = 1 AND x > 2". If we see that, we return so->qual_ok = false,
@ -198,8 +188,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
int numberOfEqualCols; int numberOfEqualCols;
ScanKey inkeys; ScanKey inkeys;
BTScanKeyPreproc xform[BTMaxStrategyNumber]; BTScanKeyPreproc xform[BTMaxStrategyNumber];
bool test_result, bool test_result;
redundant_key_kept = false;
AttrNumber attno; AttrNumber attno;
ScanKey arrayKeyData; ScanKey arrayKeyData;
int *keyDataMap = NULL; int *keyDataMap = NULL;
@ -399,8 +388,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
xform[j].inkey = NULL; xform[j].inkey = NULL;
xform[j].inkeyi = -1; xform[j].inkeyi = -1;
} }
else /* else, cannot determine redundancy, keep both keys */
redundant_key_kept = true;
} }
/* track number of attrs for which we have "=" keys */ /* track number of attrs for which we have "=" keys */
numberOfEqualCols++; numberOfEqualCols++;
@ -421,8 +409,6 @@ _bt_preprocess_keys(IndexScanDesc scan)
else else
xform[BTLessStrategyNumber - 1].inkey = NULL; xform[BTLessStrategyNumber - 1].inkey = NULL;
} }
else
redundant_key_kept = true;
} }
/* try to keep only one of >, >= */ /* try to keep only one of >, >= */
@ -440,8 +426,6 @@ _bt_preprocess_keys(IndexScanDesc scan)
else else
xform[BTGreaterStrategyNumber - 1].inkey = NULL; xform[BTGreaterStrategyNumber - 1].inkey = NULL;
} }
else
redundant_key_kept = true;
} }
/* /*
@ -482,6 +466,25 @@ _bt_preprocess_keys(IndexScanDesc scan)
/* check strategy this key's operator corresponds to */ /* check strategy this key's operator corresponds to */
j = inkey->sk_strategy - 1; j = inkey->sk_strategy - 1;
/* if row comparison, push it directly to the output array */
if (inkey->sk_flags & SK_ROW_HEADER)
{
ScanKey outkey = &so->keyData[new_numberOfKeys++];
memcpy(outkey, inkey, sizeof(ScanKeyData));
if (arrayKeyData)
keyDataMap[new_numberOfKeys - 1] = i;
if (numberOfEqualCols == attno - 1)
_bt_mark_scankey_required(outkey);
/*
* We don't support RowCompare using equality; such a qual would
* mess up the numberOfEqualCols tracking.
*/
Assert(j != (BTEqualStrategyNumber - 1));
continue;
}
if (inkey->sk_strategy == BTEqualStrategyNumber && if (inkey->sk_strategy == BTEqualStrategyNumber &&
(inkey->sk_flags & SK_SEARCHARRAY)) (inkey->sk_flags & SK_SEARCHARRAY))
{ {
@ -590,8 +593,9 @@ _bt_preprocess_keys(IndexScanDesc scan)
* the new scan key. * the new scan key.
* *
* Note: We do things this way around so that our arrays are * Note: We do things this way around so that our arrays are
* always in the same order as their corresponding scan keys. * always in the same order as their corresponding scan keys,
* _bt_preprocess_array_keys_final expects this. * even with incomplete opfamilies. _bt_advance_array_keys
* depends on this.
*/ */
ScanKey outkey = &so->keyData[new_numberOfKeys++]; ScanKey outkey = &so->keyData[new_numberOfKeys++];
@ -603,7 +607,6 @@ _bt_preprocess_keys(IndexScanDesc scan)
xform[j].inkey = inkey; xform[j].inkey = inkey;
xform[j].inkeyi = i; xform[j].inkeyi = i;
xform[j].arrayidx = arrayidx; xform[j].arrayidx = arrayidx;
redundant_key_kept = true;
} }
} }
} }
@ -619,15 +622,6 @@ _bt_preprocess_keys(IndexScanDesc scan)
if (arrayKeyData) if (arrayKeyData)
_bt_preprocess_array_keys_final(scan, keyDataMap); _bt_preprocess_array_keys_final(scan, keyDataMap);
/*
* If there are remaining redundant inequality keys, we must make sure
* that each index attribute has no more than one required >/>= key, and
* no more than one required </<= key. Attributes that have one or more
* required = keys now must keep only one required key (the first = key).
*/
if (unlikely(redundant_key_kept) && so->qual_ok)
_bt_unmark_keys(scan, keyDataMap);
/* Could pfree arrayKeyData/keyDataMap now, but not worth the cycles */ /* Could pfree arrayKeyData/keyDataMap now, but not worth the cycles */
} }
@ -752,12 +746,9 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* *
* Depending on the operator type, the key may be required for both scan * Depending on the operator type, the key may be required for both scan
* directions or just one. Also, if the key is a row comparison header, * directions or just one. Also, if the key is a row comparison header,
* we have to mark the appropriate subsidiary ScanKeys as required. In such * we have to mark its first subsidiary ScanKey as required. (Subsequent
* cases, the first subsidiary key is required, but subsequent ones are * subsidiary ScanKeys are normally for lower-order columns, and thus
* required only as long as they correspond to successive index columns and * cannot be required, since they're after the first non-equality scankey.)
* match the leading column as to sort direction. Otherwise the row
* comparison ordering is different from the index ordering and so we can't
* stop the scan on the basis of those lower-order columns.
* *
* Note: when we set required-key flag bits in a subsidiary scankey, we are * Note: when we set required-key flag bits in a subsidiary scankey, we are
* scribbling on a data structure belonging to the index AM's caller, not on * scribbling on a data structure belonging to the index AM's caller, not on
@ -795,25 +786,12 @@ _bt_mark_scankey_required(ScanKey skey)
if (skey->sk_flags & SK_ROW_HEADER) if (skey->sk_flags & SK_ROW_HEADER)
{ {
ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument); ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
AttrNumber attno = skey->sk_attno;
/* First subkey should be same column/operator as the header */ /* First subkey should be same column/operator as the header */
Assert(subkey->sk_attno == attno);
Assert(subkey->sk_strategy == skey->sk_strategy);
for (;;)
{
Assert(subkey->sk_flags & SK_ROW_MEMBER); Assert(subkey->sk_flags & SK_ROW_MEMBER);
if (subkey->sk_attno != attno) Assert(subkey->sk_attno == skey->sk_attno);
break; /* non-adjacent key, so not required */ Assert(subkey->sk_strategy == skey->sk_strategy);
if (subkey->sk_strategy != skey->sk_strategy)
break; /* wrong direction, so not required */
subkey->sk_flags |= addflags; subkey->sk_flags |= addflags;
if (subkey->sk_flags & SK_ROW_END)
break;
subkey++;
attno++;
}
} }
} }
@ -869,7 +847,8 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
cmp_op; cmp_op;
StrategyNumber strat; StrategyNumber strat;
Assert(!((leftarg->sk_flags | rightarg->sk_flags) & SK_ROW_MEMBER)); Assert(!((leftarg->sk_flags | rightarg->sk_flags) &
(SK_ROW_HEADER | SK_ROW_MEMBER)));
/* /*
* First, deal with cases where one or both args are NULL. This should * First, deal with cases where one or both args are NULL. This should
@ -945,16 +924,6 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
return true; return true;
} }
/*
* We don't yet know how to determine redundancy when it involves a row
* compare key (barring simple cases involving IS NULL/IS NOT NULL)
*/
if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ROW_HEADER)
{
Assert(!((leftarg->sk_flags | rightarg->sk_flags) & SK_BT_SKIP));
return false;
}
/* /*
* If either leftarg or rightarg are equality-type array scankeys, we need * If either leftarg or rightarg are equality-type array scankeys, we need
* specialized handling (since by now we know that IS NULL wasn't used) * specialized handling (since by now we know that IS NULL wasn't used)
@ -1498,283 +1467,6 @@ _bt_skiparray_strat_increment(IndexScanDesc scan, ScanKey arraysk,
} }
} }
/*
* _bt_unmark_keys() -- make superfluous required keys nonrequired after all
*
* When _bt_preprocess_keys fails to eliminate one or more redundant keys, it
* calls here to make sure that no index attribute has more than one > or >=
* key marked required, and no more than one required < or <= key. Attributes
* with = keys will always get one = key as their required key. All other
* keys that were initially marked required get "unmarked" here. That way,
* _bt_first and _bt_checkkeys will reliably agree on which keys to use to
* start and/or to end the scan.
*
* We also relocate keys that become/started out nonrequired to the end of
* so->keyData[]. That way, _bt_first and _bt_checkkeys cannot fail to reach
* a required key due to some earlier nonrequired key getting in the way.
*
* Only call here when _bt_compare_scankey_args returned false at least once
* (otherwise, calling here will just waste cycles).
*/
static void
_bt_unmark_keys(IndexScanDesc scan, int *keyDataMap)
{
BTScanOpaque so = (BTScanOpaque) scan->opaque;
AttrNumber attno;
bool *unmarkikey;
int nunmark,
nunmarked,
nkept,
firsti;
ScanKey keepKeys,
unmarkKeys;
FmgrInfo *keepOrderProcs = NULL,
*unmarkOrderProcs = NULL;
bool haveReqEquals,
haveReqForward,
haveReqBackward;
/*
* Do an initial pass over so->keyData[] that determines which keys to
* keep as required. We expect so->keyData[] to still be in attribute
* order when we're called (though we don't expect any particular order
* among each attribute's keys).
*
* When both equality and inequality keys remain on a single attribute, we
* *must* make sure that exactly one of the equalities remains required.
* Any requiredness markings that we might leave on later keys/attributes
* are predicated on there being required = keys on all prior columns.
*/
unmarkikey = palloc0(so->numberOfKeys * sizeof(bool));
nunmark = 0;
/* Set things up for first key's attribute */
attno = so->keyData[0].sk_attno;
firsti = 0;
haveReqEquals = false;
haveReqForward = false;
haveReqBackward = false;
for (int i = 0; i < so->numberOfKeys; i++)
{
ScanKey origkey = &so->keyData[i];
if (origkey->sk_attno != attno)
{
/* Reset for next attribute */
attno = origkey->sk_attno;
firsti = i;
haveReqEquals = false;
haveReqForward = false;
haveReqBackward = false;
}
/* Equalities get priority over inequalities */
if (haveReqEquals)
{
/*
* We already found the first "=" key for this attribute. We've
* already decided that all its other keys will be unmarked.
*/
Assert(!(origkey->sk_flags & SK_SEARCHNULL));
unmarkikey[i] = true;
nunmark++;
continue;
}
else if ((origkey->sk_flags & SK_BT_REQFWD) &&
(origkey->sk_flags & SK_BT_REQBKWD))
{
/*
* Found the first "=" key for attno. All other attno keys will
* be unmarked.
*/
Assert(origkey->sk_strategy == BTEqualStrategyNumber);
haveReqEquals = true;
for (int j = firsti; j < i; j++)
{
/* Unmark any prior inequality keys on attno after all */
if (!unmarkikey[j])
{
unmarkikey[j] = true;
nunmark++;
}
}
continue;
}
/* Deal with inequalities next */
if ((origkey->sk_flags & SK_BT_REQFWD) && !haveReqForward)
{
haveReqForward = true;
continue;
}
else if ((origkey->sk_flags & SK_BT_REQBKWD) && !haveReqBackward)
{
haveReqBackward = true;
continue;
}
/*
* We have either a redundant inequality key that will be unmarked, or
* we have a key that wasn't marked required in the first place
*/
unmarkikey[i] = true;
nunmark++;
}
/* Should only be called when _bt_compare_scankey_args reported failure */
Assert(nunmark > 0);
/*
* Next, allocate temp arrays: one for required keys that'll remain
* required, the other for all remaining keys
*/
unmarkKeys = palloc(nunmark * sizeof(ScanKeyData));
keepKeys = palloc((so->numberOfKeys - nunmark) * sizeof(ScanKeyData));
nunmarked = 0;
nkept = 0;
if (so->numArrayKeys)
{
unmarkOrderProcs = palloc(nunmark * sizeof(FmgrInfo));
keepOrderProcs = palloc((so->numberOfKeys - nunmark) * sizeof(FmgrInfo));
}
/*
* Next, copy the contents of so->keyData[] into the appropriate temp
* array.
*
* Scans with = array keys need us to maintain invariants around the order
* of so->orderProcs[] and so->arrayKeys[] relative to so->keyData[]. See
* _bt_preprocess_array_keys_final for a full explanation.
*/
for (int i = 0; i < so->numberOfKeys; i++)
{
ScanKey origkey = &so->keyData[i];
ScanKey unmark;
if (!unmarkikey[i])
{
/*
* Key gets to keep its original requiredness markings.
*
* Key will stay in its original position, unless we're going to
* unmark an earlier key (in which case this key gets moved back).
*/
memcpy(keepKeys + nkept, origkey, sizeof(ScanKeyData));
if (so->numArrayKeys)
{
keyDataMap[i] = nkept;
memcpy(keepOrderProcs + nkept, &so->orderProcs[i],
sizeof(FmgrInfo));
}
nkept++;
continue;
}
/*
* Key will be unmarked as needed, and moved to the end of the array,
* next to other keys that will become (or always were) nonrequired
*/
unmark = unmarkKeys + nunmarked;
memcpy(unmark, origkey, sizeof(ScanKeyData));
if (so->numArrayKeys)
{
keyDataMap[i] = (so->numberOfKeys - nunmark) + nunmarked;
memcpy(&unmarkOrderProcs[nunmarked], &so->orderProcs[i],
sizeof(FmgrInfo));
}
/*
* Preprocessing only generates skip arrays when it knows that they'll
* be the only required = key on the attr. We'll never unmark them.
*/
Assert(!(unmark->sk_flags & SK_BT_SKIP));
/*
* Also shouldn't have to unmark an IS NULL or an IS NOT NULL key.
* They aren't cross-type, so an incomplete opfamily can't matter.
*/
Assert(!(unmark->sk_flags & SK_ISNULL) ||
!(unmark->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)));
/* Clear requiredness flags on redundant key (and on any subkeys) */
unmark->sk_flags &= ~(SK_BT_REQFWD | SK_BT_REQBKWD);
if (unmark->sk_flags & SK_ROW_HEADER)
{
ScanKey subkey = (ScanKey) DatumGetPointer(unmark->sk_argument);
Assert(subkey->sk_strategy == unmark->sk_strategy);
for (;;)
{
Assert(subkey->sk_flags & SK_ROW_MEMBER);
subkey->sk_flags &= ~(SK_BT_REQFWD | SK_BT_REQBKWD);
if (subkey->sk_flags & SK_ROW_END)
break;
subkey++;
}
}
nunmarked++;
}
/* Copy both temp arrays back into so->keyData[] to reorder */
Assert(nkept == so->numberOfKeys - nunmark);
Assert(nunmarked == nunmark);
memcpy(so->keyData, keepKeys, sizeof(ScanKeyData) * nkept);
memcpy(so->keyData + nkept, unmarkKeys, sizeof(ScanKeyData) * nunmarked);
/* Done with temp arrays */
pfree(unmarkikey);
pfree(keepKeys);
pfree(unmarkKeys);
/*
* Now copy so->orderProcs[] temp entries needed by scans with = array
* keys back (just like with the so->keyData[] temp arrays)
*/
if (so->numArrayKeys)
{
memcpy(so->orderProcs, keepOrderProcs, sizeof(FmgrInfo) * nkept);
memcpy(so->orderProcs + nkept, unmarkOrderProcs,
sizeof(FmgrInfo) * nunmarked);
/* Also fix-up array->scan_key references */
for (int arridx = 0; arridx < so->numArrayKeys; arridx++)
{
BTArrayKeyInfo *array = &so->arrayKeys[arridx];
array->scan_key = keyDataMap[array->scan_key];
}
/*
* Sort so->arrayKeys[] based on its new BTArrayKeyInfo.scan_key
* offsets, so that its order matches so->keyData[] order as expected
*/
qsort(so->arrayKeys, so->numArrayKeys, sizeof(BTArrayKeyInfo),
_bt_reorder_array_cmp);
/* Done with temp arrays */
pfree(unmarkOrderProcs);
pfree(keepOrderProcs);
}
}
/*
* qsort comparator for reordering so->arrayKeys[] BTArrayKeyInfo entries
*/
static int
_bt_reorder_array_cmp(const void *a, const void *b)
{
BTArrayKeyInfo *arraya = (BTArrayKeyInfo *) a;
BTArrayKeyInfo *arrayb = (BTArrayKeyInfo *) b;
return pg_cmp_s32(arraya->scan_key, arrayb->scan_key);
}
/* /*
* _bt_preprocess_array_keys() -- Preprocess SK_SEARCHARRAY scan keys * _bt_preprocess_array_keys() -- Preprocess SK_SEARCHARRAY scan keys
* *

View File

@ -960,51 +960,46 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*---------- /*----------
* Examine the scan keys to discover where we need to start the scan. * Examine the scan keys to discover where we need to start the scan.
* The selected scan keys (at most one per index column) are remembered by
* storing their addresses into the local startKeys[] array. The final
* startKeys[] entry's strategy is set in strat_total. (Actually, there
* are a couple of cases where we force a less/more restrictive strategy.)
* *
* We must use the key that was marked required (in the direction opposite * We want to identify the keys that can be used as starting boundaries;
* our own scan's) during preprocessing. Each index attribute can only * these are =, >, or >= keys for a forward scan or =, <, <= keys for
* have one such required key. In general, the keys that we use to find * a backwards scan. We can use keys for multiple attributes so long as
* an initial position when scanning forwards are the same keys that end * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
* the scan on the leaf level when scanning backwards (and vice-versa). * a > or < boundary or find an attribute with no boundary (which can be
* thought of as the same as "> -infinity"), we can't use keys for any
* attributes to its right, because it would break our simplistic notion
* of what initial positioning strategy to use.
* *
* When the scan keys include cross-type operators, _bt_preprocess_keys * When the scan keys include cross-type operators, _bt_preprocess_keys
* may not be able to eliminate redundant keys; in such cases it will * may not be able to eliminate redundant keys; in such cases we will
* arbitrarily pick a usable key for each attribute (and scan direction), * arbitrarily pick a usable one for each attribute. This is correct
* ensuring that there is no more than one key required in each direction. * but possibly not optimal behavior. (For example, with keys like
* We stop considering further keys once we reach the first nonrequired * "x >= 4 AND x >= 5" we would elect to scan starting at x=4 when
* key (which must come after all required keys), so this can't affect us. * x=5 would be more efficient.) Since the situation only arises given
* a poorly-worded query plus an incomplete opfamily, live with it.
* *
* The required keys that we use as starting boundaries have to be =, >, * When both equality and inequality keys appear for a single attribute
* or >= keys for a forward scan or =, <, <= keys for a backwards scan. * (again, only possible when cross-type operators appear), we *must*
* We can use keys for multiple attributes so long as the prior attributes * select one of the equality keys for the starting point, because
* had only =, >= (resp. =, <=) keys. These rules are very similar to the * _bt_checkkeys() will stop the scan as soon as an equality qual fails.
* rules that preprocessing used to determine which keys to mark required. * For example, if we have keys like "x >= 4 AND x = 10" and we elect to
* We cannot always use every required key as a positioning key, though. * start at x=4, we will fail and stop before reaching x=10. If multiple
* Skip arrays necessitate independently applying our own rules here. * equality quals survive preprocessing, however, it doesn't matter which
* Skip arrays are always generally considered = array keys, but we'll * one we use --- by definition, they are either redundant or
* nevertheless treat them as inequalities at certain points of the scan. * contradictory.
* When that happens, it _might_ have implications for the number of
* required keys that we can safely use for initial positioning purposes.
* *
* For example, a forward scan with a skip array on its leading attribute * In practice we rarely see any "attribute boundary key gaps" here.
* (with no low_compare/high_compare) will have at least two required scan * Preprocessing can usually backfill skip array keys for any attributes
* keys, but we won't use any of them as boundary keys during the scan's * that were omitted from the original scan->keyData[] input keys. All
* initial call here. Our positioning key during the first call here can * array keys are always considered = keys, but we'll sometimes need to
* be thought of as representing "> -infinity". Similarly, if such a skip * treat the current key value as if we were using an inequality strategy.
* array's low_compare is "a > 'foo'", then we position using "a > 'foo'" * This happens with range skip arrays, which store inequality keys in the
* during the scan's initial call here; a lower-order key such as "b = 42" * array's low_compare/high_compare fields (used to find the first/last
* can't be used until the "a" array advances beyond MINVAL/low_compare. * set of matches, when = key will lack a usable sk_argument value).
* * These are always preferred over any redundant "standard" inequality
* On the other hand, if such a skip array's low_compare was "a >= 'foo'", * keys on the same column (per the usual rule about preferring = keys).
* then we _can_ use "a >= 'foo' AND b = 42" during the initial call here. * Note also that any column with an = skip array key can never have an
* A subsequent call here might have us use "a = 'fop' AND b = 42". Note * additional, contradictory = key.
* that we treat = and >= as equivalent when scanning forwards (just as we
* treat = and <= as equivalent when scanning backwards). We effectively
* do the same thing (though with a distinct "a" element/value) each time.
* *
* All keys (with the exception of SK_SEARCHNULL keys and SK_BT_SKIP * All keys (with the exception of SK_SEARCHNULL keys and SK_BT_SKIP
* array keys whose array is "null_elem=true") imply a NOT NULL qualifier. * array keys whose array is "null_elem=true") imply a NOT NULL qualifier.
@ -1016,20 +1011,21 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* traversing a lot of null entries at the start of the scan. * traversing a lot of null entries at the start of the scan.
* *
* In this loop, row-comparison keys are treated the same as keys on their * In this loop, row-comparison keys are treated the same as keys on their
* first (leftmost) columns. We'll add all lower-order columns of the row * first (leftmost) columns. We'll add on lower-order columns of the row
* comparison that were marked required during preprocessing below. * comparison below, if possible.
* *
* _bt_advance_array_keys needs to know exactly how we'll reposition the * The selected scan keys (at most one per index column) are remembered by
* scan (should it opt to schedule another primitive index scan). It is * storing their addresses into the local startKeys[] array.
* critical that primscans only be scheduled when they'll definitely make *
* some useful progress. _bt_advance_array_keys does this by calling * _bt_checkkeys/_bt_advance_array_keys decide whether and when to start
* _bt_checkkeys routines that report whether a tuple is past the end of * the next primitive index scan (for scans with array keys) based in part
* matches for the scan's keys (given the scan's current array elements). * on an understanding of how it'll enable us to reposition the scan.
* If the page's final tuple is "after the end of matches" for a scan that * They're directly aware of how we'll sometimes cons up an explicit
* uses the *opposite* scan direction, then it must follow that it's also * SK_SEARCHNOTNULL key. They'll even end primitive scans by applying a
* "before the start of matches" for the actual current scan direction. * symmetric "deduce NOT NULL" rule of their own. This allows top-level
* It is therefore essential that all of our initial positioning rules are * scans to skip large groups of NULLs through repeated deductions about
* symmetric with _bt_checkkeys's corresponding continuescan=false rule. * key strictness (for a required inequality key) and whether NULLs in the
* key's index column are stored last or first (relative to non-NULLs).
* If you update anything here, _bt_checkkeys/_bt_advance_array_keys might * If you update anything here, _bt_checkkeys/_bt_advance_array_keys might
* need to be kept in sync. * need to be kept in sync.
*---------- *----------
@ -1038,17 +1034,18 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (so->numberOfKeys > 0) if (so->numberOfKeys > 0)
{ {
AttrNumber curattr; AttrNumber curattr;
ScanKey bkey; ScanKey chosen;
ScanKey impliesNN; ScanKey impliesNN;
ScanKey cur; ScanKey cur;
/* /*
* bkey will be set to the key that preprocessing left behind as the * chosen is the so-far-chosen key for the current attribute, if any.
* boundary key for this attribute, in this scan direction (if any) * We don't cast the decision in stone until we reach keys for the
* next attribute.
*/ */
cur = so->keyData; cur = so->keyData;
curattr = 1; curattr = 1;
bkey = NULL; chosen = NULL;
/* Also remember any scankey that implies a NOT NULL constraint */ /* Also remember any scankey that implies a NOT NULL constraint */
impliesNN = NULL; impliesNN = NULL;
@ -1061,29 +1058,23 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
{ {
if (i >= so->numberOfKeys || cur->sk_attno != curattr) if (i >= so->numberOfKeys || cur->sk_attno != curattr)
{ {
/* Done looking for the curattr boundary key */
Assert(bkey == NULL ||
(bkey->sk_attno == curattr &&
(bkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))));
Assert(impliesNN == NULL ||
(impliesNN->sk_attno == curattr &&
(impliesNN->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))));
/* /*
* Done looking at keys for curattr.
*
* If this is a scan key for a skip array whose current * If this is a scan key for a skip array whose current
* element is MINVAL, choose low_compare (when scanning * element is MINVAL, choose low_compare (when scanning
* backwards it'll be MAXVAL, and we'll choose high_compare). * backwards it'll be MAXVAL, and we'll choose high_compare).
* *
* Note: if the array's low_compare key makes 'bkey' NULL, * Note: if the array's low_compare key makes 'chosen' NULL,
* then we behave as if the array's first element is -inf, * then we behave as if the array's first element is -inf,
* except when !array->null_elem implies a usable NOT NULL * except when !array->null_elem implies a usable NOT NULL
* constraint. * constraint.
*/ */
if (bkey != NULL && if (chosen != NULL &&
(bkey->sk_flags & (SK_BT_MINVAL | SK_BT_MAXVAL))) (chosen->sk_flags & (SK_BT_MINVAL | SK_BT_MAXVAL)))
{ {
int ikey = bkey - so->keyData; int ikey = chosen - so->keyData;
ScanKey skipequalitykey = bkey; ScanKey skipequalitykey = chosen;
BTArrayKeyInfo *array = NULL; BTArrayKeyInfo *array = NULL;
for (int arridx = 0; arridx < so->numArrayKeys; arridx++) for (int arridx = 0; arridx < so->numArrayKeys; arridx++)
@ -1096,35 +1087,35 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (ScanDirectionIsForward(dir)) if (ScanDirectionIsForward(dir))
{ {
Assert(!(skipequalitykey->sk_flags & SK_BT_MAXVAL)); Assert(!(skipequalitykey->sk_flags & SK_BT_MAXVAL));
bkey = array->low_compare; chosen = array->low_compare;
} }
else else
{ {
Assert(!(skipequalitykey->sk_flags & SK_BT_MINVAL)); Assert(!(skipequalitykey->sk_flags & SK_BT_MINVAL));
bkey = array->high_compare; chosen = array->high_compare;
} }
Assert(bkey == NULL || Assert(chosen == NULL ||
bkey->sk_attno == skipequalitykey->sk_attno); chosen->sk_attno == skipequalitykey->sk_attno);
if (!array->null_elem) if (!array->null_elem)
impliesNN = skipequalitykey; impliesNN = skipequalitykey;
else else
Assert(bkey == NULL && impliesNN == NULL); Assert(chosen == NULL && impliesNN == NULL);
} }
/* /*
* If we didn't find a usable boundary key, see if we can * If we didn't find a usable boundary key, see if we can
* deduce a NOT NULL key * deduce a NOT NULL key
*/ */
if (bkey == NULL && impliesNN != NULL && if (chosen == NULL && impliesNN != NULL &&
((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ? ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
ScanDirectionIsForward(dir) : ScanDirectionIsForward(dir) :
ScanDirectionIsBackward(dir))) ScanDirectionIsBackward(dir)))
{ {
/* Yes, so build the key in notnullkeys[keysz] */ /* Yes, so build the key in notnullkeys[keysz] */
bkey = &notnullkeys[keysz]; chosen = &notnullkeys[keysz];
ScanKeyEntryInitialize(bkey, ScanKeyEntryInitialize(chosen,
(SK_SEARCHNOTNULL | SK_ISNULL | (SK_SEARCHNOTNULL | SK_ISNULL |
(impliesNN->sk_flags & (impliesNN->sk_flags &
(SK_BT_DESC | SK_BT_NULLS_FIRST))), (SK_BT_DESC | SK_BT_NULLS_FIRST))),
@ -1139,12 +1130,12 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
} }
/* /*
* If preprocessing didn't leave a usable boundary key, quit; * If we still didn't find a usable boundary key, quit; else
* else save the boundary key pointer in startKeys[] * save the boundary key pointer in startKeys.
*/ */
if (bkey == NULL) if (chosen == NULL)
break; break;
startKeys[keysz++] = bkey; startKeys[keysz++] = chosen;
/* /*
* We can only consider adding more boundary keys when the one * We can only consider adding more boundary keys when the one
@ -1152,7 +1143,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* (during backwards scans we can only do so when the key that * (during backwards scans we can only do so when the key that
* we just added to startKeys[] uses the = or <= strategy) * we just added to startKeys[] uses the = or <= strategy)
*/ */
strat_total = bkey->sk_strategy; strat_total = chosen->sk_strategy;
if (strat_total == BTGreaterStrategyNumber || if (strat_total == BTGreaterStrategyNumber ||
strat_total == BTLessStrategyNumber) strat_total == BTLessStrategyNumber)
break; break;
@ -1163,19 +1154,19 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* make strat_total > or < (and stop adding boundary keys). * make strat_total > or < (and stop adding boundary keys).
* This can only happen with opclasses that lack skip support. * This can only happen with opclasses that lack skip support.
*/ */
if (bkey->sk_flags & (SK_BT_NEXT | SK_BT_PRIOR)) if (chosen->sk_flags & (SK_BT_NEXT | SK_BT_PRIOR))
{ {
Assert(bkey->sk_flags & SK_BT_SKIP); Assert(chosen->sk_flags & SK_BT_SKIP);
Assert(strat_total == BTEqualStrategyNumber); Assert(strat_total == BTEqualStrategyNumber);
if (ScanDirectionIsForward(dir)) if (ScanDirectionIsForward(dir))
{ {
Assert(!(bkey->sk_flags & SK_BT_PRIOR)); Assert(!(chosen->sk_flags & SK_BT_PRIOR));
strat_total = BTGreaterStrategyNumber; strat_total = BTGreaterStrategyNumber;
} }
else else
{ {
Assert(!(bkey->sk_flags & SK_BT_NEXT)); Assert(!(chosen->sk_flags & SK_BT_NEXT));
strat_total = BTLessStrategyNumber; strat_total = BTLessStrategyNumber;
} }
@ -1189,30 +1180,24 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/* /*
* Done if that was the last scan key output by preprocessing. * Done if that was the last scan key output by preprocessing.
* Also done if we've now examined all keys marked required. * Also done if there is a gap index attribute that lacks a
* usable key (only possible when preprocessing was unable to
* generate a skip array key to "fill in the gap").
*/ */
if (i >= so->numberOfKeys || if (i >= so->numberOfKeys ||
!(cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))) cur->sk_attno != curattr + 1)
break; break;
/* /*
* Reset for next attr. * Reset for next attr.
*/ */
Assert(cur->sk_attno == curattr + 1);
curattr = cur->sk_attno; curattr = cur->sk_attno;
bkey = NULL; chosen = NULL;
impliesNN = NULL; impliesNN = NULL;
} }
/* /*
* If we've located the starting boundary key for curattr, we have * Can we use this key as a starting boundary for this attr?
* no interest in curattr's other required key
*/
if (bkey != NULL)
continue;
/*
* Is this key the starting boundary key for curattr?
* *
* If not, does it imply a NOT NULL constraint? (Because * If not, does it imply a NOT NULL constraint? (Because
* SK_SEARCHNULL keys are always assigned BTEqualStrategyNumber, * SK_SEARCHNULL keys are always assigned BTEqualStrategyNumber,
@ -1222,20 +1207,27 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
{ {
case BTLessStrategyNumber: case BTLessStrategyNumber:
case BTLessEqualStrategyNumber: case BTLessEqualStrategyNumber:
if (chosen == NULL)
{
if (ScanDirectionIsBackward(dir)) if (ScanDirectionIsBackward(dir))
bkey = cur; chosen = cur;
else if (impliesNN == NULL) else
impliesNN = cur; impliesNN = cur;
}
break; break;
case BTEqualStrategyNumber: case BTEqualStrategyNumber:
bkey = cur; /* override any non-equality choice */
chosen = cur;
break; break;
case BTGreaterEqualStrategyNumber: case BTGreaterEqualStrategyNumber:
case BTGreaterStrategyNumber: case BTGreaterStrategyNumber:
if (chosen == NULL)
{
if (ScanDirectionIsForward(dir)) if (ScanDirectionIsForward(dir))
bkey = cur; chosen = cur;
else if (impliesNN == NULL) else
impliesNN = cur; impliesNN = cur;
}
break; break;
} }
} }
@ -1261,18 +1253,16 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
Assert(keysz <= INDEX_MAX_KEYS); Assert(keysz <= INDEX_MAX_KEYS);
for (int i = 0; i < keysz; i++) for (int i = 0; i < keysz; i++)
{ {
ScanKey bkey = startKeys[i]; ScanKey cur = startKeys[i];
Assert(bkey->sk_attno == i + 1); Assert(cur->sk_attno == i + 1);
if (bkey->sk_flags & SK_ROW_HEADER) if (cur->sk_flags & SK_ROW_HEADER)
{ {
/* /*
* Row comparison header: look to the first row member instead * Row comparison header: look to the first row member instead
*/ */
ScanKey subkey = (ScanKey) DatumGetPointer(bkey->sk_argument); ScanKey subkey = (ScanKey) DatumGetPointer(cur->sk_argument);
bool loosen_strat = false,
tighten_strat = false;
/* /*
* Cannot be a NULL in the first row member: _bt_preprocess_keys * Cannot be a NULL in the first row member: _bt_preprocess_keys
@ -1280,18 +1270,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* ever getting this far * ever getting this far
*/ */
Assert(subkey->sk_flags & SK_ROW_MEMBER); Assert(subkey->sk_flags & SK_ROW_MEMBER);
Assert(subkey->sk_attno == bkey->sk_attno); Assert(subkey->sk_attno == cur->sk_attno);
Assert(!(subkey->sk_flags & SK_ISNULL)); Assert(!(subkey->sk_flags & SK_ISNULL));
/*
* This is either a > or >= key (during backwards scans it is
* either < or <=) that was marked required during preprocessing.
* Later so->keyData[] keys can't have been marked required, so
* our row compare header key must be the final startKeys[] entry.
*/
Assert(subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD));
Assert(i == keysz - 1);
/* /*
* The member scankeys are already in insertion format (ie, they * The member scankeys are already in insertion format (ie, they
* have sk_func = 3-way-comparison function) * have sk_func = 3-way-comparison function)
@ -1299,63 +1280,46 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
memcpy(inskey.scankeys + i, subkey, sizeof(ScanKeyData)); memcpy(inskey.scankeys + i, subkey, sizeof(ScanKeyData));
/* /*
* Now look to later row compare members. * If the row comparison is the last positioning key we accepted,
* * try to add additional keys from the lower-order row members.
* If there's an "index attribute gap" between two row compare * (If we accepted independent conditions on additional index
* members, the second member won't have been marked required, and * columns, we use those instead --- doesn't seem worth trying to
* so can't be used as a starting boundary key here. The part of * determine which is more restrictive.) Note that this is OK
* the row comparison that we do still use has to be treated as a * even if the row comparison is of ">" or "<" type, because the
* ">=" or "<=" condition. For example, a qual "(a, c) > (1, 42)" * condition applied to all but the last row member is effectively
* with an omitted intervening index attribute "b" will use an * ">=" or "<=", and so the extra keys don't break the positioning
* insertion scan key "a >= 1". Even the first "a = 1" tuple on * scheme. But, by the same token, if we aren't able to use all
* the leaf level might satisfy the row compare qual. * the row members, then the part of the row comparison that we
* * did use has to be treated as just a ">=" or "<=" condition, and
* We're able to use a _more_ restrictive strategy when we reach a * so we'd better adjust strat_total accordingly.
* NULL row compare member, since they're always unsatisfiable.
* For example, a qual "(a, b, c) >= (1, NULL, 77)" will use an
* insertion scan key "a > 1". All tuples where "a = 1" cannot
* possibly satisfy the row compare qual, so this is safe.
*/ */
if (i == keysz - 1)
{
bool used_all_subkeys = false;
Assert(!(subkey->sk_flags & SK_ROW_END)); Assert(!(subkey->sk_flags & SK_ROW_END));
for (;;) for (;;)
{ {
subkey++; subkey++;
Assert(subkey->sk_flags & SK_ROW_MEMBER); Assert(subkey->sk_flags & SK_ROW_MEMBER);
if (subkey->sk_attno != keysz + 1)
break; /* out-of-sequence, can't use it */
if (subkey->sk_strategy != cur->sk_strategy)
break; /* wrong direction, can't use it */
if (subkey->sk_flags & SK_ISNULL) if (subkey->sk_flags & SK_ISNULL)
{ break; /* can't use null keys */
/*
* NULL member key, can only use earlier keys.
*
* We deliberately avoid checking if this key is marked
* required. All earlier keys are required, and this key
* is unsatisfiable either way, so we can't miss anything.
*/
tighten_strat = true;
break;
}
if (!(subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)))
{
/* nonrequired member key, can only use earlier keys */
loosen_strat = true;
break;
}
Assert(subkey->sk_attno == keysz + 1);
Assert(subkey->sk_strategy == bkey->sk_strategy);
Assert(keysz < INDEX_MAX_KEYS); Assert(keysz < INDEX_MAX_KEYS);
memcpy(inskey.scankeys + keysz, subkey, memcpy(inskey.scankeys + keysz, subkey,
sizeof(ScanKeyData)); sizeof(ScanKeyData));
keysz++; keysz++;
if (subkey->sk_flags & SK_ROW_END) if (subkey->sk_flags & SK_ROW_END)
{
used_all_subkeys = true;
break; break;
} }
Assert(!(loosen_strat && tighten_strat)); }
if (loosen_strat) if (!used_all_subkeys)
{ {
/* Use less restrictive strategy (and fewer member keys) */
switch (strat_total) switch (strat_total)
{ {
case BTLessStrategyNumber: case BTLessStrategyNumber:
@ -1366,54 +1330,40 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break; break;
} }
} }
if (tighten_strat) break; /* done with outer loop */
}
}
else
{ {
/* Use more restrictive strategy (and fewer member keys) */
switch (strat_total)
{
case BTLessEqualStrategyNumber:
strat_total = BTLessStrategyNumber;
break;
case BTGreaterEqualStrategyNumber:
strat_total = BTGreaterStrategyNumber;
break;
}
}
/* done adding to inskey (row comparison keys always come last) */
break;
}
/* /*
* Ordinary comparison key/search-style key. * Ordinary comparison key. Transform the search-style scan key
* to an insertion scan key by replacing the sk_func with the
* appropriate btree comparison function.
* *
* Transform the search-style scan key to an insertion scan key by * If scankey operator is not a cross-type comparison, we can use
* replacing the sk_func with the appropriate btree 3-way-comparison * the cached comparison function; otherwise gotta look it up in
* function. * the catalogs. (That can't lead to infinite recursion, since no
*
* If scankey operator is not a cross-type comparison, we can use the
* cached comparison function; otherwise gotta look it up in the
* catalogs. (That can't lead to infinite recursion, since no
* indexscan initiated by syscache lookup will use cross-data-type * indexscan initiated by syscache lookup will use cross-data-type
* operators.) * operators.)
* *
* We support the convention that sk_subtype == InvalidOid means the * We support the convention that sk_subtype == InvalidOid means
* opclass input type; this hack simplifies life for ScanKeyInit(). * the opclass input type; this is a hack to simplify life for
* ScanKeyInit().
*/ */
if (bkey->sk_subtype == rel->rd_opcintype[i] || if (cur->sk_subtype == rel->rd_opcintype[i] ||
bkey->sk_subtype == InvalidOid) cur->sk_subtype == InvalidOid)
{ {
FmgrInfo *procinfo; FmgrInfo *procinfo;
procinfo = index_getprocinfo(rel, bkey->sk_attno, BTORDER_PROC); procinfo = index_getprocinfo(rel, cur->sk_attno, BTORDER_PROC);
ScanKeyEntryInitializeWithInfo(inskey.scankeys + i, ScanKeyEntryInitializeWithInfo(inskey.scankeys + i,
bkey->sk_flags, cur->sk_flags,
bkey->sk_attno, cur->sk_attno,
InvalidStrategy, InvalidStrategy,
bkey->sk_subtype, cur->sk_subtype,
bkey->sk_collation, cur->sk_collation,
procinfo, procinfo,
bkey->sk_argument); cur->sk_argument);
} }
else else
{ {
@ -1421,19 +1371,21 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
cmp_proc = get_opfamily_proc(rel->rd_opfamily[i], cmp_proc = get_opfamily_proc(rel->rd_opfamily[i],
rel->rd_opcintype[i], rel->rd_opcintype[i],
bkey->sk_subtype, BTORDER_PROC); cur->sk_subtype,
BTORDER_PROC);
if (!RegProcedureIsValid(cmp_proc)) if (!RegProcedureIsValid(cmp_proc))
elog(ERROR, "missing support function %d(%u,%u) for attribute %d of index \"%s\"", elog(ERROR, "missing support function %d(%u,%u) for attribute %d of index \"%s\"",
BTORDER_PROC, rel->rd_opcintype[i], bkey->sk_subtype, BTORDER_PROC, rel->rd_opcintype[i], cur->sk_subtype,
bkey->sk_attno, RelationGetRelationName(rel)); cur->sk_attno, RelationGetRelationName(rel));
ScanKeyEntryInitialize(inskey.scankeys + i, ScanKeyEntryInitialize(inskey.scankeys + i,
bkey->sk_flags, cur->sk_flags,
bkey->sk_attno, cur->sk_attno,
InvalidStrategy, InvalidStrategy,
bkey->sk_subtype, cur->sk_subtype,
bkey->sk_collation, cur->sk_collation,
cmp_proc, cmp_proc,
bkey->sk_argument); cur->sk_argument);
}
} }
} }
@ -1522,8 +1474,6 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (!BufferIsValid(so->currPos.buf)) if (!BufferIsValid(so->currPos.buf))
{ {
Assert(!so->needPrimScan);
/* /*
* We only get here if the index is completely empty. Lock relation * We only get here if the index is completely empty. Lock relation
* because nothing finer to lock exists. Without a buffer lock, it's * because nothing finer to lock exists. Without a buffer lock, it's
@ -1542,6 +1492,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (!BufferIsValid(so->currPos.buf)) if (!BufferIsValid(so->currPos.buf))
{ {
Assert(!so->needPrimScan);
_bt_parallel_done(scan); _bt_parallel_done(scan);
return false; return false;
} }

View File

@ -44,6 +44,7 @@ static bool _bt_array_decrement(Relation rel, ScanKey skey, BTArrayKeyInfo *arra
static bool _bt_array_increment(Relation rel, ScanKey skey, BTArrayKeyInfo *array); static bool _bt_array_increment(Relation rel, ScanKey skey, BTArrayKeyInfo *array);
static bool _bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir, static bool _bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir,
bool *skip_array_set); bool *skip_array_set);
static void _bt_rewind_nonrequired_arrays(IndexScanDesc scan, ScanDirection dir);
static bool _bt_tuple_before_array_skeys(IndexScanDesc scan, ScanDirection dir, static bool _bt_tuple_before_array_skeys(IndexScanDesc scan, ScanDirection dir,
IndexTuple tuple, TupleDesc tupdesc, int tupnatts, IndexTuple tuple, TupleDesc tupdesc, int tupnatts,
bool readpagetup, int sktrig, bool *scanBehind); bool readpagetup, int sktrig, bool *scanBehind);
@ -51,6 +52,7 @@ static bool _bt_advance_array_keys(IndexScanDesc scan, BTReadPageState *pstate,
IndexTuple tuple, int tupnatts, TupleDesc tupdesc, IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
int sktrig, bool sktrig_required); int sktrig, bool sktrig_required);
#ifdef USE_ASSERT_CHECKING #ifdef USE_ASSERT_CHECKING
static bool _bt_verify_arrays_bt_first(IndexScanDesc scan, ScanDirection dir);
static bool _bt_verify_keys_with_arraykeys(IndexScanDesc scan); static bool _bt_verify_keys_with_arraykeys(IndexScanDesc scan);
#endif #endif
static bool _bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir, static bool _bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir,
@ -1032,6 +1034,73 @@ _bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir,
return false; return false;
} }
/*
* _bt_rewind_nonrequired_arrays() -- Rewind SAOP arrays not marked required
*
* Called when _bt_advance_array_keys decides to start a new primitive index
* scan on the basis of the current scan position being before the position
* that _bt_first is capable of repositioning the scan to by applying an
* inequality operator required in the opposite-to-scan direction only.
*
* Although equality strategy scan keys (for both arrays and non-arrays alike)
* are either marked required in both directions or in neither direction,
* there is a sense in which non-required arrays behave like required arrays.
* With a qual such as "WHERE a IN (100, 200) AND b >= 3 AND c IN (5, 6, 7)",
* the scan key on "c" is non-required, but nevertheless enables positioning
* the scan at the first tuple >= "(100, 3, 5)" on the leaf level during the
* first descent of the tree by _bt_first. Later on, there could also be a
* second descent, that places the scan right before tuples >= "(200, 3, 5)".
* _bt_first must never be allowed to build an insertion scan key whose "c"
* entry is set to a value other than 5, the "c" array's first element/value.
* (Actually, it's the first in the current scan direction. This example uses
* a forward scan.)
*
* Calling here resets the array scan key elements for the scan's non-required
* arrays. This is strictly necessary for correctness in a subset of cases
* involving "required in opposite direction"-triggered primitive index scans.
* Not all callers are at risk of _bt_first using a non-required array like
* this, but advancement always resets the arrays when another primitive scan
* is scheduled, just to keep things simple. Array advancement even makes
* sure to reset non-required arrays during scans that have no inequalities.
* (Advancement still won't call here when there are no inequalities, though
* that's just because it's all handled indirectly instead.)
*
* Note: _bt_verify_arrays_bt_first is called by an assertion to enforce that
* everybody got this right.
*
* Note: In practice almost all SAOP arrays are marked required during
* preprocessing (if necessary by generating skip arrays). It is hardly ever
* truly necessary to call here, but consistently doing so is simpler.
*/
static void
_bt_rewind_nonrequired_arrays(IndexScanDesc scan, ScanDirection dir)
{
Relation rel = scan->indexRelation;
BTScanOpaque so = (BTScanOpaque) scan->opaque;
int arrayidx = 0;
for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
{
ScanKey cur = so->keyData + ikey;
BTArrayKeyInfo *array = NULL;
if (!(cur->sk_flags & SK_SEARCHARRAY) ||
cur->sk_strategy != BTEqualStrategyNumber)
continue;
array = &so->arrayKeys[arrayidx++];
Assert(array->scan_key == ikey);
if ((cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)))
continue;
Assert(array->num_elems != -1); /* No non-required skip arrays */
_bt_array_set_low_or_high(rel, cur, array,
ScanDirectionIsForward(dir));
}
}
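
As the header comment explains, a non-required equality array (the "c" array in the example qual) still supplies the value used when _bt_first builds its insertion scan key, so it must be positioned on its first element for the current scan direction before the next primitive scan begins. A minimal standalone model of that rewind step, with made-up structures standing in for BTArrayKeyInfo:

#include <stdio.h>
#include <stdbool.h>

/* Made-up miniature of an equality array scan key */
typedef struct
{
    const char *attname;
    int         nelems;     /* number of elements, sorted for the scan */
    int         cur;        /* index of the current element */
    bool        required;   /* was the key marked required by preprocessing? */
} MiniArrayKey;

/* Rewind every non-required array to its first element in scan direction */
static void
rewind_nonrequired(MiniArrayKey *keys, int nkeys, bool forward)
{
    for (int i = 0; i < nkeys; i++)
    {
        if (keys[i].required)
            continue;
        keys[i].cur = forward ? 0 : keys[i].nelems - 1;
    }
}

int
main(void)
{
    /* WHERE a IN (100, 200) AND b >= 3 AND c IN (5, 6, 7): "c" is non-required */
    MiniArrayKey keys[] = {
        {"a", 2, 1, true},      /* advanced to its second element */
        {"c", 3, 2, false},     /* advanced to its last element */
    };

    rewind_nonrequired(keys, 2, true);
    printf("a cur=%d, c cur=%d\n", keys[0].cur, keys[1].cur);   /* a cur=1, c cur=0 */
    return 0;
}
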
/* /*
* _bt_tuple_before_array_skeys() -- too early to advance required arrays? * _bt_tuple_before_array_skeys() -- too early to advance required arrays?
* *
@ -1311,6 +1380,8 @@ _bt_start_prim_scan(IndexScanDesc scan, ScanDirection dir)
*/ */
if (so->needPrimScan) if (so->needPrimScan)
{ {
Assert(_bt_verify_arrays_bt_first(scan, dir));
/* /*
* Flag was set -- must call _bt_first again, which will reset the * Flag was set -- must call _bt_first again, which will reset the
* scan's needPrimScan flag * scan's needPrimScan flag
@ -1936,7 +2007,14 @@ _bt_advance_array_keys(IndexScanDesc scan, BTReadPageState *pstate,
*/ */
else if (has_required_opposite_direction_only && pstate->finaltup && else if (has_required_opposite_direction_only && pstate->finaltup &&
unlikely(!_bt_oppodir_checkkeys(scan, dir, pstate->finaltup))) unlikely(!_bt_oppodir_checkkeys(scan, dir, pstate->finaltup)))
{
/*
* Make sure that any SAOP arrays that were not marked required by
* preprocessing are reset to their first element for this direction
*/
_bt_rewind_nonrequired_arrays(scan, dir);
goto new_prim_scan; goto new_prim_scan;
}
continue_scan: continue_scan:
@ -1967,6 +2045,8 @@ continue_scan:
*/ */
so->oppositeDirCheck = has_required_opposite_direction_only; so->oppositeDirCheck = has_required_opposite_direction_only;
_bt_rewind_nonrequired_arrays(scan, dir);
/* /*
* skip by setting "look ahead" mechanism's offnum for forwards scans * skip by setting "look ahead" mechanism's offnum for forwards scans
* (backwards scans check scanBehind flag directly instead) * (backwards scans check scanBehind flag directly instead)
@ -2062,6 +2142,48 @@ end_toplevel_scan:
} }
#ifdef USE_ASSERT_CHECKING #ifdef USE_ASSERT_CHECKING
/*
* Verify that the scan's qual state matches what we expect at the point that
* _bt_start_prim_scan is about to start a just-scheduled new primitive scan.
*
* We enforce a rule against non-required array scan keys: they must start out
* with whatever element is the first for the scan's current scan direction.
* See _bt_rewind_nonrequired_arrays comments for an explanation.
*/
static bool
_bt_verify_arrays_bt_first(IndexScanDesc scan, ScanDirection dir)
{
BTScanOpaque so = (BTScanOpaque) scan->opaque;
int arrayidx = 0;
for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
{
ScanKey cur = so->keyData + ikey;
BTArrayKeyInfo *array = NULL;
int first_elem_dir;
if (!(cur->sk_flags & SK_SEARCHARRAY) ||
cur->sk_strategy != BTEqualStrategyNumber)
continue;
array = &so->arrayKeys[arrayidx++];
if (((cur->sk_flags & SK_BT_REQFWD) && ScanDirectionIsForward(dir)) ||
((cur->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsBackward(dir)))
continue;
if (ScanDirectionIsForward(dir))
first_elem_dir = 0;
else
first_elem_dir = array->num_elems - 1;
if (array->cur_elem != first_elem_dir)
return false;
}
return _bt_verify_keys_with_arraykeys(scan);
}
/* /*
* Verify that the scan's "so->keyData[]" scan keys are in agreement with * Verify that the scan's "so->keyData[]" scan keys are in agreement with
* its array key state * its array key state
@ -2072,7 +2194,6 @@ _bt_verify_keys_with_arraykeys(IndexScanDesc scan)
BTScanOpaque so = (BTScanOpaque) scan->opaque; BTScanOpaque so = (BTScanOpaque) scan->opaque;
int last_sk_attno = InvalidAttrNumber, int last_sk_attno = InvalidAttrNumber,
arrayidx = 0; arrayidx = 0;
bool nonrequiredseen = false;
if (!so->qual_ok) if (!so->qual_ok)
return false; return false;
@ -2096,16 +2217,8 @@ _bt_verify_keys_with_arraykeys(IndexScanDesc scan)
if (array->num_elems != -1 && if (array->num_elems != -1 &&
cur->sk_argument != array->elem_values[array->cur_elem]) cur->sk_argument != array->elem_values[array->cur_elem])
return false; return false;
if (cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))
{
if (last_sk_attno > cur->sk_attno) if (last_sk_attno > cur->sk_attno)
return false; return false;
if (nonrequiredseen)
return false;
}
else
nonrequiredseen = true;
last_sk_attno = cur->sk_attno; last_sk_attno = cur->sk_attno;
} }
@ -2438,12 +2551,37 @@ _bt_set_startikey(IndexScanDesc scan, BTReadPageState *pstate)
if (!(key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))) if (!(key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)))
{ {
/* Scan key isn't marked required (corner case) */ /* Scan key isn't marked required (corner case) */
Assert(!(key->sk_flags & SK_ROW_HEADER));
break; /* unsafe */ break; /* unsafe */
} }
if (key->sk_flags & SK_ROW_HEADER) if (key->sk_flags & SK_ROW_HEADER)
{ {
/* RowCompare inequalities currently aren't supported */ /*
break; /* "unsafe" */ * RowCompare inequality.
*
* Only the first subkey from a RowCompare can ever be marked
* required (that happens when the row header is marked required).
* There is no simple, general way for us to transitively deduce
* whether or not every tuple on the page satisfies a RowCompare
* key based only on firsttup and lasttup -- so we just give up.
*/
if (!start_past_saop_eq && !so->skipScan)
break; /* unsafe to go further */
/*
* We have to be even more careful with RowCompares that come
* after an array: we assume it's unsafe to even bypass the array.
* Calling _bt_start_array_keys to recover the scan's arrays
* following use of forcenonrequired mode isn't compatible with
* _bt_check_rowcompare's continuescan=false behavior with NULL
* row compare members. _bt_advance_array_keys must not make a
* decision on the basis of a key not being satisfied in the
* opposite-to-scan direction until the scan reaches a leaf page
* where the same key begins to be satisfied in scan direction.
* The _bt_first !used_all_subkeys behavior makes this limitation
* hard to work around some other way.
*/
return; /* completely unsafe to set pstate.startikey */
} }
if (key->sk_strategy != BTEqualStrategyNumber) if (key->sk_strategy != BTEqualStrategyNumber)
{ {
@ -2940,7 +3078,76 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
Assert(subkey->sk_flags & SK_ROW_MEMBER); Assert(subkey->sk_flags & SK_ROW_MEMBER);
/* When a NULL row member is compared, the row never matches */ if (subkey->sk_attno > tupnatts)
{
/*
* This attribute is truncated (must be high key). The value for
* this attribute in the first non-pivot tuple on the page to the
* right could be any possible value. Assume that truncated
* attribute passes the qual.
*/
Assert(BTreeTupleIsPivot(tuple));
cmpresult = 0;
if (subkey->sk_flags & SK_ROW_END)
break;
subkey++;
continue;
}
datum = index_getattr(tuple,
subkey->sk_attno,
tupdesc,
&isNull);
if (isNull)
{
if (forcenonrequired)
{
/* treating scan's keys as non-required */
}
else if (subkey->sk_flags & SK_BT_NULLS_FIRST)
{
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
* a forward scan, however, we must keep going, because we may
* have initially positioned to the start of the index.
* (_bt_advance_array_keys also relies on this behavior during
* forward scans.)
*/
if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
ScanDirectionIsBackward(dir))
*continuescan = false;
}
else
{
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
* a backward scan, however, we must keep going, because we
* may have initially positioned to the end of the index.
* (_bt_advance_array_keys also relies on this behavior during
* backward scans.)
*/
if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
ScanDirectionIsForward(dir))
*continuescan = false;
}
/*
* In any case, this indextuple doesn't match the qual.
*/
return false;
}
if (subkey->sk_flags & SK_ISNULL) if (subkey->sk_flags & SK_ISNULL)
{ {
/* /*
@ -2965,114 +3172,6 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
return false; return false;
} }
if (subkey->sk_attno > tupnatts)
{
/*
* This attribute is truncated (must be high key). The value for
* this attribute in the first non-pivot tuple on the page to the
* right could be any possible value. Assume that truncated
* attribute passes the qual.
*/
Assert(BTreeTupleIsPivot(tuple));
return true;
}
datum = index_getattr(tuple,
subkey->sk_attno,
tupdesc,
&isNull);
if (isNull)
{
int reqflags;
if (forcenonrequired)
{
/* treating scan's keys as non-required */
}
else if (subkey->sk_flags & SK_BT_NULLS_FIRST)
{
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. However, on a forwards
* scan, we must keep going, because we may have initially
* positioned to the start of the index.
*
* All required NULLS FIRST > row members can use NULL tuple
* values to end backwards scans, just like with other values.
* A qual "WHERE (a, b, c) > (9, 42, 'foo')" can terminate a
* backwards scan upon reaching the index's rightmost "a = 9"
* tuple whose "b" column contains a NULL (if not sooner).
* Since "b" is NULLS FIRST, we can treat its NULLs as "<" 42.
*/
reqflags = SK_BT_REQBKWD;
/*
* When a most significant required NULLS FIRST < row compare
* member sees NULL tuple values during a backwards scan, it
* signals the end of matches for the whole row compare/scan.
* A qual "WHERE (a, b, c) < (9, 42, 'foo')" will terminate a
* backwards scan upon reaching the rightmost tuple whose "a"
* column has a NULL. The "a" NULL value is "<" 9, and yet
* our < row compare will still end the scan. (This isn't
* safe with later/lower-order row members. Notice that it
* can only happen with an "a" NULL some time after the scan
* completely stops needing to use its "b" and "c" members.)
*/
if (subkey == (ScanKey) DatumGetPointer(skey->sk_argument))
reqflags |= SK_BT_REQFWD; /* safe, first row member */
if ((subkey->sk_flags & reqflags) &&
ScanDirectionIsBackward(dir))
*continuescan = false;
}
else
{
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. However, on a backward
* scan, we must keep going, because we may have initially
* positioned to the end of the index.
*
* All required NULLS LAST < row members can use NULL tuple
* values to end forwards scans, just like with other values.
* A qual "WHERE (a, b, c) < (9, 42, 'foo')" can terminate a
* forwards scan upon reaching the index's leftmost "a = 9"
* tuple whose "b" column contains a NULL (if not sooner).
* Since "b" is NULLS LAST, we can treat its NULLs as ">" 42.
*/
reqflags = SK_BT_REQFWD;
/*
* When a most significant required NULLS LAST > row compare
* member sees NULL tuple values during a forwards scan, it
* signals the end of matches for the whole row compare/scan.
* A qual "WHERE (a, b, c) > (9, 42, 'foo')" will terminate a
* forwards scan upon reaching the leftmost tuple whose "a"
* column has a NULL. The "a" NULL value is ">" 9, and yet
* our > row compare will end the scan. (This isn't safe with
* later/lower-order row members. Notice that it can only
* happen with an "a" NULL some time after the scan completely
* stops needing to use its "b" and "c" members.)
*/
if (subkey == (ScanKey) DatumGetPointer(skey->sk_argument))
reqflags |= SK_BT_REQBKWD; /* safe, first row member */
if ((subkey->sk_flags & reqflags) &&
ScanDirectionIsForward(dir))
*continuescan = false;
}
/*
* In any case, this indextuple doesn't match the qual.
*/
return false;
}
/* Perform the test --- three-way comparison not bool operator */ /* Perform the test --- three-way comparison not bool operator */
cmpresult = DatumGetInt32(FunctionCall2Coll(&subkey->sk_func, cmpresult = DatumGetInt32(FunctionCall2Coll(&subkey->sk_func,
subkey->sk_collation, subkey->sk_collation,

View File

@ -4994,25 +4994,13 @@ check_recovery_target_timeline(char **newval, void **extra, GucSource source)
rttg = RECOVERY_TARGET_TIMELINE_LATEST; rttg = RECOVERY_TARGET_TIMELINE_LATEST;
else else
{ {
char *endp;
uint64 timeline;
rttg = RECOVERY_TARGET_TIMELINE_NUMERIC; rttg = RECOVERY_TARGET_TIMELINE_NUMERIC;
errno = 0; errno = 0;
timeline = strtou64(*newval, &endp, 0); strtoul(*newval, NULL, 0);
if (errno == EINVAL || errno == ERANGE)
if (*endp != '\0' || errno == EINVAL || errno == ERANGE)
{ {
GUC_check_errdetail("\"%s\" is not a valid number.", GUC_check_errdetail("\"recovery_target_timeline\" is not a valid number.");
"recovery_target_timeline");
return false;
}
if (timeline < 1 || timeline > PG_UINT32_MAX)
{
GUC_check_errdetail("\"%s\" must be between %u and %u.",
"recovery_target_timeline", 1, UINT_MAX);
return false; return false;
} }
} }
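
The left-hand check parses the setting with strtou64() and validates both the end pointer and the 1..UINT32_MAX range, while the right-hand check calls strtoul() with a NULL end pointer and no range test, so trailing garbage or an out-of-range timeline is rejected only on the left. A minimal standalone sketch of the stricter validation pattern (plain C, not the GUC machinery; parse_timeline is a hypothetical helper):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/* Parse a timeline ID: must be a complete number within 1..UINT32_MAX */
static bool
parse_timeline(const char *str, uint32_t *tli)
{
    char               *endp;
    unsigned long long  val;

    errno = 0;
    val = strtoull(str, &endp, 0);

    if (endp == str || *endp != '\0' || errno == ERANGE)
        return false;           /* not a valid number */
    if (val < 1 || val > UINT32_MAX)
        return false;           /* outside the valid timeline range */

    *tli = (uint32_t) val;
    return true;
}

int
main(void)
{
    uint32_t    tli;

    printf("\"3\"    -> %d\n", parse_timeline("3", &tli));      /* 1 */
    printf("\"3abc\" -> %d\n", parse_timeline("3abc", &tli));   /* 0 */
    printf("\"0\"    -> %d\n", parse_timeline("0", &tli));      /* 0 */
    return 0;
}
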

View File

@ -2711,7 +2711,8 @@ MergeAttributes(List *columns, const List *supers, char relpersistence,
RelationGetRelationName(relation)))); RelationGetRelationName(relation))));
/* If existing rel is temp, it must belong to this session */ /* If existing rel is temp, it must belong to this session */
if (RELATION_IS_OTHER_TEMP(relation)) if (relation->rd_rel->relpersistence == RELPERSISTENCE_TEMP &&
!relation->rd_islocaltemp)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE), (errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg(!is_partition errmsg(!is_partition
@ -17229,13 +17230,15 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent, LOCKMODE lockmode)
RelationGetRelationName(parent_rel)))); RelationGetRelationName(parent_rel))));
/* If parent rel is temp, it must belong to this session */ /* If parent rel is temp, it must belong to this session */
if (RELATION_IS_OTHER_TEMP(parent_rel)) if (parent_rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP &&
!parent_rel->rd_islocaltemp)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE), (errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot inherit from temporary relation of another session"))); errmsg("cannot inherit from temporary relation of another session")));
/* Ditto for the child */ /* Ditto for the child */
if (RELATION_IS_OTHER_TEMP(child_rel)) if (child_rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP &&
!child_rel->rd_islocaltemp)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE), (errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot inherit to temporary relation of another session"))); errmsg("cannot inherit to temporary relation of another session")));
@ -20306,13 +20309,15 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd,
RelationGetRelationName(rel)))); RelationGetRelationName(rel))));
/* If the parent is temp, it must belong to this session */ /* If the parent is temp, it must belong to this session */
if (RELATION_IS_OTHER_TEMP(rel)) if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP &&
!rel->rd_islocaltemp)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE), (errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot attach as partition of temporary relation of another session"))); errmsg("cannot attach as partition of temporary relation of another session")));
/* Ditto for the partition */ /* Ditto for the partition */
if (RELATION_IS_OTHER_TEMP(attachrel)) if (attachrel->rd_rel->relpersistence == RELPERSISTENCE_TEMP &&
!attachrel->rd_islocaltemp)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE), (errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot attach temporary relation of another session as partition"))); errmsg("cannot attach temporary relation of another session as partition")));
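
The left-hand side of these three checks simply folds the open-coded persistence test used on the right into the existing RELATION_IS_OTHER_TEMP() macro; the two spellings should be equivalent. For reference, the macro in src/include/utils/rel.h is approximately:

/* Test for a temporary relation that belongs to some other session */
#define RELATION_IS_OTHER_TEMP(relation) \
    ((relation)->rd_rel->relpersistence == RELPERSISTENCE_TEMP && \
     !(relation)->rd_islocaltemp)
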

View File

@ -53,7 +53,7 @@ llvm_irgen_args = [
if ccache.found() if ccache.found()
llvm_irgen_command = ccache llvm_irgen_command = ccache
llvm_irgen_args = [clang.full_path()] + llvm_irgen_args llvm_irgen_args = [clang.path()] + llvm_irgen_args
else else
llvm_irgen_command = clang llvm_irgen_command = clang
endif endif

View File

@ -154,17 +154,13 @@ add_paths_to_joinrel(PlannerInfo *root,
/* /*
* See if the inner relation is provably unique for this outer rel. * See if the inner relation is provably unique for this outer rel.
* *
* We have some special cases: for JOIN_SEMI, it doesn't matter since the * We have some special cases: for JOIN_SEMI and JOIN_ANTI, it doesn't
* executor can make the equivalent optimization anyway. It also doesn't * matter since the executor can make the equivalent optimization anyway;
* help enable use of Memoize, since a semijoin with a provably unique * we need not expend planner cycles on proofs. For JOIN_UNIQUE_INNER, we
* inner side should have been reduced to an inner join in that case. * must be considering a semijoin whose inner side is not provably unique
* Therefore, we need not expend planner cycles on proofs. (For * (else reduce_unique_semijoins would've simplified it), so there's no
* JOIN_ANTI, although it doesn't help the executor for the same reason, * point in calling innerrel_is_unique. However, if the LHS covers all of
* it can benefit Memoize paths.) For JOIN_UNIQUE_INNER, we must be * the semijoin's min_lefthand, then it's appropriate to set inner_unique
* considering a semijoin whose inner side is not provably unique (else
* reduce_unique_semijoins would've simplified it), so there's no point in
* calling innerrel_is_unique. However, if the LHS covers all of the
* semijoin's min_lefthand, then it's appropriate to set inner_unique
* because the path produced by create_unique_path will be unique relative * because the path produced by create_unique_path will be unique relative
* to the LHS. (If we have an LHS that's only part of the min_lefthand, * to the LHS. (If we have an LHS that's only part of the min_lefthand,
* that is *not* true.) For JOIN_UNIQUE_OUTER, pass JOIN_INNER to avoid * that is *not* true.) For JOIN_UNIQUE_OUTER, pass JOIN_INNER to avoid
@ -173,6 +169,12 @@ add_paths_to_joinrel(PlannerInfo *root,
switch (jointype) switch (jointype)
{ {
case JOIN_SEMI: case JOIN_SEMI:
case JOIN_ANTI:
/*
* XXX it may be worth proving this to allow a Memoize to be
* considered for Nested Loop Semi/Anti Joins.
*/
extra.inner_unique = false; /* well, unproven */ extra.inner_unique = false; /* well, unproven */
break; break;
case JOIN_UNIQUE_INNER: case JOIN_UNIQUE_INNER:
@ -713,21 +715,16 @@ get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel,
return NULL; return NULL;
/* /*
* Currently we don't do this for SEMI and ANTI joins, because nested loop * Currently we don't do this for SEMI and ANTI joins unless they're
* SEMI/ANTI joins don't scan the inner node to completion, which means * marked as inner_unique. This is because nested loop SEMI/ANTI joins
* memoize cannot mark the cache entry as complete. Nor can we mark the * don't scan the inner node to completion, which will mean memoize cannot
* cache entry as complete after fetching the first inner tuple, because * mark the cache entry as complete.
* if that tuple and the current outer tuple don't satisfy the join *
* clauses, a second inner tuple that satisfies the parameters would find * XXX Currently we don't attempt to mark SEMI/ANTI joins as inner_unique
* the cache entry already marked as complete. The only exception is when * = true. Should we? See add_paths_to_joinrel()
* the inner relation is provably unique, as in that case, there won't be
* a second matching tuple and we can safely mark the cache entry as
* complete after fetching the first inner tuple. Note that in such
* cases, the SEMI join should have been reduced to an inner join by
* reduce_unique_semijoins.
*/ */
if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) && if (!extra->inner_unique && (jointype == JOIN_SEMI ||
!extra->inner_unique) jointype == JOIN_ANTI))
return NULL; return NULL;
/* /*

View File

@ -2668,12 +2668,6 @@ alter_table_cmd:
c->alterDeferrability = true; c->alterDeferrability = true;
if ($4 & CAS_NO_INHERIT) if ($4 & CAS_NO_INHERIT)
c->alterInheritability = true; c->alterInheritability = true;
/* handle unsupported case with specific error message */
if ($4 & CAS_NOT_VALID)
ereport(ERROR,
errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("constraints cannot be altered to be NOT VALID"),
parser_errposition(@4));
processCASbits($4, @4, "FOREIGN KEY", processCASbits($4, @4, "FOREIGN KEY",
&c->deferrable, &c->deferrable,
&c->initdeferred, &c->initdeferred,

View File

@ -15,20 +15,6 @@
* current backend. This function guarantees that only one backend * current backend. This function guarantees that only one backend
* initializes the segment and that all other backends just attach it. * initializes the segment and that all other backends just attach it.
* *
* A DSA can be created in or retrieved from the registry by calling
* GetNamedDSA(). As with GetNamedDSMSegment(), if a DSA with the provided
* name does not yet exist, it is created. Otherwise, GetNamedDSA()
* ensures the DSA is attached to the current backend. This function
* guarantees that only one backend initializes the DSA and that all other
* backends just attach it.
*
* A dshash table can be created in or retrieved from the registry by
* calling GetNamedDSHash(). As with GetNamedDSMSegment(), if a hash
* table with the provided name does not yet exist, it is created.
* Otherwise, GetNamedDSHash() ensures the hash table is attached to the
* current backend. This function guarantees that only one backend
* initializes the table and that all other backends just attach it.
*
* Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
@ -46,12 +32,6 @@
#include "storage/shmem.h" #include "storage/shmem.h"
#include "utils/memutils.h" #include "utils/memutils.h"
#define DSMR_NAME_LEN 128
#define DSMR_DSA_TRANCHE_SUFFIX " DSA"
#define DSMR_DSA_TRANCHE_SUFFIX_LEN (sizeof(DSMR_DSA_TRANCHE_SUFFIX) - 1)
#define DSMR_DSA_TRANCHE_NAME_LEN (DSMR_NAME_LEN + DSMR_DSA_TRANCHE_SUFFIX_LEN)
typedef struct DSMRegistryCtxStruct typedef struct DSMRegistryCtxStruct
{ {
dsa_handle dsah; dsa_handle dsah;
@ -60,48 +40,15 @@ typedef struct DSMRegistryCtxStruct
static DSMRegistryCtxStruct *DSMRegistryCtx; static DSMRegistryCtxStruct *DSMRegistryCtx;
typedef struct NamedDSMState
{
dsm_handle handle;
size_t size;
} NamedDSMState;
typedef struct NamedDSAState
{
dsa_handle handle;
int tranche;
char tranche_name[DSMR_DSA_TRANCHE_NAME_LEN];
} NamedDSAState;
typedef struct NamedDSHState
{
NamedDSAState dsa;
dshash_table_handle handle;
int tranche;
char tranche_name[DSMR_NAME_LEN];
} NamedDSHState;
typedef enum DSMREntryType
{
DSMR_ENTRY_TYPE_DSM,
DSMR_ENTRY_TYPE_DSA,
DSMR_ENTRY_TYPE_DSH,
} DSMREntryType;
typedef struct DSMRegistryEntry typedef struct DSMRegistryEntry
{ {
char name[DSMR_NAME_LEN]; char name[64];
DSMREntryType type; dsm_handle handle;
union size_t size;
{
NamedDSMState dsm;
NamedDSAState dsa;
NamedDSHState dsh;
} data;
} DSMRegistryEntry; } DSMRegistryEntry;
static const dshash_parameters dsh_params = { static const dshash_parameters dsh_params = {
offsetof(DSMRegistryEntry, type), offsetof(DSMRegistryEntry, handle),
sizeof(DSMRegistryEntry), sizeof(DSMRegistryEntry),
dshash_strcmp, dshash_strcmp,
dshash_strhash, dshash_strhash,
@ -194,7 +141,7 @@ GetNamedDSMSegment(const char *name, size_t size,
ereport(ERROR, ereport(ERROR,
(errmsg("DSM segment name cannot be empty"))); (errmsg("DSM segment name cannot be empty")));
if (strlen(name) >= offsetof(DSMRegistryEntry, type)) if (strlen(name) >= offsetof(DSMRegistryEntry, handle))
ereport(ERROR, ereport(ERROR,
(errmsg("DSM segment name too long"))); (errmsg("DSM segment name too long")));
@ -211,39 +158,32 @@ GetNamedDSMSegment(const char *name, size_t size,
entry = dshash_find_or_insert(dsm_registry_table, name, found); entry = dshash_find_or_insert(dsm_registry_table, name, found);
if (!(*found)) if (!(*found))
{ {
NamedDSMState *state = &entry->data.dsm;
dsm_segment *seg;
entry->type = DSMR_ENTRY_TYPE_DSM;
/* Initialize the segment. */ /* Initialize the segment. */
seg = dsm_create(size, 0); dsm_segment *seg = dsm_create(size, 0);
dsm_pin_segment(seg); dsm_pin_segment(seg);
dsm_pin_mapping(seg); dsm_pin_mapping(seg);
state->handle = dsm_segment_handle(seg); entry->handle = dsm_segment_handle(seg);
state->size = size; entry->size = size;
ret = dsm_segment_address(seg); ret = dsm_segment_address(seg);
if (init_callback) if (init_callback)
(*init_callback) (ret); (*init_callback) (ret);
} }
else if (entry->type != DSMR_ENTRY_TYPE_DSM) else if (entry->size != size)
{
ereport(ERROR, ereport(ERROR,
(errmsg("requested DSM segment does not match type of existing entry"))); (errmsg("requested DSM segment size does not match size of "
else if (entry->data.dsm.size != size) "existing segment")));
ereport(ERROR, }
(errmsg("requested DSM segment size does not match size of existing segment")));
else else
{ {
NamedDSMState *state = &entry->data.dsm; dsm_segment *seg = dsm_find_mapping(entry->handle);
dsm_segment *seg;
/* If the existing segment is not already attached, attach it now. */ /* If the existing segment is not already attached, attach it now. */
seg = dsm_find_mapping(state->handle);
if (seg == NULL) if (seg == NULL)
{ {
seg = dsm_attach(state->handle); seg = dsm_attach(entry->handle);
if (seg == NULL) if (seg == NULL)
elog(ERROR, "could not map dynamic shared memory segment"); elog(ERROR, "could not map dynamic shared memory segment");
@ -258,180 +198,3 @@ GetNamedDSMSegment(const char *name, size_t size,
return ret; return ret;
} }
/*
* Initialize or attach a named DSA.
*
* This routine returns a pointer to the DSA. A new LWLock tranche ID will be
* generated if needed. Note that the lock tranche will be registered with the
* provided name. Also note that this should be called at most once for a
* given DSA in each backend.
*/
dsa_area *
GetNamedDSA(const char *name, bool *found)
{
DSMRegistryEntry *entry;
MemoryContext oldcontext;
dsa_area *ret;
Assert(found);
if (!name || *name == '\0')
ereport(ERROR,
(errmsg("DSA name cannot be empty")));
if (strlen(name) >= offsetof(DSMRegistryEntry, type))
ereport(ERROR,
(errmsg("DSA name too long")));
/* Be sure any local memory allocated by DSM/DSA routines is persistent. */
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
/* Connect to the registry. */
init_dsm_registry();
entry = dshash_find_or_insert(dsm_registry_table, name, found);
if (!(*found))
{
NamedDSAState *state = &entry->data.dsa;
entry->type = DSMR_ENTRY_TYPE_DSA;
/* Initialize the LWLock tranche for the DSA. */
state->tranche = LWLockNewTrancheId();
strcpy(state->tranche_name, name);
LWLockRegisterTranche(state->tranche, state->tranche_name);
/* Initialize the DSA. */
ret = dsa_create(state->tranche);
dsa_pin(ret);
dsa_pin_mapping(ret);
/* Store handle for other backends to use. */
state->handle = dsa_get_handle(ret);
}
else if (entry->type != DSMR_ENTRY_TYPE_DSA)
ereport(ERROR,
(errmsg("requested DSA does not match type of existing entry")));
else
{
NamedDSAState *state = &entry->data.dsa;
if (dsa_is_attached(state->handle))
ereport(ERROR,
(errmsg("requested DSA already attached to current process")));
/* Initialize existing LWLock tranche for the DSA. */
LWLockRegisterTranche(state->tranche, state->tranche_name);
/* Attach to existing DSA. */
ret = dsa_attach(state->handle);
dsa_pin_mapping(ret);
}
dshash_release_lock(dsm_registry_table, entry);
MemoryContextSwitchTo(oldcontext);
return ret;
}
/*
* Initialize or attach a named dshash table.
*
* This routine returns the address of the table. The tranche_id member of
* params is ignored; new tranche IDs will be generated if needed. Note that
* the DSA lock tranche will be registered with the provided name with " DSA"
* appended. The dshash lock tranche will be registered with the provided
* name. Also note that this should be called at most once for a given table
* in each backend.
*/
dshash_table *
GetNamedDSHash(const char *name, const dshash_parameters *params, bool *found)
{
DSMRegistryEntry *entry;
MemoryContext oldcontext;
dshash_table *ret;
Assert(params);
Assert(found);
if (!name || *name == '\0')
ereport(ERROR,
(errmsg("DSHash name cannot be empty")));
if (strlen(name) >= offsetof(DSMRegistryEntry, type))
ereport(ERROR,
(errmsg("DSHash name too long")));
/* Be sure any local memory allocated by DSM/DSA routines is persistent. */
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
/* Connect to the registry. */
init_dsm_registry();
entry = dshash_find_or_insert(dsm_registry_table, name, found);
if (!(*found))
{
NamedDSAState *dsa_state = &entry->data.dsh.dsa;
NamedDSHState *dsh_state = &entry->data.dsh;
dshash_parameters params_copy;
dsa_area *dsa;
entry->type = DSMR_ENTRY_TYPE_DSH;
/* Initialize the LWLock tranche for the DSA. */
dsa_state->tranche = LWLockNewTrancheId();
sprintf(dsa_state->tranche_name, "%s%s", name, DSMR_DSA_TRANCHE_SUFFIX);
LWLockRegisterTranche(dsa_state->tranche, dsa_state->tranche_name);
/* Initialize the LWLock tranche for the dshash table. */
dsh_state->tranche = LWLockNewTrancheId();
strcpy(dsh_state->tranche_name, name);
LWLockRegisterTranche(dsh_state->tranche, dsh_state->tranche_name);
/* Initialize the DSA for the hash table. */
dsa = dsa_create(dsa_state->tranche);
dsa_pin(dsa);
dsa_pin_mapping(dsa);
/* Initialize the dshash table. */
memcpy(&params_copy, params, sizeof(dshash_parameters));
params_copy.tranche_id = dsh_state->tranche;
ret = dshash_create(dsa, &params_copy, NULL);
/* Store handles for other backends to use. */
dsa_state->handle = dsa_get_handle(dsa);
dsh_state->handle = dshash_get_hash_table_handle(ret);
}
else if (entry->type != DSMR_ENTRY_TYPE_DSH)
ereport(ERROR,
(errmsg("requested DSHash does not match type of existing entry")));
else
{
NamedDSAState *dsa_state = &entry->data.dsh.dsa;
NamedDSHState *dsh_state = &entry->data.dsh;
dsa_area *dsa;
/* XXX: Should we verify params matches what table was created with? */
if (dsa_is_attached(dsa_state->handle))
ereport(ERROR,
(errmsg("requested DSHash already attached to current process")));
/* Initialize existing LWLock tranches for the DSA and dshash table. */
LWLockRegisterTranche(dsa_state->tranche, dsa_state->tranche_name);
LWLockRegisterTranche(dsh_state->tranche, dsh_state->tranche_name);
/* Attach to existing DSA for the hash table. */
dsa = dsa_attach(dsa_state->handle);
dsa_pin_mapping(dsa);
/* Attach to existing dshash table. */
ret = dshash_attach(dsa, params, dsh_state->handle, NULL);
}
dshash_release_lock(dsm_registry_table, entry);
MemoryContextSwitchTo(oldcontext);
return ret;
}
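
In both versions the registry's dshash key is the name field itself, so the longest permitted name is the offset of the member that follows it; that is why the name-length checks compare strlen(name) against offsetof(DSMRegistryEntry, type) on one side and offsetof(DSMRegistryEntry, handle) on the other. A minimal standalone illustration of that offsetof-as-key-length convention, using a made-up entry type:

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdbool.h>

/* Made-up registry entry: the name is the hash key, the rest is payload */
typedef struct
{
    char        name[128];      /* key: NUL-terminated string */
    int         type;           /* first non-key member */
    size_t      size;
} DemoEntry;

/* A name fits only if it leaves room for its terminating NUL inside name[] */
static bool
name_fits(const char *name)
{
    return strlen(name) < offsetof(DemoEntry, type);
}

int
main(void)
{
    char        long_name[200];

    memset(long_name, 'x', sizeof(long_name));
    long_name[sizeof(long_name) - 1] = '\0';

    printf("\"my_extension_state\" fits: %d\n", name_fits("my_extension_state")); /* 1 */
    printf("199-character name fits:     %d\n", name_fits(long_name));            /* 0 */
    return 0;
}
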

View File

@ -4067,9 +4067,8 @@ float84ge(PG_FUNCTION_ARGS)
* with the specified characteristics. An operand smaller than the * with the specified characteristics. An operand smaller than the
* lower bound is assigned to bucket 0. An operand greater than or equal * lower bound is assigned to bucket 0. An operand greater than or equal
* to the upper bound is assigned to an additional bucket (with number * to the upper bound is assigned to an additional bucket (with number
* count+1). We don't allow the histogram bounds to be NaN or +/- infinity, * count+1). We don't allow "NaN" for any of the float8 inputs, and we
* but we do allow those values for the operand (taking NaN to be larger * don't allow either of the histogram bounds to be +/- infinity.
* than any other value, as we do in comparisons).
*/ */
Datum Datum
width_bucket_float8(PG_FUNCTION_ARGS) width_bucket_float8(PG_FUNCTION_ARGS)
@ -4085,11 +4084,12 @@ width_bucket_float8(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
errmsg("count must be greater than zero"))); errmsg("count must be greater than zero")));
if (isnan(bound1) || isnan(bound2)) if (isnan(operand) || isnan(bound1) || isnan(bound2))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
errmsg("lower and upper bounds cannot be NaN"))); errmsg("operand, lower bound, and upper bound cannot be NaN")));
/* Note that we allow "operand" to be infinite */
if (isinf(bound1) || isinf(bound2)) if (isinf(bound1) || isinf(bound2))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
@ -4097,15 +4097,15 @@ width_bucket_float8(PG_FUNCTION_ARGS)
if (bound1 < bound2) if (bound1 < bound2)
{ {
if (isnan(operand) || operand >= bound2) if (operand < bound1)
result = 0;
else if (operand >= bound2)
{ {
if (pg_add_s32_overflow(count, 1, &result)) if (pg_add_s32_overflow(count, 1, &result))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("integer out of range"))); errmsg("integer out of range")));
} }
else if (operand < bound1)
result = 0;
else else
{ {
if (!isinf(bound2 - bound1)) if (!isinf(bound2 - bound1))
@ -4135,7 +4135,7 @@ width_bucket_float8(PG_FUNCTION_ARGS)
} }
else if (bound1 > bound2) else if (bound1 > bound2)
{ {
if (isnan(operand) || operand > bound1) if (operand > bound1)
result = 0; result = 0;
else if (operand <= bound2) else if (operand <= bound2)
{ {
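
The two sides differ in how special operand values are handled: the right-hand version rejects a NaN operand outright, while the left-hand version only forbids NaN and +/- infinity for the histogram bounds and lets a NaN operand sort above every other value, as float8 comparisons do. A minimal standalone sketch of the left-hand bucket assignment for ascending bounds (overflow checks omitted):

#include <stdio.h>
#include <math.h>

/*
 * Assign "operand" to one of "count" equal-width buckets over [bound1, bound2),
 * with bucket 0 below the range and bucket count+1 at or above it.  NaN
 * operands are treated as larger than any other value (ascending case only).
 */
static int
width_bucket_asc(double operand, double bound1, double bound2, int count)
{
    if (isnan(operand) || operand >= bound2)
        return count + 1;
    if (operand < bound1)
        return 0;
    return (int) (count * ((operand - bound1) / (bound2 - bound1))) + 1;
}

int
main(void)
{
    printf("%d\n", width_bucket_asc(5.35, 0.024, 10.06, 5));   /* 3 */
    printf("%d\n", width_bucket_asc(NAN, 0.0, 10.0, 5));       /* 6 */
    printf("%d\n", width_bucket_asc(INFINITY, 0.0, 10.0, 5));  /* 6 */
    printf("%d\n", width_bucket_asc(-INFINITY, 0.0, 10.0, 5)); /* 0 */
    return 0;
}

The first call mirrors the documented example width_bucket(5.35, 0.024, 10.06, 5) = 3; the NaN and infinity calls show the left-hand behavior for special operands.
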

View File

@ -1960,9 +1960,8 @@ generate_series_numeric_support(PG_FUNCTION_ARGS)
* with the specified characteristics. An operand smaller than the * with the specified characteristics. An operand smaller than the
* lower bound is assigned to bucket 0. An operand greater than or equal * lower bound is assigned to bucket 0. An operand greater than or equal
* to the upper bound is assigned to an additional bucket (with number * to the upper bound is assigned to an additional bucket (with number
* count+1). We don't allow the histogram bounds to be NaN or +/- infinity, * count+1). We don't allow "NaN" for any of the numeric inputs, and we
* but we do allow those values for the operand (taking NaN to be larger * don't allow either of the histogram bounds to be +/- infinity.
* than any other value, as we do in comparisons).
*/ */
Datum Datum
width_bucket_numeric(PG_FUNCTION_ARGS) width_bucket_numeric(PG_FUNCTION_ARGS)
@ -1980,13 +1979,17 @@ width_bucket_numeric(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
errmsg("count must be greater than zero"))); errmsg("count must be greater than zero")));
if (NUMERIC_IS_SPECIAL(bound1) || NUMERIC_IS_SPECIAL(bound2)) if (NUMERIC_IS_SPECIAL(operand) ||
NUMERIC_IS_SPECIAL(bound1) ||
NUMERIC_IS_SPECIAL(bound2))
{ {
if (NUMERIC_IS_NAN(bound1) || NUMERIC_IS_NAN(bound2)) if (NUMERIC_IS_NAN(operand) ||
NUMERIC_IS_NAN(bound1) ||
NUMERIC_IS_NAN(bound2))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
errmsg("lower and upper bounds cannot be NaN"))); errmsg("operand, lower bound, and upper bound cannot be NaN")));
/* We allow "operand" to be infinite; cmp_numerics will cope */
if (NUMERIC_IS_INF(bound1) || NUMERIC_IS_INF(bound2)) if (NUMERIC_IS_INF(bound1) || NUMERIC_IS_INF(bound2))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),

View File

@ -584,49 +584,3 @@ IsInjectionPointAttached(const char *name)
return false; /* silence compiler */ return false; /* silence compiler */
#endif #endif
} }
/*
* Retrieve a list of all the injection points currently attached.
*
* This list is palloc'd in the current memory context.
*/
List *
InjectionPointList(void)
{
#ifdef USE_INJECTION_POINTS
List *inj_points = NIL;
uint32 max_inuse;
LWLockAcquire(InjectionPointLock, LW_SHARED);
max_inuse = pg_atomic_read_u32(&ActiveInjectionPoints->max_inuse);
for (uint32 idx = 0; idx < max_inuse; idx++)
{
InjectionPointEntry *entry;
InjectionPointData *inj_point;
uint64 generation;
entry = &ActiveInjectionPoints->entries[idx];
generation = pg_atomic_read_u64(&entry->generation);
/* skip free slots */
if (generation % 2 == 0)
continue;
inj_point = (InjectionPointData *) palloc0(sizeof(InjectionPointData));
inj_point->name = pstrdup(entry->name);
inj_point->library = pstrdup(entry->library);
inj_point->function = pstrdup(entry->function);
inj_points = lappend(inj_points, inj_point);
}
LWLockRelease(InjectionPointLock);
return inj_points;
#else
elog(ERROR, "Injection points are not supported by this build");
return NIL; /* keep compiler quiet */
#endif
}
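
InjectionPointList() walks the shared array and skips any slot whose generation counter is even; the convention visible here is that an odd generation marks a slot as currently attached and an even one marks it as free, which falls out naturally if the counter is bumped once on attach and once on detach (an assumption of this sketch). A small standalone model of that parity convention:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Model of a slot whose generation is bumped on every attach and detach */
typedef struct
{
    uint64_t    generation;     /* odd = in use, even = free */
    const char *name;
} DemoSlot;

static void demo_attach(DemoSlot *slot, const char *name)
{
    slot->generation++;         /* even -> odd: slot becomes visible */
    slot->name = name;
}

static void demo_detach(DemoSlot *slot)
{
    slot->generation++;         /* odd -> even: slot is free again */
    slot->name = NULL;
}

static bool demo_in_use(const DemoSlot *slot)
{
    return slot->generation % 2 == 1;
}

int
main(void)
{
    DemoSlot    slot = {0, NULL};

    printf("in use? %d\n", demo_in_use(&slot));     /* 0 */
    demo_attach(&slot, "my-injection-point");
    printf("in use? %d\n", demo_in_use(&slot));     /* 1 */
    demo_detach(&slot);
    printf("in use? %d\n", demo_in_use(&slot));     /* 0 */
    return 0;
}
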

View File

@ -531,21 +531,6 @@ dsa_attach(dsa_handle handle)
return area; return area;
} }
/*
* Returns whether the area with the given handle was already attached by the
* current process. The area must have been created with dsa_create (not
* dsa_create_in_place).
*/
bool
dsa_is_attached(dsa_handle handle)
{
/*
* An area handle is really a DSM segment handle for the first segment, so
* we can just search for that.
*/
return dsm_find_mapping(handle) != NULL;
}
/* /*
* Attach to an area that was created with dsa_create_in_place. The caller * Attach to an area that was created with dsa_create_in_place. The caller
* must somehow know the location in memory that was used when the area was * must somehow know the location in memory that was used when the area was

View File

@ -93,9 +93,9 @@ tests += {
'sd': meson.current_source_dir(), 'sd': meson.current_source_dir(),
'bd': meson.current_build_dir(), 'bd': meson.current_build_dir(),
'tap': { 'tap': {
'env': {'GZIP_PROGRAM': gzip.found() ? gzip.full_path() : '', 'env': {'GZIP_PROGRAM': gzip.found() ? gzip.path() : '',
'TAR': tar.found() ? tar.full_path() : '', 'TAR': tar.found() ? tar.path() : '',
'LZ4': program_lz4.found() ? program_lz4.full_path() : '', 'LZ4': program_lz4.found() ? program_lz4.path() : '',
}, },
'tests': [ 'tests': [
't/010_pg_basebackup.pl', 't/010_pg_basebackup.pl',

View File

@ -91,9 +91,9 @@ tests += {
'bd': meson.current_build_dir(), 'bd': meson.current_build_dir(),
'tap': { 'tap': {
'env': { 'env': {
'GZIP_PROGRAM': gzip.found() ? gzip.full_path() : '', 'GZIP_PROGRAM': gzip.found() ? gzip.path() : '',
'LZ4': program_lz4.found() ? program_lz4.full_path() : '', 'LZ4': program_lz4.found() ? program_lz4.path() : '',
'ZSTD': program_zstd.found() ? program_zstd.full_path() : '', 'ZSTD': program_zstd.found() ? program_zstd.path() : '',
'with_icu': icu.found() ? 'yes' : 'no', 'with_icu': icu.found() ? 'yes' : 'no',
}, },
'tests': [ 'tests': [

View File

@ -23,10 +23,10 @@ tests += {
'sd': meson.current_source_dir(), 'sd': meson.current_source_dir(),
'bd': meson.current_build_dir(), 'bd': meson.current_build_dir(),
'tap': { 'tap': {
'env': {'GZIP_PROGRAM': gzip.found() ? gzip.full_path() : '', 'env': {'GZIP_PROGRAM': gzip.found() ? gzip.path() : '',
'TAR': tar.found() ? tar.full_path() : '', 'TAR': tar.found() ? tar.path() : '',
'LZ4': program_lz4.found() ? program_lz4.full_path() : '', 'LZ4': program_lz4.found() ? program_lz4.path() : '',
'ZSTD': program_zstd.found() ? program_zstd.full_path() : ''}, 'ZSTD': program_zstd.found() ? program_zstd.path() : ''},
'tests': [ 'tests': [
't/001_basic.pl', 't/001_basic.pl',
't/002_algorithm.pl', 't/002_algorithm.pl',

View File

@ -16,22 +16,6 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1); $primary->init(allows_streaming => 1);
$primary->start; $primary->start;
# Create file with some random data and an arbitrary size, useful to check
# the solidity of the compression and decompression logic. The size of the
# file is chosen to be around 640kB. This has proven to be large enough to
# detect some issues related to LZ4, and low enough to not impact the runtime
# of the test significantly.
my $junk_data = $primary->safe_psql(
'postgres', qq(
SELECT string_agg(encode(sha256(i::bytea), 'hex'), '')
FROM generate_series(1, 10240) s(i);));
my $data_dir = $primary->data_dir;
my $junk_file = "$data_dir/junk";
open my $jf, '>', $junk_file
or die "Could not create junk file: $!";
print $jf $junk_data;
close $jf;
# Create a tablespace directory. # Create a tablespace directory.
my $source_ts_path = PostgreSQL::Test::Utils::tempdir_short(); my $source_ts_path = PostgreSQL::Test::Utils::tempdir_short();
@ -68,12 +52,6 @@ my @test_configuration = (
'backup_archive' => [ 'base.tar.lz4', "$tsoid.tar.lz4" ], 'backup_archive' => [ 'base.tar.lz4', "$tsoid.tar.lz4" ],
'enabled' => check_pg_config("#define USE_LZ4 1") 'enabled' => check_pg_config("#define USE_LZ4 1")
}, },
{
'compression_method' => 'lz4',
'backup_flags' => [ '--compress', 'server-lz4:5' ],
'backup_archive' => [ 'base.tar.lz4', "$tsoid.tar.lz4" ],
'enabled' => check_pg_config("#define USE_LZ4 1")
},
{ {
'compression_method' => 'zstd', 'compression_method' => 'zstd',
'backup_flags' => [ '--compress', 'server-zstd' ], 'backup_flags' => [ '--compress', 'server-zstd' ],

View File

@ -15,22 +15,6 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1); $primary->init(allows_streaming => 1);
$primary->start; $primary->start;
# Create file with some random data and an arbitrary size, useful to check
# the solidity of the compression and decompression logic. The size of the
# file is chosen to be around 640kB. This has proven to be large enough to
# detect some issues related to LZ4, and low enough to not impact the runtime
# of the test significantly.
my $junk_data = $primary->safe_psql(
'postgres', qq(
SELECT string_agg(encode(sha256(i::bytea), 'hex'), '')
FROM generate_series(1, 10240) s(i);));
my $data_dir = $primary->data_dir;
my $junk_file = "$data_dir/junk";
open my $jf, '>', $junk_file
or die "Could not create junk file: $!";
print $jf $junk_data;
close $jf;
my $backup_path = $primary->backup_dir . '/client-backup'; my $backup_path = $primary->backup_dir . '/client-backup';
my $extract_path = $primary->backup_dir . '/extracted-backup'; my $extract_path = $primary->backup_dir . '/extracted-backup';
@ -53,12 +37,6 @@ my @test_configuration = (
'backup_archive' => 'base.tar.lz4', 'backup_archive' => 'base.tar.lz4',
'enabled' => check_pg_config("#define USE_LZ4 1") 'enabled' => check_pg_config("#define USE_LZ4 1")
}, },
{
'compression_method' => 'lz4',
'backup_flags' => [ '--compress', 'client-lz4:1' ],
'backup_archive' => 'base.tar.lz4',
'enabled' => check_pg_config("#define USE_LZ4 1")
},
{ {
'compression_method' => 'zstd', 'compression_method' => 'zstd',
'backup_flags' => [ '--compress', 'client-zstd:5' ], 'backup_flags' => [ '--compress', 'client-zstd:5' ],

View File

@ -322,9 +322,9 @@ astreamer_lz4_decompressor_content(astreamer *streamer,
mystreamer = (astreamer_lz4_frame *) streamer; mystreamer = (astreamer_lz4_frame *) streamer;
next_in = (uint8 *) data; next_in = (uint8 *) data;
next_out = (uint8 *) mystreamer->base.bbs_buffer.data + mystreamer->bytes_written; next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
avail_in = len; avail_in = len;
avail_out = mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written; avail_out = mystreamer->base.bbs_buffer.maxlen;
while (avail_in > 0) while (avail_in > 0)
{ {

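The left-hand version of astreamer_lz4_decompressor_content offsets next_out and avail_out by bytes_written, so that successive calls append to the caller's buffer instead of overwriting earlier output. A minimal standalone sketch of that offset-tracking pattern follows; the names (OutBuffer, append_chunk) are illustrative and not taken from the PostgreSQL source.

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the streamer's output buffer state. */
typedef struct OutBuffer
{
	char		data[64];
	size_t		maxlen;
	size_t		bytes_written;	/* how much of data[] is already used */
} OutBuffer;

/*
 * Append one decoded chunk.  Without the bytes_written offset, every call
 * would write at data[0] and clobber the previous chunk -- the behavior the
 * hunk above corrects for the LZ4 decompressor.
 */
static void
append_chunk(OutBuffer *out, const char *chunk, size_t len)
{
	char	   *next_out = out->data + out->bytes_written;
	size_t		avail_out = out->maxlen - out->bytes_written;

	assert(len <= avail_out);
	memcpy(next_out, chunk, len);
	out->bytes_written += len;
}

int
main(void)
{
	OutBuffer	out = {.maxlen = sizeof(out.data), .bytes_written = 0};

	append_chunk(&out, "hello ", 6);
	append_chunk(&out, "world", 5);
	printf("%.*s\n", (int) out.bytes_written, out.data);	/* hello world */
	return 0;
}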
View File

@ -157,6 +157,35 @@ typedef struct ExprState
* entries for a particular index. Used for both index_build and * entries for a particular index. Used for both index_build and
* retail creation of index entries. * retail creation of index entries.
* *
* NumIndexAttrs total number of columns in this index
* NumIndexKeyAttrs number of key columns in index
* IndexAttrNumbers underlying-rel attribute numbers used as keys
* (zeroes indicate expressions). It also contains
* info about included columns.
* Expressions expr trees for expression entries, or NIL if none
* ExpressionsState exec state for expressions, or NIL if none
* Predicate partial-index predicate, or NIL if none
* PredicateState exec state for predicate, or NIL if none
* ExclusionOps Per-column exclusion operators, or NULL if none
* ExclusionProcs Underlying function OIDs for ExclusionOps
* ExclusionStrats Opclass strategy numbers for ExclusionOps
* UniqueOps These are like Exclusion*, but for unique indexes
* UniqueProcs
* UniqueStrats
* Unique is it a unique index?
* NullsNotDistinct is NULLS NOT DISTINCT?
* ReadyForInserts is it valid for inserts?
* CheckedUnchanged IndexUnchanged status determined yet?
* IndexUnchanged aminsert hint, cached for retail inserts
* Concurrent are we doing a concurrent index build?
* BrokenHotChain did we detect any broken HOT chains?
* WithoutOverlaps is it a WITHOUT OVERLAPS index?
* Summarizing is it a summarizing index?
* ParallelWorkers # of workers requested (excludes leader)
* Am Oid of index AM
* AmCache private cache area for index AM
* Context memory context holding this IndexInfo
*
* ii_Concurrent, ii_BrokenHotChain, and ii_ParallelWorkers are used only * ii_Concurrent, ii_BrokenHotChain, and ii_ParallelWorkers are used only
* during index build; they're conventionally zeroed otherwise. * during index build; they're conventionally zeroed otherwise.
* ---------------- * ----------------
@ -164,67 +193,31 @@ typedef struct ExprState
typedef struct IndexInfo typedef struct IndexInfo
{ {
NodeTag type; NodeTag type;
int ii_NumIndexAttrs; /* total number of columns in index */
/* total number of columns in index */ int ii_NumIndexKeyAttrs; /* number of key columns in index */
int ii_NumIndexAttrs;
/* number of key columns in index */
int ii_NumIndexKeyAttrs;
/*
* Underlying-rel attribute numbers used as keys (zeroes indicate
* expressions). It also contains info about included columns.
*/
AttrNumber ii_IndexAttrNumbers[INDEX_MAX_KEYS]; AttrNumber ii_IndexAttrNumbers[INDEX_MAX_KEYS];
/* expr trees for expression entries, or NIL if none */
List *ii_Expressions; /* list of Expr */ List *ii_Expressions; /* list of Expr */
/* exec state for expressions, or NIL if none */
List *ii_ExpressionsState; /* list of ExprState */ List *ii_ExpressionsState; /* list of ExprState */
/* partial-index predicate, or NIL if none */
List *ii_Predicate; /* list of Expr */ List *ii_Predicate; /* list of Expr */
/* exec state for predicate, or NIL if none */
ExprState *ii_PredicateState; ExprState *ii_PredicateState;
/* Per-column exclusion operators, or NULL if none */
Oid *ii_ExclusionOps; /* array with one entry per column */ Oid *ii_ExclusionOps; /* array with one entry per column */
/* Underlying function OIDs for ExclusionOps */
Oid *ii_ExclusionProcs; /* array with one entry per column */ Oid *ii_ExclusionProcs; /* array with one entry per column */
/* Opclass strategy numbers for ExclusionOps */
uint16 *ii_ExclusionStrats; /* array with one entry per column */ uint16 *ii_ExclusionStrats; /* array with one entry per column */
/* These are like Exclusion*, but for unique indexes */
Oid *ii_UniqueOps; /* array with one entry per column */ Oid *ii_UniqueOps; /* array with one entry per column */
Oid *ii_UniqueProcs; /* array with one entry per column */ Oid *ii_UniqueProcs; /* array with one entry per column */
uint16 *ii_UniqueStrats; /* array with one entry per column */ uint16 *ii_UniqueStrats; /* array with one entry per column */
/* is it a unique index? */
bool ii_Unique; bool ii_Unique;
/* is NULLS NOT DISTINCT? */
bool ii_NullsNotDistinct; bool ii_NullsNotDistinct;
/* is it valid for inserts? */
bool ii_ReadyForInserts; bool ii_ReadyForInserts;
/* IndexUnchanged status determined yet? */
bool ii_CheckedUnchanged; bool ii_CheckedUnchanged;
/* aminsert hint, cached for retail inserts */
bool ii_IndexUnchanged; bool ii_IndexUnchanged;
/* are we doing a concurrent index build? */
bool ii_Concurrent; bool ii_Concurrent;
/* did we detect any broken HOT chains? */
bool ii_BrokenHotChain; bool ii_BrokenHotChain;
/* is it a summarizing index? */
bool ii_Summarizing; bool ii_Summarizing;
/* is it a WITHOUT OVERLAPS index? */
bool ii_WithoutOverlaps; bool ii_WithoutOverlaps;
/* # of workers requested (excludes leader) */
int ii_ParallelWorkers; int ii_ParallelWorkers;
/* Oid of index AM */
Oid ii_Am; Oid ii_Am;
/* private cache area for index AM */
void *ii_AmCache; void *ii_AmCache;
/* memory context holding this IndexInfo */
MemoryContext ii_Context; MemoryContext ii_Context;
} IndexInfo; } IndexInfo;

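The per-field comments note that ii_IndexAttrNumbers holds zeroes for expression columns and that entries beyond ii_NumIndexKeyAttrs describe INCLUDE (non-key) columns. A small backend-side sketch of walking those fields is below; it assumes the usual backend headers and an already-built IndexInfo, and the function name is illustrative.

#include "postgres.h"
#include "nodes/execnodes.h"

/*
 * Report, for each index column, whether it is a key or an included column
 * and whether it maps to a plain heap attribute or an expression.
 * (Sketch only; assumes it runs inside the backend with a valid IndexInfo.)
 */
static void
describe_index_columns(const IndexInfo *ii)
{
	for (int i = 0; i < ii->ii_NumIndexAttrs; i++)
	{
		bool		is_key = (i < ii->ii_NumIndexKeyAttrs);
		AttrNumber	attnum = ii->ii_IndexAttrNumbers[i];

		if (attnum != 0)
			elog(DEBUG1, "column %d: %s column on heap attribute %d",
				 i + 1, is_key ? "key" : "included", attnum);
		else
			elog(DEBUG1, "column %d: %s expression column",
				 i + 1, is_key ? "key" : "included");
	}
}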
View File

@ -28,7 +28,7 @@ node_support_input_i = [
node_support_input = [] node_support_input = []
foreach i : node_support_input_i foreach i : node_support_input_i
node_support_input += meson.project_source_root() / 'src' / 'include' / i node_support_input += meson.source_root() / 'src' / 'include' / i
endforeach endforeach
node_support_output = [ node_support_output = [

View File

@ -1,6 +1,6 @@
# Copyright (c) 2022-2025, PostgreSQL Global Development Group # Copyright (c) 2022-2025, PostgreSQL Global Development Group
# See https://github.com/mesonbuild/meson/issues/10338 # See https://github.com/mesonbuild/meson/issues/10338
pch_c_h = meson.project_source_root() / meson.current_source_dir() / 'c_pch.h' pch_c_h = meson.source_root() / meson.current_source_dir() / 'c_pch.h'
pch_postgres_h = meson.project_source_root() / meson.current_source_dir() / 'postgres_pch.h' pch_postgres_h = meson.source_root() / meson.current_source_dir() / 'postgres_pch.h'
pch_postgres_fe_h = meson.project_source_root() / meson.current_source_dir() / 'postgres_fe_pch.h' pch_postgres_fe_h = meson.source_root() / meson.current_source_dir() / 'postgres_fe_pch.h'

View File

@ -72,7 +72,7 @@ pg_comp_crc32c_dispatch(pg_crc32c crc, const void *data, size_t len)
{ {
if (__builtin_constant_p(len) && len < 32) if (__builtin_constant_p(len) && len < 32)
{ {
const unsigned char *p = (const unsigned char *) data; const unsigned char *p = data;
/* /*
* For small constant inputs, inline the computation to avoid a * For small constant inputs, inline the computation to avoid a

View File
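The retained comment explains that, when len is a small compile-time constant, the byte loop is inlined so the compiler can fully unroll it and skip the out-of-line implementation. Below is a self-contained sketch of the same __builtin_constant_p dispatch trick (GCC/Clang builtin), using a plain bit-at-a-time software CRC-32C with the standard reflected polynomial 0x82F63B78; none of this is copied from the PostgreSQL implementation.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static inline uint32_t
crc32c_byte(uint32_t crc, uint8_t b)
{
	crc ^= b;
	for (int i = 0; i < 8; i++)
		crc = (crc >> 1) ^ (0x82F63B78 & -(uint32_t) (crc & 1));
	return crc;
}

/* Stand-in for the big, out-of-line implementation. */
static uint32_t
crc32c_generic(uint32_t crc, const void *data, size_t len)
{
	const unsigned char *p = (const unsigned char *) data;

	while (len-- > 0)
		crc = crc32c_byte(crc, *p++);
	return crc;
}

static inline uint32_t
crc32c_dispatch(uint32_t crc, const void *data, size_t len)
{
	/*
	 * When len is a compile-time constant below the cutoff, the loop is
	 * fully visible to the compiler and can be unrolled, avoiding a call.
	 */
	if (__builtin_constant_p(len) && len < 32)
	{
		const unsigned char *p = (const unsigned char *) data;

		for (size_t i = 0; i < len; i++)
			crc = crc32c_byte(crc, p[i]);
		return crc;
	}
	return crc32c_generic(crc, data, len);
}

int
main(void)
{
	uint32_t	crc = 0xFFFFFFFF;

	crc = crc32c_dispatch(crc, "hello", 5);		/* constant len: inlined path */
	printf("%08x\n", crc ^ 0xFFFFFFFF);
	return 0;
}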

@ -13,15 +13,10 @@
#ifndef DSM_REGISTRY_H #ifndef DSM_REGISTRY_H
#define DSM_REGISTRY_H #define DSM_REGISTRY_H
#include "lib/dshash.h"
extern void *GetNamedDSMSegment(const char *name, size_t size, extern void *GetNamedDSMSegment(const char *name, size_t size,
void (*init_callback) (void *ptr), void (*init_callback) (void *ptr),
bool *found); bool *found);
extern dsa_area *GetNamedDSA(const char *name, bool *found);
extern dshash_table *GetNamedDSHash(const char *name,
const dshash_parameters *params,
bool *found);
extern Size DSMRegistryShmemSize(void); extern Size DSMRegistryShmemSize(void);
extern void DSMRegistryShmemInit(void); extern void DSMRegistryShmemInit(void);

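GetNamedDSMSegment, kept on both sides of this hunk, attaches to a named shared memory segment, creating it and running init_callback only for the first backend that asks for that name. A condensed sketch of that usage pattern follows; it is essentially what the test_dsm_registry module further down in this diff does, with illustrative names (MySharedState, my_attach).

#include "postgres.h"
#include "storage/dsm_registry.h"
#include "storage/lwlock.h"

typedef struct MySharedState
{
	LWLock		lck;
	int			counter;
} MySharedState;

static MySharedState *my_state = NULL;

/* Run only by the first backend that creates the segment. */
static void
my_init_callback(void *ptr)
{
	MySharedState *state = (MySharedState *) ptr;

	LWLockInitialize(&state->lck, LWLockNewTrancheId());
	state->counter = 0;
}

static void
my_attach(void)
{
	bool		found;

	my_state = GetNamedDSMSegment("my_extension_state",
								  sizeof(MySharedState),
								  my_init_callback,
								  &found);
	LWLockRegisterTranche(my_state->lck.tranche, "my_extension_state");
}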
View File

@ -145,7 +145,6 @@ extern dsa_area *dsa_create_in_place_ext(void *place, size_t size,
size_t init_segment_size, size_t init_segment_size,
size_t max_segment_size); size_t max_segment_size);
extern dsa_area *dsa_attach(dsa_handle handle); extern dsa_area *dsa_attach(dsa_handle handle);
extern bool dsa_is_attached(dsa_handle handle);
extern dsa_area *dsa_attach_in_place(void *place, dsm_segment *segment); extern dsa_area *dsa_attach_in_place(void *place, dsm_segment *segment);
extern void dsa_release_in_place(void *place); extern void dsa_release_in_place(void *place);
extern void dsa_on_dsm_detach_release_in_place(dsm_segment *, Datum); extern void dsa_on_dsm_detach_release_in_place(dsm_segment *, Datum);

View File

@ -11,19 +11,6 @@
#ifndef INJECTION_POINT_H #ifndef INJECTION_POINT_H
#define INJECTION_POINT_H #define INJECTION_POINT_H
#include "nodes/pg_list.h"
/*
* Injection point data, used when retrieving a list of all the attached
* injection points.
*/
typedef struct InjectionPointData
{
const char *name;
const char *library;
const char *function;
} InjectionPointData;
/* /*
* Injection points require --enable-injection-points. * Injection points require --enable-injection-points.
*/ */
@ -60,9 +47,6 @@ extern void InjectionPointCached(const char *name, void *arg);
extern bool IsInjectionPointAttached(const char *name); extern bool IsInjectionPointAttached(const char *name);
extern bool InjectionPointDetach(const char *name); extern bool InjectionPointDetach(const char *name);
/* Get the current set of injection points attached */
extern List *InjectionPointList(void);
#ifdef EXEC_BACKEND #ifdef EXEC_BACKEND
extern PGDLLIMPORT struct InjectionPointsCtl *ActiveInjectionPoints; extern PGDLLIMPORT struct InjectionPointsCtl *ActiveInjectionPoints;
#endif #endif

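The removed InjectionPointList() returned a List of InjectionPointData entries describing every attached injection point. A hedged sketch of consuming such a list with the standard pg_list.h iteration macros is shown below; it assumes a backend built with injection points and relies on the API that exists only in the "from" commit of this comparison.

#include "postgres.h"
#include "nodes/pg_list.h"
#include "utils/injection_point.h"

/*
 * Log every currently-attached injection point, using the
 * InjectionPointList()/InjectionPointData API shown above.
 */
static void
log_attached_injection_points(void)
{
	List	   *points = InjectionPointList();
	ListCell   *lc;

	foreach(lc, points)
	{
		InjectionPointData *p = (InjectionPointData *) lfirst(lc);

		elog(LOG, "injection point \"%s\" -> %s:%s",
			 p->name, p->library, p->function);
	}
}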
View File

@ -137,7 +137,6 @@ PQcancelCreate(PGconn *conn)
goto oom_error; goto oom_error;
originalHost = conn->connhost[conn->whichhost]; originalHost = conn->connhost[conn->whichhost];
cancelConn->connhost[0].type = originalHost.type;
if (originalHost.host) if (originalHost.host)
{ {
cancelConn->connhost[0].host = strdup(originalHost.host); cancelConn->connhost[0].host = strdup(originalHost.host);

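This hunk adjusts how PQcancelCreate copies the original connection's host entry. For context, here is a hedged sketch of how an application typically uses the PQcancelCreate family of libpq functions (PostgreSQL 17-era cancel API; error handling trimmed, and the function name is illustrative).

#include <stdio.h>
#include <libpq-fe.h>

/*
 * Send a cancel request for whatever is currently running on conn.
 * Sketch only: assumes conn is an established connection made with
 * PQconnectdb() or similar.
 */
static void
cancel_running_query(PGconn *conn)
{
	PGcancelConn *cancel = PQcancelCreate(conn);

	if (cancel == NULL)
	{
		fprintf(stderr, "out of memory\n");
		return;
	}

	/* Connects to the server and sends the cancel request, blocking. */
	if (!PQcancelBlocking(cancel))
		fprintf(stderr, "cancel failed: %s\n", PQcancelErrorMessage(cancel));

	PQcancelFinish(cancel);
}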
View File

@ -6,7 +6,7 @@
# Emulation of PGAC_CHECK_STRIP # Emulation of PGAC_CHECK_STRIP
strip_bin = find_program(get_option('STRIP'), required: false, native: true) strip_bin = find_program(get_option('STRIP'), required: false, native: true)
strip_cmd = strip_bin.found() ? [strip_bin.full_path()] : [':'] strip_cmd = strip_bin.found() ? [strip_bin.path()] : [':']
working_strip = false working_strip = false
if strip_bin.found() if strip_bin.found()
@ -49,8 +49,8 @@ pgxs_kv = {
'PORTNAME': portname, 'PORTNAME': portname,
'PG_SYSROOT': pg_sysroot, 'PG_SYSROOT': pg_sysroot,
'abs_top_builddir': meson.project_build_root(), 'abs_top_builddir': meson.build_root(),
'abs_top_srcdir': meson.project_source_root(), 'abs_top_srcdir': meson.source_root(),
'enable_rpath': get_option('rpath') ? 'yes' : 'no', 'enable_rpath': get_option('rpath') ? 'yes' : 'no',
'enable_nls': libintl.found() ? 'yes' : 'no', 'enable_nls': libintl.found() ? 'yes' : 'no',
@ -123,7 +123,7 @@ pgxs_kv = {
if llvm.found() if llvm.found()
pgxs_kv += { pgxs_kv += {
'CLANG': clang.full_path(), 'CLANG': clang.path(),
'CXX': ' '.join(cpp.cmd_array()), 'CXX': ' '.join(cpp.cmd_array()),
'LLVM_BINPATH': llvm_binpath, 'LLVM_BINPATH': llvm_binpath,
} }
@ -258,7 +258,7 @@ pgxs_deps = {
pgxs_cdata = configuration_data(pgxs_kv) pgxs_cdata = configuration_data(pgxs_kv)
foreach b, p : pgxs_bins foreach b, p : pgxs_bins
pgxs_cdata.set(b, p.found() ? p.full_path() : '') pgxs_cdata.set(b, p.found() ? p.path() : '')
endforeach endforeach
foreach pe : pgxs_empty foreach pe : pgxs_empty

View File

@ -96,7 +96,7 @@ tests += {
'plperl_transaction', 'plperl_transaction',
'plperl_env', 'plperl_env',
], ],
'regress_args': ['--dlpath', meson.project_build_root() / 'src/test/regress'], 'regress_args': ['--dlpath', meson.build_root() / 'src/test/regress'],
}, },
} }

View File

@ -39,7 +39,7 @@ tests += {
'reindex_conc', 'reindex_conc',
'vacuum', 'vacuum',
], ],
'regress_args': ['--dlpath', meson.project_build_root() / 'src/test/regress'], 'regress_args': ['--dlpath', meson.build_root() / 'src/test/regress'],
# The injection points are cluster-wide, so disable installcheck # The injection points are cluster-wide, so disable installcheck
'runningcheck': false, 'runningcheck': false,
}, },

View File

@ -77,7 +77,7 @@ tests += {
't/002_client.pl', 't/002_client.pl',
], ],
'env': { 'env': {
'PYTHON': python.full_path(), 'PYTHON': python.path(),
'with_libcurl': oauth_flow_supported ? 'yes' : 'no', 'with_libcurl': oauth_flow_supported ? 'yes' : 'no',
'with_python': 'yes', 'with_python': 'yes',
}, },

View File

@ -5,12 +5,6 @@ SELECT set_val_in_shmem(1236);
(1 row) (1 row)
SELECT set_val_in_hash('test', '1414');
set_val_in_hash
-----------------
(1 row)
\c \c
SELECT get_val_in_shmem(); SELECT get_val_in_shmem();
get_val_in_shmem get_val_in_shmem
@ -18,9 +12,3 @@ SELECT get_val_in_shmem();
1236 1236
(1 row) (1 row)
SELECT get_val_in_hash('test');
get_val_in_hash
-----------------
1414
(1 row)

View File

@ -1,6 +1,4 @@
CREATE EXTENSION test_dsm_registry; CREATE EXTENSION test_dsm_registry;
SELECT set_val_in_shmem(1236); SELECT set_val_in_shmem(1236);
SELECT set_val_in_hash('test', '1414');
\c \c
SELECT get_val_in_shmem(); SELECT get_val_in_shmem();
SELECT get_val_in_hash('test');

View File

@ -8,9 +8,3 @@ CREATE FUNCTION set_val_in_shmem(val INT) RETURNS VOID
CREATE FUNCTION get_val_in_shmem() RETURNS INT CREATE FUNCTION get_val_in_shmem() RETURNS INT
AS 'MODULE_PATHNAME' LANGUAGE C; AS 'MODULE_PATHNAME' LANGUAGE C;
CREATE FUNCTION set_val_in_hash(key TEXT, val TEXT) RETURNS VOID
AS 'MODULE_PATHNAME' LANGUAGE C;
CREATE FUNCTION get_val_in_hash(key TEXT) RETURNS TEXT
AS 'MODULE_PATHNAME' LANGUAGE C;

View File

@ -15,7 +15,6 @@
#include "fmgr.h" #include "fmgr.h"
#include "storage/dsm_registry.h" #include "storage/dsm_registry.h"
#include "storage/lwlock.h" #include "storage/lwlock.h"
#include "utils/builtins.h"
PG_MODULE_MAGIC; PG_MODULE_MAGIC;
@ -25,31 +24,15 @@ typedef struct TestDSMRegistryStruct
LWLock lck; LWLock lck;
} TestDSMRegistryStruct; } TestDSMRegistryStruct;
typedef struct TestDSMRegistryHashEntry static TestDSMRegistryStruct *tdr_state;
{
char key[64];
dsa_pointer val;
} TestDSMRegistryHashEntry;
static TestDSMRegistryStruct *tdr_dsm;
static dsa_area *tdr_dsa;
static dshash_table *tdr_hash;
static const dshash_parameters dsh_params = {
offsetof(TestDSMRegistryHashEntry, val),
sizeof(TestDSMRegistryHashEntry),
dshash_strcmp,
dshash_strhash,
dshash_strcpy
};
static void static void
init_tdr_dsm(void *ptr) tdr_init_shmem(void *ptr)
{ {
TestDSMRegistryStruct *dsm = (TestDSMRegistryStruct *) ptr; TestDSMRegistryStruct *state = (TestDSMRegistryStruct *) ptr;
LWLockInitialize(&dsm->lck, LWLockNewTrancheId()); LWLockInitialize(&state->lck, LWLockNewTrancheId());
dsm->val = 0; state->val = 0;
} }
static void static void
@ -57,17 +40,11 @@ tdr_attach_shmem(void)
{ {
bool found; bool found;
tdr_dsm = GetNamedDSMSegment("test_dsm_registry_dsm", tdr_state = GetNamedDSMSegment("test_dsm_registry",
sizeof(TestDSMRegistryStruct), sizeof(TestDSMRegistryStruct),
init_tdr_dsm, tdr_init_shmem,
&found); &found);
LWLockRegisterTranche(tdr_dsm->lck.tranche, "test_dsm_registry"); LWLockRegisterTranche(tdr_state->lck.tranche, "test_dsm_registry");
if (tdr_dsa == NULL)
tdr_dsa = GetNamedDSA("test_dsm_registry_dsa", &found);
if (tdr_hash == NULL)
tdr_hash = GetNamedDSHash("test_dsm_registry_hash", &dsh_params, &found);
} }
PG_FUNCTION_INFO_V1(set_val_in_shmem); PG_FUNCTION_INFO_V1(set_val_in_shmem);
@ -76,9 +53,9 @@ set_val_in_shmem(PG_FUNCTION_ARGS)
{ {
tdr_attach_shmem(); tdr_attach_shmem();
LWLockAcquire(&tdr_dsm->lck, LW_EXCLUSIVE); LWLockAcquire(&tdr_state->lck, LW_EXCLUSIVE);
tdr_dsm->val = PG_GETARG_INT32(0); tdr_state->val = PG_GETARG_INT32(0);
LWLockRelease(&tdr_dsm->lck); LWLockRelease(&tdr_state->lck);
PG_RETURN_VOID(); PG_RETURN_VOID();
} }
@ -91,57 +68,9 @@ get_val_in_shmem(PG_FUNCTION_ARGS)
tdr_attach_shmem(); tdr_attach_shmem();
LWLockAcquire(&tdr_dsm->lck, LW_SHARED); LWLockAcquire(&tdr_state->lck, LW_SHARED);
ret = tdr_dsm->val; ret = tdr_state->val;
LWLockRelease(&tdr_dsm->lck); LWLockRelease(&tdr_state->lck);
PG_RETURN_INT32(ret); PG_RETURN_INT32(ret);
} }
PG_FUNCTION_INFO_V1(set_val_in_hash);
Datum
set_val_in_hash(PG_FUNCTION_ARGS)
{
TestDSMRegistryHashEntry *entry;
char *key = TextDatumGetCString(PG_GETARG_DATUM(0));
char *val = TextDatumGetCString(PG_GETARG_DATUM(1));
bool found;
if (strlen(key) >= offsetof(TestDSMRegistryHashEntry, val))
ereport(ERROR,
(errmsg("key too long")));
tdr_attach_shmem();
entry = dshash_find_or_insert(tdr_hash, key, &found);
if (found)
dsa_free(tdr_dsa, entry->val);
entry->val = dsa_allocate(tdr_dsa, strlen(val) + 1);
strcpy(dsa_get_address(tdr_dsa, entry->val), val);
dshash_release_lock(tdr_hash, entry);
PG_RETURN_VOID();
}
PG_FUNCTION_INFO_V1(get_val_in_hash);
Datum
get_val_in_hash(PG_FUNCTION_ARGS)
{
TestDSMRegistryHashEntry *entry;
char *key = TextDatumGetCString(PG_GETARG_DATUM(0));
text *val = NULL;
tdr_attach_shmem();
entry = dshash_find(tdr_hash, key, false);
if (entry == NULL)
PG_RETURN_NULL();
val = cstring_to_text(dsa_get_address(tdr_dsa, entry->val));
dshash_release_lock(tdr_hash, entry);
PG_RETURN_TEXT_P(val);
}

View File

@ -187,54 +187,4 @@ ok( $logfile =~
qr/FATAL: .* recovery ended before configured recovery target was reached/, qr/FATAL: .* recovery ended before configured recovery target was reached/,
'recovery end before target reached is a fatal error'); 'recovery end before target reached is a fatal error');
# Invalid timeline target
$node_standby = PostgreSQL::Test::Cluster->new('standby_9');
$node_standby->init_from_backup($node_primary, 'my_backup',
has_restoring => 1);
$node_standby->append_conf('postgresql.conf',
"recovery_target_timeline = 'bogus'");
$res = run_log(
[
'pg_ctl',
'--pgdata' => $node_standby->data_dir,
'--log' => $node_standby->logfile,
'start',
]);
ok(!$res, 'invalid timeline target (bogus value)');
my $log_start = $node_standby->wait_for_log("is not a valid number");
# Timeline target out of min range
$node_standby->append_conf('postgresql.conf',
"recovery_target_timeline = '0'");
$res = run_log(
[
'pg_ctl',
'--pgdata' => $node_standby->data_dir,
'--log' => $node_standby->logfile,
'start',
]);
ok(!$res, 'invalid timeline target (lower bound check)');
$log_start =
$node_standby->wait_for_log("must be between 1 and 4294967295", $log_start);
# Timeline target out of max range
$node_standby->append_conf('postgresql.conf',
"recovery_target_timeline = '4294967296'");
$res = run_log(
[
'pg_ctl',
'--pgdata' => $node_standby->data_dir,
'--log' => $node_standby->logfile,
'start',
]);
ok(!$res, 'invalid timeline target (upper bound check)');
$log_start =
$node_standby->wait_for_log("must be between 1 and 4294967295", $log_start);
done_testing(); done_testing();

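The removed TAP tests check that recovery_target_timeline rejects non-numeric values and anything outside 1..4294967295 (the real setting also accepts keywords such as 'latest', which this sketch ignores). Below is a standalone sketch of that kind of range validation; it is not the server's actual GUC check code, and the names are illustrative.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Return true if str is a decimal timeline ID in [1, 4294967295],
 * mirroring the errors the removed tests expect:
 * "is not a valid number" and "must be between 1 and 4294967295".
 */
static bool
validate_timeline_target(const char *str)
{
	char	   *end;
	unsigned long long val;

	errno = 0;
	val = strtoull(str, &end, 10);
	if (end == str || *end != '\0')
	{
		fprintf(stderr, "\"%s\" is not a valid number\n", str);
		return false;
	}
	if (errno == ERANGE || val < 1 || val > 4294967295ULL)
	{
		fprintf(stderr, "timeline %s must be between 1 and 4294967295\n", str);
		return false;
	}
	return true;
}

int
main(void)
{
	validate_timeline_target("bogus");		/* not a valid number */
	validate_timeline_target("0");			/* lower bound check */
	validate_timeline_target("4294967296");	/* upper bound check */
	validate_timeline_target("1");			/* ok */
	return 0;
}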
View File

@ -195,123 +195,54 @@ ORDER BY proname DESC, proargtypes DESC, pronamespace DESC LIMIT 1;
(1 row) (1 row)
-- --
-- Forwards scan RowCompare qual whose row arg has a NULL that affects our -- Add coverage for RowCompare quals whose rhs row has a NULL that ends scan
-- initial positioning strategy
-- --
explain (costs off) explain (costs off)
SELECT proname, proargtypes, pronamespace SELECT proname, proargtypes, pronamespace
FROM pg_proc FROM pg_proc
WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs' WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
ORDER BY proname, proargtypes, pronamespace; ORDER BY proname, proargtypes, pronamespace;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------------------------------------------------- -------------------------------------------------------------------------------------------------------------
Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc
Index Cond: ((ROW(proname, proargtypes) >= ROW('abs'::name, NULL::oidvector)) AND (proname <= 'abs'::name)) Index Cond: ((ROW(proname, proargtypes) < ROW('abs'::name, NULL::oidvector)) AND (proname = 'abs'::name))
(2 rows) (2 rows)
SELECT proname, proargtypes, pronamespace SELECT proname, proargtypes, pronamespace
FROM pg_proc FROM pg_proc
WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs' WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
ORDER BY proname, proargtypes, pronamespace; ORDER BY proname, proargtypes, pronamespace;
proname | proargtypes | pronamespace proname | proargtypes | pronamespace
---------+-------------+-------------- ---------+-------------+--------------
(0 rows) (0 rows)
-- --
-- Forwards scan RowCompare quals whose row arg has a NULL that ends scan -- Add coverage for backwards scan RowCompare quals whose rhs row has a NULL
-- that ends scan
-- --
explain (costs off) explain (costs off)
SELECT proname, proargtypes, pronamespace SELECT proname, proargtypes, pronamespace
FROM pg_proc FROM pg_proc
WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL) WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
ORDER BY proname, proargtypes, pronamespace;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------
Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc
Index Cond: ((proname >= 'abs'::name) AND (ROW(proname, proargtypes) < ROW('abs'::name, NULL::oidvector)))
(2 rows)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL)
ORDER BY proname, proargtypes, pronamespace;
proname | proargtypes | pronamespace
---------+-------------+--------------
(0 rows)
--
-- Backwards scan RowCompare qual whose row arg has a NULL that affects our
-- initial positioning strategy
--
explain (costs off)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL)
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC; ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------------------------------------------------- -------------------------------------------------------------------------------------------------------------
Index Only Scan Backward using pg_proc_proname_args_nsp_index on pg_proc Index Only Scan Backward using pg_proc_proname_args_nsp_index on pg_proc
Index Cond: ((proname >= 'abs'::name) AND (ROW(proname, proargtypes) <= ROW('abs'::name, NULL::oidvector))) Index Cond: ((ROW(proname, proargtypes) > ROW('abs'::name, NULL::oidvector)) AND (proname = 'abs'::name))
(2 rows) (2 rows)
SELECT proname, proargtypes, pronamespace SELECT proname, proargtypes, pronamespace
FROM pg_proc FROM pg_proc
WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL) WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC; ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
proname | proargtypes | pronamespace proname | proargtypes | pronamespace
---------+-------------+-------------- ---------+-------------+--------------
(0 rows) (0 rows)
-- --
-- Backwards scan RowCompare qual whose row arg has a NULL that ends scan -- Add coverage for recheck of > key following array advancement on previous
-- -- (left sibling) page that used a high key whose attribute value corresponding
explain (costs off) -- to the > key was -inf (due to being truncated when the high key was created).
SELECT proname, proargtypes, pronamespace
FROM pg_proc
WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------
Index Only Scan Backward using pg_proc_proname_args_nsp_index on pg_proc
Index Cond: ((ROW(proname, proargtypes) > ROW('abs'::name, NULL::oidvector)) AND (proname <= 'abs'::name))
(2 rows)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
proname | proargtypes | pronamespace
---------+-------------+--------------
(0 rows)
-- Makes B-Tree preprocessing deal with unmarking redundant keys that were
-- initially marked required (test case relies on current row compare
-- preprocessing limitations)
explain (costs off)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
ORDER BY proname, proargtypes, pronamespace;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc
Index Cond: ((ROW(proname, proargtypes) > ROW('abs'::name, NULL::oidvector)) AND (proname = 'zzzzzz'::name) AND (proargtypes = ANY ('{"26 23",5077}'::oidvector[])) AND (pronamespace = ANY ('{1,2,3}'::oid[])))
(2 rows)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
ORDER BY proname, proargtypes, pronamespace;
proname | proargtypes | pronamespace
---------+-------------+--------------
(0 rows)
--
-- Performs a recheck of > key following array advancement on previous (left
-- sibling) page that used a high key whose attribute value corresponding to
-- the > key was -inf (due to being truncated when the high key was created).
-- --
-- XXX This relies on the assumption that tenk1_thous_tenthous has a truncated -- XXX This relies on the assumption that tenk1_thous_tenthous has a truncated
-- high key "(183, -inf)" on the first page that we'll scan. The test will only -- high key "(183, -inf)" on the first page that we'll scan. The test will only

View File

@ -748,11 +748,6 @@ ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key ENFORCED;
ERROR: cannot alter enforceability of constraint "unique_tbl_i_key" of relation "unique_tbl" ERROR: cannot alter enforceability of constraint "unique_tbl_i_key" of relation "unique_tbl"
ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT ENFORCED; ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT ENFORCED;
ERROR: cannot alter enforceability of constraint "unique_tbl_i_key" of relation "unique_tbl" ERROR: cannot alter enforceability of constraint "unique_tbl_i_key" of relation "unique_tbl"
-- can't make an existing constraint NOT VALID
ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT VALID;
ERROR: constraints cannot be altered to be NOT VALID
LINE 1: ...ABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT VALID;
^
DROP TABLE unique_tbl; DROP TABLE unique_tbl;
-- --
-- EXCLUDE constraints -- EXCLUDE constraints

View File

@ -1359,7 +1359,7 @@ LINE 1: ...e ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE INITIALLY ...
ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NO INHERIT; ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NO INHERIT;
ERROR: constraint "fktable_fk_fkey" of relation "fktable" is not a not-null constraint ERROR: constraint "fktable_fk_fkey" of relation "fktable" is not a not-null constraint
ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT VALID; ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT VALID;
ERROR: constraints cannot be altered to be NOT VALID ERROR: FOREIGN KEY constraints cannot be marked NOT VALID
LINE 1: ...ER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT VALID; LINE 1: ...ER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT VALID;
^ ^
ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey ENFORCED NOT ENFORCED; ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey ENFORCED NOT ENFORCED;

View File

@ -25,7 +25,6 @@ begin
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N'); ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'loops=\d+', 'loops=N'); ln := regexp_replace(ln, 'loops=\d+', 'loops=N');
ln := regexp_replace(ln, 'Index Searches: \d+', 'Index Searches: N'); ln := regexp_replace(ln, 'Index Searches: \d+', 'Index Searches: N');
ln := regexp_replace(ln, 'Memory: \d+kB', 'Memory: NkB');
return next ln; return next ln;
end loop; end loop;
end; end;
@ -501,62 +500,3 @@ RESET max_parallel_workers_per_gather;
RESET parallel_tuple_cost; RESET parallel_tuple_cost;
RESET parallel_setup_cost; RESET parallel_setup_cost;
RESET min_parallel_table_scan_size; RESET min_parallel_table_scan_size;
-- Ensure memoize works for ANTI joins
CREATE TABLE tab_anti (a int, b boolean);
INSERT INTO tab_anti SELECT i%3, false FROM generate_series(1,100)i;
ANALYZE tab_anti;
-- Ensure we get a Memoize plan for ANTI join
SELECT explain_memoize('
SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
ON t1.a+1 = t2.a
WHERE t2.a IS NULL;', false);
explain_memoize
--------------------------------------------------------------------------------------------
Aggregate (actual rows=1.00 loops=N)
-> Nested Loop Anti Join (actual rows=33.00 loops=N)
-> Seq Scan on tab_anti t1 (actual rows=100.00 loops=N)
-> Memoize (actual rows=0.67 loops=N)
Cache Key: (t1.a + 1), t1.a
Cache Mode: binary
Hits: 97 Misses: 3 Evictions: Zero Overflows: 0 Memory Usage: NkB
-> Subquery Scan on t2 (actual rows=0.67 loops=N)
Filter: ((t1.a + 1) = t2.a)
Rows Removed by Filter: 2
-> Unique (actual rows=2.67 loops=N)
-> Sort (actual rows=67.33 loops=N)
Sort Key: t2_1.a
Sort Method: quicksort Memory: NkB
-> Seq Scan on tab_anti t2_1 (actual rows=100.00 loops=N)
(15 rows)
-- And check we get the expected results.
SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
ON t1.a+1 = t2.a
WHERE t2.a IS NULL;
count
-------
33
(1 row)
-- Ensure we do not add memoize node for SEMI join
EXPLAIN (COSTS OFF)
SELECT * FROM tab_anti t1 WHERE t1.a IN
(SELECT a FROM tab_anti t2 WHERE t2.b IN
(SELECT t1.b FROM tab_anti t3 WHERE t2.a > 1 OFFSET 0));
QUERY PLAN
-------------------------------------------------
Nested Loop Semi Join
-> Seq Scan on tab_anti t1
-> Nested Loop Semi Join
Join Filter: (t1.a = t2.a)
-> Seq Scan on tab_anti t2
-> Subquery Scan on "ANY_subquery"
Filter: (t2.b = "ANY_subquery".b)
-> Result
One-Time Filter: (t2.a > 1)
-> Seq Scan on tab_anti t3
(10 rows)
DROP TABLE tab_anti;

View File

@ -1464,21 +1464,9 @@ ERROR: count must be greater than zero
SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888); SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888);
ERROR: lower bound cannot equal upper bound ERROR: lower bound cannot equal upper bound
SELECT width_bucket('NaN', 3.0, 4.0, 888); SELECT width_bucket('NaN', 3.0, 4.0, 888);
width_bucket ERROR: operand, lower bound, and upper bound cannot be NaN
--------------
889
(1 row)
SELECT width_bucket('NaN'::float8, 3.0::float8, 4.0::float8, 888);
width_bucket
--------------
889
(1 row)
SELECT width_bucket(0, 'NaN', 4.0, 888);
ERROR: lower and upper bounds cannot be NaN
SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888); SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888);
ERROR: lower and upper bounds cannot be NaN ERROR: operand, lower bound, and upper bound cannot be NaN
SELECT width_bucket(2.0, 3.0, '-inf', 888); SELECT width_bucket(2.0, 3.0, '-inf', 888);
ERROR: lower and upper bounds must be finite ERROR: lower and upper bounds must be finite
SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888); SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888);

View File

@ -143,83 +143,38 @@ SELECT proname, proargtypes, pronamespace
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC LIMIT 1; ORDER BY proname DESC, proargtypes DESC, pronamespace DESC LIMIT 1;
-- --
-- Forwards scan RowCompare qual whose row arg has a NULL that affects our -- Add coverage for RowCompare quals whose rhs row has a NULL that ends scan
-- initial positioning strategy
-- --
explain (costs off) explain (costs off)
SELECT proname, proargtypes, pronamespace SELECT proname, proargtypes, pronamespace
FROM pg_proc FROM pg_proc
WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs' WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
ORDER BY proname, proargtypes, pronamespace; ORDER BY proname, proargtypes, pronamespace;
SELECT proname, proargtypes, pronamespace SELECT proname, proargtypes, pronamespace
FROM pg_proc FROM pg_proc
WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs' WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
ORDER BY proname, proargtypes, pronamespace; ORDER BY proname, proargtypes, pronamespace;
-- --
-- Forwards scan RowCompare quals whose row arg has a NULL that ends scan -- Add coverage for backwards scan RowCompare quals whose rhs row has a NULL
-- that ends scan
-- --
explain (costs off) explain (costs off)
SELECT proname, proargtypes, pronamespace SELECT proname, proargtypes, pronamespace
FROM pg_proc FROM pg_proc
WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL) WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
ORDER BY proname, proargtypes, pronamespace;
SELECT proname, proargtypes, pronamespace
FROM pg_proc
WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL)
ORDER BY proname, proargtypes, pronamespace;
--
-- Backwards scan RowCompare qual whose row arg has a NULL that affects our
-- initial positioning strategy
--
explain (costs off)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL)
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC; ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
SELECT proname, proargtypes, pronamespace SELECT proname, proargtypes, pronamespace
FROM pg_proc FROM pg_proc
WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL) WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC; ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
-- --
-- Backwards scan RowCompare qual whose row arg has a NULL that ends scan -- Add coverage for recheck of > key following array advancement on previous
-- -- (left sibling) page that used a high key whose attribute value corresponding
explain (costs off) -- to the > key was -inf (due to being truncated when the high key was created).
SELECT proname, proargtypes, pronamespace
FROM pg_proc
WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
SELECT proname, proargtypes, pronamespace
FROM pg_proc
WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
-- Makes B-Tree preprocessing deal with unmarking redundant keys that were
-- initially marked required (test case relies on current row compare
-- preprocessing limitations)
explain (costs off)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
ORDER BY proname, proargtypes, pronamespace;
SELECT proname, proargtypes, pronamespace
FROM pg_proc
WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
ORDER BY proname, proargtypes, pronamespace;
--
-- Performs a recheck of > key following array advancement on previous (left
-- sibling) page that used a high key whose attribute value corresponding to
-- the > key was -inf (due to being truncated when the high key was created).
-- --
-- XXX This relies on the assumption that tenk1_thous_tenthous has a truncated -- XXX This relies on the assumption that tenk1_thous_tenthous has a truncated
-- high key "(183, -inf)" on the first page that we'll scan. The test will only -- high key "(183, -inf)" on the first page that we'll scan. The test will only

View File

@ -537,9 +537,6 @@ CREATE TABLE UNIQUE_NOTEN_TBL(i int UNIQUE NOT ENFORCED);
ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key ENFORCED; ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key ENFORCED;
ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT ENFORCED; ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT ENFORCED;
-- can't make an existing constraint NOT VALID
ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT VALID;
DROP TABLE unique_tbl; DROP TABLE unique_tbl;
-- --

View File

@ -26,7 +26,6 @@ begin
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N'); ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'loops=\d+', 'loops=N'); ln := regexp_replace(ln, 'loops=\d+', 'loops=N');
ln := regexp_replace(ln, 'Index Searches: \d+', 'Index Searches: N'); ln := regexp_replace(ln, 'Index Searches: \d+', 'Index Searches: N');
ln := regexp_replace(ln, 'Memory: \d+kB', 'Memory: NkB');
return next ln; return next ln;
end loop; end loop;
end; end;
@ -245,29 +244,3 @@ RESET max_parallel_workers_per_gather;
RESET parallel_tuple_cost; RESET parallel_tuple_cost;
RESET parallel_setup_cost; RESET parallel_setup_cost;
RESET min_parallel_table_scan_size; RESET min_parallel_table_scan_size;
-- Ensure memoize works for ANTI joins
CREATE TABLE tab_anti (a int, b boolean);
INSERT INTO tab_anti SELECT i%3, false FROM generate_series(1,100)i;
ANALYZE tab_anti;
-- Ensure we get a Memoize plan for ANTI join
SELECT explain_memoize('
SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
ON t1.a+1 = t2.a
WHERE t2.a IS NULL;', false);
-- And check we get the expected results.
SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
ON t1.a+1 = t2.a
WHERE t2.a IS NULL;
-- Ensure we do not add memoize node for SEMI join
EXPLAIN (COSTS OFF)
SELECT * FROM tab_anti t1 WHERE t1.a IN
(SELECT a FROM tab_anti t2 WHERE t2.b IN
(SELECT t1.b FROM tab_anti t3 WHERE t2.a > 1 OFFSET 0));
DROP TABLE tab_anti;

View File

@ -869,8 +869,6 @@ SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, 0);
SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, -5); SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, -5);
SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888); SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888);
SELECT width_bucket('NaN', 3.0, 4.0, 888); SELECT width_bucket('NaN', 3.0, 4.0, 888);
SELECT width_bucket('NaN'::float8, 3.0::float8, 4.0::float8, 888);
SELECT width_bucket(0, 'NaN', 4.0, 888);
SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888); SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888);
SELECT width_bucket(2.0, 3.0, '-inf', 888); SELECT width_bucket(2.0, 3.0, '-inf', 888);
SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888); SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888);

View File

@ -7,7 +7,7 @@ tests += {
'tap': { 'tap': {
'env': { 'env': {
'with_ssl': ssl_library, 'with_ssl': ssl_library,
'OPENSSL': openssl.found() ? openssl.full_path() : '', 'OPENSSL': openssl.found() ? openssl.path() : '',
}, },
'tests': [ 'tests': [
't/001_ssltests.pl', 't/001_ssltests.pl',

View File

@ -601,7 +601,6 @@ DR_intorel
DR_printtup DR_printtup
DR_sqlfunction DR_sqlfunction
DR_transientrel DR_transientrel
DSMREntryType
DSMRegistryCtxStruct DSMRegistryCtxStruct
DSMRegistryEntry DSMRegistryEntry
DWORD DWORD
@ -1291,7 +1290,6 @@ InjectionPointCacheEntry
InjectionPointCallback InjectionPointCallback
InjectionPointCondition InjectionPointCondition
InjectionPointConditionType InjectionPointConditionType
InjectionPointData
InjectionPointEntry InjectionPointEntry
InjectionPointSharedState InjectionPointSharedState
InjectionPointsCtl InjectionPointsCtl
@ -1739,9 +1737,6 @@ Name
NameData NameData
NameHashEntry NameHashEntry
NamedArgExpr NamedArgExpr
NamedDSAState
NamedDSHState
NamedDSMState
NamedLWLockTranche NamedLWLockTranche
NamedLWLockTrancheRequest NamedLWLockTrancheRequest
NamedTuplestoreScan NamedTuplestoreScan
@ -3011,7 +3006,6 @@ Tcl_Obj
Tcl_Size Tcl_Size
Tcl_Time Tcl_Time
TempNamespaceStatus TempNamespaceStatus
TestDSMRegistryHashEntry
TestDSMRegistryStruct TestDSMRegistryStruct
TestDecodingData TestDecodingData
TestDecodingTxnData TestDecodingTxnData