Compare commits


No commits in common. "445bd37b19c297b56a7e3b24a69b3648918d2c31" and "c6d0ef160e948b43222020e6199977c88969537b" have entirely different histories.

5 changed files with 8 additions and 63 deletions

View File

@@ -235,7 +235,7 @@ static void find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis);
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
BlockNumber blkno, Page page,
bool sharelock, Buffer vmbuffer);
-static int lazy_scan_prune(LVRelState *vacrel, Buffer buf,
+static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
BlockNumber blkno, Page page,
Buffer vmbuffer, bool all_visible_according_to_vm,
bool *has_lpdead_items);
@@ -844,7 +844,6 @@ lazy_scan_heap(LVRelState *vacrel)
{
Buffer buf;
Page page;
-int ndeleted = 0;
bool has_lpdead_items;
bool got_cleanup_lock = false;
@@ -974,9 +973,9 @@ lazy_scan_heap(LVRelState *vacrel)
* line pointers previously marked LP_DEAD.
*/
if (got_cleanup_lock)
-ndeleted = lazy_scan_prune(vacrel, buf, blkno, page,
-vmbuffer, all_visible_according_to_vm,
-&has_lpdead_items);
+lazy_scan_prune(vacrel, buf, blkno, page,
+vmbuffer, all_visible_according_to_vm,
+&has_lpdead_items);
/*
* Now drop the buffer lock and, potentially, update the FSM.
@@ -1012,7 +1011,7 @@ lazy_scan_heap(LVRelState *vacrel)
* table has indexes. There will only be newly-freed space if we
* held the cleanup lock and lazy_scan_prune() was called.
*/
-if (got_cleanup_lock && vacrel->nindexes == 0 && ndeleted > 0 &&
+if (got_cleanup_lock && vacrel->nindexes == 0 && has_lpdead_items &&
blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
{
FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
@@ -1403,10 +1402,8 @@ cmpOffsetNumbers(const void *a, const void *b)
*
* *has_lpdead_items is set to true or false depending on whether, upon return
* from this function, any LP_DEAD items are still present on the page.
-*
-* Returns the number of tuples deleted from the page during HOT pruning.
*/
-static int
+static void
lazy_scan_prune(LVRelState *vacrel,
Buffer buf,
BlockNumber blkno,
@@ -1626,8 +1623,6 @@ lazy_scan_prune(LVRelState *vacrel,
VISIBILITYMAP_ALL_VISIBLE |
VISIBILITYMAP_ALL_FROZEN);
}
-return presult.ndeleted;
}
/*

View File

@@ -320,9 +320,9 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
mystreamer = (bbstreamer_lz4_frame *) streamer;
next_in = (uint8 *) data;
-next_out = (uint8 *) mystreamer->base.bbs_buffer.data + mystreamer->bytes_written;
+next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
avail_in = len;
-avail_out = mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written;
+avail_out = mystreamer->base.bbs_buffer.maxlen;
while (avail_in > 0)
{

View File

@@ -16,22 +16,6 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
-# Create file with some random data and an arbitrary size, useful to check
-# the solidity of the compression and decompression logic. The size of the
-# file is chosen to be around 640kB. This has proven to be large enough to
-# detect some issues related to LZ4, and low enough to not impact the runtime
-# of the test significantly.
-my $junk_data = $primary->safe_psql(
-'postgres', qq(
-SELECT string_agg(encode(sha256(i::text::bytea), 'hex'), '')
-FROM generate_series(1, 10240) s(i);));
-my $data_dir = $primary->data_dir;
-my $junk_file = "$data_dir/junk";
-open my $jf, '>', $junk_file
-or die "Could not create junk file: $!";
-print $jf $junk_data;
-close $jf;
my $backup_path = $primary->backup_dir . '/server-backup';
my $extract_path = $primary->backup_dir . '/extracted-backup';
@@ -58,14 +42,6 @@ my @test_configuration = (
'decompress_flags' => [ '-d', '-m' ],
'enabled' => check_pg_config("#define USE_LZ4 1")
},
-{
-'compression_method' => 'lz4',
-'backup_flags' => [ '--compress', 'server-lz4:5' ],
-'backup_archive' => 'base.tar.lz4',
-'decompress_program' => $ENV{'LZ4'},
-'decompress_flags' => [ '-d', '-m' ],
-'enabled' => check_pg_config("#define USE_LZ4 1")
-},
{
'compression_method' => 'zstd',
'backup_flags' => [ '--compress', 'server-zstd' ],

View File

@@ -15,22 +15,6 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
-# Create file with some random data and an arbitrary size, useful to check
-# the solidity of the compression and decompression logic. The size of the
-# file is chosen to be around 640kB. This has proven to be large enough to
-# detect some issues related to LZ4, and low enough to not impact the runtime
-# of the test significantly.
-my $junk_data = $primary->safe_psql(
-'postgres', qq(
-SELECT string_agg(encode(sha256(i::text::bytea), 'hex'), '')
-FROM generate_series(1, 10240) s(i);));
-my $data_dir = $primary->data_dir;
-my $junk_file = "$data_dir/junk";
-open my $jf, '>', $junk_file
-or die "Could not create junk file: $!";
-print $jf $junk_data;
-close $jf;
my $backup_path = $primary->backup_dir . '/client-backup';
my $extract_path = $primary->backup_dir . '/extracted-backup';
@@ -58,15 +42,6 @@ my @test_configuration = (
'output_file' => 'base.tar',
'enabled' => check_pg_config("#define USE_LZ4 1")
},
-{
-'compression_method' => 'lz4',
-'backup_flags' => [ '--compress', 'client-lz4:1' ],
-'backup_archive' => 'base.tar.lz4',
-'decompress_program' => $ENV{'LZ4'},
-'decompress_flags' => ['-d'],
-'output_file' => 'base.tar',
-'enabled' => check_pg_config("#define USE_LZ4 1")
-},
{
'compression_method' => 'zstd',
'backup_flags' => [ '--compress', 'client-zstd:5' ],

View File

@@ -119,7 +119,6 @@ PQcancelCreate(PGconn *conn)
goto oom_error;
originalHost = conn->connhost[conn->whichhost];
-cancelConn->connhost[0].type = originalHost.type;
if (originalHost.host)
{
cancelConn->connhost[0].host = strdup(originalHost.host);