Compare commits

...

3 Commits

Author SHA1 Message Date
Tom Lane
445bd37b19 Correctly copy the target host identification in PQcancelCreate.
PQcancelCreate failed to copy struct pg_conn_host's "type" field,
instead leaving it zero (a/k/a CHT_HOST_NAME).  This seemingly
has no great ill effects if it should have been CHT_UNIX_SOCKET
instead, but if it should have been CHT_HOST_ADDRESS then a
null-pointer dereference will occur when the cancelConn is used.

Bug: #18974
Reported-by: Maxim Boguk <maxim.boguk@gmail.com>
Author: Sergei Kornilov <sk@zsrv.org>
Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us>
Discussion: https://postgr.es/m/18974-575f02b2168b36b3@postgresql.org
Backpatch-through: 17
2025-07-02 15:47:59 -04:00
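
For illustration, a minimal libpq sketch of the cancel flow touched by this fix. The
connection string and query below are made up; PQcancelCreate, PQcancelBlocking,
PQcancelErrorMessage and PQcancelFinish are the PostgreSQL 17 cancel APIs the commit
refers to. Connecting via "hostaddr" (with no host name) is what leaves the host entry
in the CHT_HOST_ADDRESS case that previously crashed.

/* Illustrative client: cancel an in-flight query on a hostaddr-based
 * connection, the case where the uncopied "type" field could crash. */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    /* hostaddr (no host name) maps to CHT_HOST_ADDRESS internally */
    PGconn *conn = PQconnectdb("hostaddr=127.0.0.1 port=5432 dbname=postgres");

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    /* start a long-running query without waiting for its result */
    PQsendQuery(conn, "SELECT pg_sleep(60)");

    /* build a cancel connection from the original connection ... */
    PGcancelConn *cancelConn = PQcancelCreate(conn);

    /* ... and use it; before the fix this could dereference a NULL host
     * name because connhost[0].type had been left as CHT_HOST_NAME */
    if (!PQcancelBlocking(cancelConn))
        fprintf(stderr, "cancel failed: %s", PQcancelErrorMessage(cancelConn));

    PQcancelFinish(cancelConn);
    PQfinish(conn);
    return 0;
}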
Masahiko Sawada
792238c8b4 Fix missing FSM vacuum opportunities on tables without indexes.
Commit c120550edb86 optimized the vacuuming of relations without
indexes (a.k.a. one-pass strategy) by directly marking dead item IDs
as LP_UNUSED. However, the periodic FSM vacuum was still checking if
dead item IDs had been marked as LP_DEAD when attempting to vacuum the
FSM every VACUUM_FSM_EVERY_PAGES blocks. This condition was never met
due to the optimization, resulting in missed FSM vacuum
opportunities.

This commit modifies the periodic FSM vacuum condition to use the
number of tuples deleted during HOT pruning. This count includes items
marked as either LP_UNUSED or LP_REDIRECT, both of which are expected
to result in new free space to report.

Back-patch to v17 where the vacuum optimization for tables with no
indexes was introduced.

Reviewed-by: Melanie Plageman <melanieplageman@gmail.com>
Discussion: https://postgr.es/m/CAD21AoBL8m6B9GSzQfYxVaEgvD7-Kr3AJaS-hJPHC+avm-29zw@mail.gmail.com
Backpatch-through: 17
2025-07-01 23:25:15 -07:00
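
To make the reasoning concrete, here is a small self-contained model of the per-block
FSM-vacuum decision described above. It is not PostgreSQL source: the constant value and
the per-block counts are placeholders. It only shows why the old condition could never
fire for index-less tables while the tuples-deleted count still reflects freed space.

/* Stand-alone model of the trigger condition; VACUUM_FSM_EVERY_PAGES and
 * the per-block numbers are placeholders, not the real values. */
#include <stdbool.h>
#include <stdio.h>

#define VACUUM_FSM_EVERY_PAGES 512      /* placeholder for the real constant */

int
main(void)
{
    int     nindexes = 0;               /* table without indexes: one-pass strategy */
    int     next_fsm_block_to_vacuum = 0;

    for (int blkno = 0; blkno < 2048; blkno++)
    {
        /* One-pass pruning marks dead item IDs LP_UNUSED directly, so no
         * LP_DEAD items remain on the page ... */
        bool    has_lpdead_items = false;

        /* ... yet pruning still freed space, which ndeleted reflects. */
        int     ndeleted = 10;

        /* Old condition: never true for index-less tables. */
        bool    old_trigger = (nindexes == 0 && has_lpdead_items &&
                               blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES);

        /* New condition: keyed on the tuples deleted by HOT pruning. */
        bool    new_trigger = (nindexes == 0 && ndeleted > 0 &&
                               blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES);

        if (new_trigger && !old_trigger)
        {
            printf("block %d: FSM vacuum now happens (was skipped before)\n", blkno);
            next_fsm_block_to_vacuum = blkno;   /* stand-in for FreeSpaceMapVacuumRange() */
        }
    }
    return 0;
}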
Michael Paquier
0740034318 Fix bug in archive streamer with LZ4 decompression
When decompressing some input data, the calculation for the initial
starting point and the initial size were incorrect, potentially leading
to failures when decompressing contents with LZ4.  These initialization
points are fixed in this commit, bringing the logic closer to what
exists for gzip and zstd.

The compressed data itself is fine (for example, backups taken with LZ4
can still be decompressed with a standalone "lz4" command); only the
decompression code reading the input data was affected by this issue.

This code path affects pg_basebackup and pg_verifybackup, which can use
the LZ4 decompression routines through an archive streamer, as well as
any tools that use the archive streamers in src/fe_utils/.

The issue is easier to reproduce with files that compress poorly, such
as ones filled with random data, at sizes of at least 512kB, but it
could happen with any contents stored in a data folder.  Some tests are
added based on this idea, using a file filled with random bytes
generated by the backend and written at the root of the data folder.
This has proven sufficient to reproduce the original problem.

Author: Mikhail Gribkov <youzhick@gmail.com>
Discussion: https://postgr.es/m/CAMEv5_uQS1Hg6KCaEP2JkrTBbZ-nXQhxomWrhYQvbdzR-zy-wA@mail.gmail.com
Backpatch-through: 15
2025-07-02 13:48:43 +09:00
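
The essence of the fix is that the output position and the remaining room must both be
offset by the bytes already buffered but not yet flushed. Below is a self-contained
sketch of that pattern; the streamer struct and decompress_chunk() stub are hypothetical
stand-ins for the bbstreamer/LZ4 frame machinery, kept only to show the arithmetic.

/* Self-contained sketch of the buffer-position bug fixed above. */
#include <stdio.h>
#include <string.h>

typedef struct
{
    char    data[64];           /* persistent output buffer, reused across calls */
    size_t  maxlen;             /* its capacity */
    size_t  bytes_written;      /* output already buffered but not yet flushed */
} streamer;

/* stand-in for one decompression call: just copies input to output */
static size_t
decompress_chunk(char *out, size_t avail_out, const char *in, size_t len)
{
    size_t  n = len < avail_out ? len : avail_out;

    memcpy(out, in, n);
    return n;
}

static void
consume(streamer *st, const char *in, size_t len)
{
    /* The buggy version started at the beginning of the buffer every time:
     *     next_out  = st->data;
     *     avail_out = st->maxlen;
     * which overwrites data still sitting in the buffer and overstates the
     * available room.  The fix offsets both by bytes_written: */
    char   *next_out = st->data + st->bytes_written;
    size_t  avail_out = st->maxlen - st->bytes_written;

    st->bytes_written += decompress_chunk(next_out, avail_out, in, len);
}

int
main(void)
{
    streamer st = {.maxlen = sizeof(st.data), .bytes_written = 0};

    consume(&st, "first chunk ", 12);
    consume(&st, "second chunk", 12);   /* must land after the first one */
    printf("%.*s\n", (int) st.bytes_written, st.data);
    return 0;
}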
5 changed files with 63 additions and 8 deletions

View File

@@ -235,7 +235,7 @@ static void find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis);
 static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
                                    BlockNumber blkno, Page page,
                                    bool sharelock, Buffer vmbuffer);
-static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
+static int  lazy_scan_prune(LVRelState *vacrel, Buffer buf,
                             BlockNumber blkno, Page page,
                             Buffer vmbuffer, bool all_visible_according_to_vm,
                             bool *has_lpdead_items);
@@ -844,6 +844,7 @@ lazy_scan_heap(LVRelState *vacrel)
     {
         Buffer      buf;
         Page        page;
+        int         ndeleted = 0;
         bool        has_lpdead_items;
         bool        got_cleanup_lock = false;
@@ -973,9 +974,9 @@ lazy_scan_heap(LVRelState *vacrel)
          * line pointers previously marked LP_DEAD.
          */
         if (got_cleanup_lock)
-            lazy_scan_prune(vacrel, buf, blkno, page,
-                            vmbuffer, all_visible_according_to_vm,
-                            &has_lpdead_items);
+            ndeleted = lazy_scan_prune(vacrel, buf, blkno, page,
+                                       vmbuffer, all_visible_according_to_vm,
+                                       &has_lpdead_items);

         /*
          * Now drop the buffer lock and, potentially, update the FSM.
@@ -1011,7 +1012,7 @@
          * table has indexes. There will only be newly-freed space if we
          * held the cleanup lock and lazy_scan_prune() was called.
          */
-        if (got_cleanup_lock && vacrel->nindexes == 0 && has_lpdead_items &&
+        if (got_cleanup_lock && vacrel->nindexes == 0 && ndeleted > 0 &&
             blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
         {
             FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
@@ -1402,8 +1403,10 @@ cmpOffsetNumbers(const void *a, const void *b)
  *
  * *has_lpdead_items is set to true or false depending on whether, upon return
  * from this function, any LP_DEAD items are still present on the page.
+ *
+ * Returns the number of tuples deleted from the page during HOT pruning.
  */
-static void
+static int
 lazy_scan_prune(LVRelState *vacrel,
                 Buffer buf,
                 BlockNumber blkno,
@@ -1623,6 +1626,8 @@ lazy_scan_prune(LVRelState *vacrel,
                               VISIBILITYMAP_ALL_VISIBLE |
                               VISIBILITYMAP_ALL_FROZEN);
     }
+
+    return presult.ndeleted;
 }

 /*

View File

@@ -320,9 +320,9 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
     mystreamer = (bbstreamer_lz4_frame *) streamer;

     next_in = (uint8 *) data;
-    next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
+    next_out = (uint8 *) mystreamer->base.bbs_buffer.data + mystreamer->bytes_written;
     avail_in = len;
-    avail_out = mystreamer->base.bbs_buffer.maxlen;
+    avail_out = mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written;

     while (avail_in > 0)
     {

View File

@@ -16,6 +16,22 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
 $primary->init(allows_streaming => 1);
 $primary->start;

+# Create file with some random data and an arbitrary size, useful to check
+# the solidity of the compression and decompression logic. The size of the
+# file is chosen to be around 640kB. This has proven to be large enough to
+# detect some issues related to LZ4, and low enough to not impact the runtime
+# of the test significantly.
+my $junk_data = $primary->safe_psql(
+	'postgres', qq(
+		SELECT string_agg(encode(sha256(i::text::bytea), 'hex'), '')
+		FROM generate_series(1, 10240) s(i);));
+my $data_dir = $primary->data_dir;
+my $junk_file = "$data_dir/junk";
+open my $jf, '>', $junk_file
+  or die "Could not create junk file: $!";
+print $jf $junk_data;
+close $jf;
+
 my $backup_path = $primary->backup_dir . '/server-backup';
 my $extract_path = $primary->backup_dir . '/extracted-backup';
@@ -42,6 +58,14 @@ my @test_configuration = (
 		'decompress_flags' => [ '-d', '-m' ],
 		'enabled' => check_pg_config("#define USE_LZ4 1")
 	},
+	{
+		'compression_method' => 'lz4',
+		'backup_flags' => [ '--compress', 'server-lz4:5' ],
+		'backup_archive' => 'base.tar.lz4',
+		'decompress_program' => $ENV{'LZ4'},
+		'decompress_flags' => [ '-d', '-m' ],
+		'enabled' => check_pg_config("#define USE_LZ4 1")
+	},
 	{
 		'compression_method' => 'zstd',
 		'backup_flags' => [ '--compress', 'server-zstd' ],

View File

@@ -15,6 +15,22 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
 $primary->init(allows_streaming => 1);
 $primary->start;

+# Create file with some random data and an arbitrary size, useful to check
+# the solidity of the compression and decompression logic. The size of the
+# file is chosen to be around 640kB. This has proven to be large enough to
+# detect some issues related to LZ4, and low enough to not impact the runtime
+# of the test significantly.
+my $junk_data = $primary->safe_psql(
+	'postgres', qq(
+		SELECT string_agg(encode(sha256(i::text::bytea), 'hex'), '')
+		FROM generate_series(1, 10240) s(i);));
+my $data_dir = $primary->data_dir;
+my $junk_file = "$data_dir/junk";
+open my $jf, '>', $junk_file
+  or die "Could not create junk file: $!";
+print $jf $junk_data;
+close $jf;
+
 my $backup_path = $primary->backup_dir . '/client-backup';
 my $extract_path = $primary->backup_dir . '/extracted-backup';
@@ -42,6 +58,15 @@ my @test_configuration = (
 		'output_file' => 'base.tar',
 		'enabled' => check_pg_config("#define USE_LZ4 1")
 	},
+	{
+		'compression_method' => 'lz4',
+		'backup_flags' => [ '--compress', 'client-lz4:1' ],
+		'backup_archive' => 'base.tar.lz4',
+		'decompress_program' => $ENV{'LZ4'},
+		'decompress_flags' => ['-d'],
+		'output_file' => 'base.tar',
+		'enabled' => check_pg_config("#define USE_LZ4 1")
+	},
 	{
 		'compression_method' => 'zstd',
 		'backup_flags' => [ '--compress', 'client-zstd:5' ],

View File

@@ -119,6 +119,7 @@ PQcancelCreate(PGconn *conn)
 		goto oom_error;

 	originalHost = conn->connhost[conn->whichhost];
+	cancelConn->connhost[0].type = originalHost.type;
 	if (originalHost.host)
 	{
 		cancelConn->connhost[0].host = strdup(originalHost.host);