With GB18030, prevent SIGSEGV from reading past end of allocation.

With GB18030 as source encoding, applications could crash the server via
SQL functions convert() or convert_from().  Applications themselves
could crash after passing unterminated GB18030 input to libpq functions
PQescapeLiteral(), PQescapeIdentifier(), PQescapeStringConn(), or
PQescapeString().  Extension code could crash by passing unterminated
GB18030 input to jsonapi.h functions.  All those functions have been
intended to handle untrusted, unterminated input safely.

A crash required allocating the input such that the last byte of the
allocation was the last byte of a virtual memory page.  Some malloc()
implementations take measures against that, making the SIGSEGV hard to
reach.  Back-patch to v13 (all supported versions).
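
For readers unfamiliar with why GB18030 is singled out: determining a
character's byte length requires peeking at the byte after a high-bit-set
lead byte.  A minimal sketch of that length logic, modeled on the server's
pg_gb18030_mblen() (simplified, not a verbatim copy):

/* Sketch of GB18030 length detection; same shape as pg_gb18030_mblen(). */
static int
gb18030_char_len(const unsigned char *s)
{
	if (s[0] < 0x80)
		return 1;				/* plain ASCII byte */
	if (s[1] >= 0x30 && s[1] <= 0x39)	/* reads s[1], possibly past the end */
		return 4;				/* four-byte form */
	return 2;					/* two-byte form */
}

When unterminated input ends in a high-bit-set byte that is also the last
byte of the allocation and of a virtual memory page, that s[1] read is the
out-of-bounds access described above.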

Author: Noah Misch <noah@leadboat.com>
Author: Andres Freund <andres@anarazel.de>
Reviewed-by: Masahiko Sawada <sawada.mshk@gmail.com>
Backpatch-through: 13
Security: CVE-2025-4207
commit cbadeaca92 (parent 7279e58205), 2025-05-05 04:52:04 -07:00
7 changed files with 171 additions and 17 deletions


@@ -971,7 +971,7 @@ pg_mbcliplen(const char *mbstr, int len, int limit)
}
/*
* pg_mbcliplen with specified encoding
* pg_mbcliplen with specified encoding; string must be valid in encoding
*/
int
pg_encoding_mbcliplen(int encoding, const char *mbstr,
@@ -1569,12 +1569,12 @@ check_encoding_conversion_args(int src_encoding,
* report_invalid_encoding: complain about invalid multibyte character
*
* note: len is remaining length of string, not length of character;
* len must be greater than zero, as we always examine the first byte.
* len must be greater than zero (or we'd neglect initializing "buf").
*/
void
report_invalid_encoding(int encoding, const char *mbstr, int len)
{
int l = pg_encoding_mblen(encoding, mbstr);
int l = pg_encoding_mblen_or_incomplete(encoding, mbstr, len);
char buf[8 * 5 + 1];
char *p = buf;
int j,
@@ -1601,18 +1601,26 @@ report_invalid_encoding(int encoding, const char *mbstr, int len)
* report_untranslatable_char: complain about untranslatable character
*
* note: len is remaining length of string, not length of character;
* len must be greater than zero, as we always examine the first byte.
* len must be greater than zero (or we'd neglect initializing "buf").
*/
void
report_untranslatable_char(int src_encoding, int dest_encoding,
const char *mbstr, int len)
{
int l = pg_encoding_mblen(src_encoding, mbstr);
int l;
char buf[8 * 5 + 1];
char *p = buf;
int j,
jlimit;
/*
* We probably could use plain pg_encoding_mblen(), because
* gb18030_to_utf8() verifies before it converts. All conversions should.
* For src_encoding!=GB18030, len>0 meets pg_encoding_mblen() needs. Even
* so, be defensive, since a buggy conversion might pass invalid data.
* This is not a performance-critical path.
*/
l = pg_encoding_mblen_or_incomplete(src_encoding, mbstr, len);
jlimit = Min(l, len);
jlimit = Min(jlimit, 8); /* prevent buffer overrun */
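
To see why both reworded comments insist on len > 0, and why jlimit is
clamped here: the reporting code renders at most eight bytes of the offending
character into a fixed-size buffer, so a zero length would leave that buffer
unwritten.  A hypothetical standalone sketch of that bounding (helper name
and exact output format are illustrative, not part of the patch):

#include <stdio.h>

#define Min(x, y) ((x) < (y) ? (x) : (y))

/*
 * Dump at most 8 bytes of mbstr as hex into buf (sized 8 * 5 + 1 or larger),
 * never reading past the "len" remaining bytes or the character length "l".
 * If len were zero, the loop would not run and buf would stay uninitialized.
 */
static void
dump_char_bytes(char *buf, const char *mbstr, int l, int len)
{
	char	   *p = buf;
	int			jlimit;

	jlimit = Min(l, len);		/* don't read past the remaining input */
	jlimit = Min(jlimit, 8);	/* don't overrun buf */

	for (int j = 0; j < jlimit; j++)
		p += sprintf(p, j ? " 0x%02x" : "0x%02x", (unsigned char) mbstr[j]);
}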


@@ -698,8 +698,11 @@ json_lex_string(JsonLexContext *lex)
} while (0)
#define FAIL_AT_CHAR_END(code) \
do { \
char *term = s + pg_encoding_mblen(lex->input_encoding, s); \
lex->token_terminator = (term <= end) ? term : end; \
ptrdiff_t remaining = end - s; \
int charlen; \
charlen = pg_encoding_mblen_or_incomplete(lex->input_encoding, \
s, remaining); \
lex->token_terminator = (charlen <= remaining) ? s + charlen : end; \
return code; \
} while (0)


@@ -12,6 +12,8 @@
*/
#include "c.h"
#include <limits.h>
#include "mb/pg_wchar.h"
@@ -1597,10 +1599,27 @@ const pg_wchar_tbl pg_wchar_table[] = {
/*
* Returns the byte length of a multibyte character.
*
* Caution: when dealing with text that is not certainly valid in the
* specified encoding, the result may exceed the actual remaining
* string length. Callers that are not prepared to deal with that
* should use pg_encoding_mblen_bounded() instead.
* Choose "mblen" functions based on the input string characteristics.
* pg_encoding_mblen() can be used when ANY of these conditions are met:
*
* - The input string is zero-terminated
*
* - The input string is known to be valid in the encoding (e.g., string
* converted from database encoding)
*
* - The encoding is not GB18030 (e.g., when only database encodings are
* passed to 'encoding' parameter)
*
* encoding==GB18030 requires examining up to two bytes to determine character
* length. Therefore, callers satisfying none of those conditions must use
* pg_encoding_mblen_or_incomplete() instead, as access to mbstr[1] cannot be
* guaranteed to be within allocation bounds.
*
* When dealing with text that is not certainly valid in the specified
* encoding, the result may exceed the actual remaining string length.
* Callers that are not prepared to deal with that should use Min(remaining,
* pg_encoding_mblen_or_incomplete()). For zero-terminated strings, that and
* pg_encoding_mblen_bounded() are interchangeable.
*/
int
pg_encoding_mblen(int encoding, const char *mbstr)
@@ -1611,8 +1630,28 @@ pg_encoding_mblen(int encoding, const char *mbstr)
}
/*
* Returns the byte length of a multibyte character; but not more than
* the distance to end of string.
* Returns the byte length of a multibyte character (possibly not
* zero-terminated), or INT_MAX if too few bytes remain to determine a length.
*/
int
pg_encoding_mblen_or_incomplete(int encoding, const char *mbstr,
size_t remaining)
{
/*
* Define zero remaining as too few, even for single-byte encodings.
* pg_gb18030_mblen() reads one or two bytes; single-byte encodings read
* zero; others read one.
*/
if (remaining < 1 ||
(encoding == PG_GB18030 && IS_HIGHBIT_SET(*mbstr) && remaining < 2))
return INT_MAX;
return pg_encoding_mblen(encoding, mbstr);
}
/*
* Returns the byte length of a multibyte character; but not more than the
* distance to the terminating zero byte. For input that might lack a
* terminating zero, use Min(remaining, pg_encoding_mblen_or_incomplete()).
*/
int
pg_encoding_mblen_bounded(int encoding, const char *mbstr)
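
A minimal caller-side sketch of the rule documented above, for input that may
be neither valid nor zero-terminated; the loop and names here are
illustrative, and only pg_encoding_mblen_or_incomplete() itself comes from
the patch:

#include "c.h"
#include "mb/pg_wchar.h"

/*
 * Walk a possibly-unterminated buffer character by character without ever
 * reading past buf + buflen.  INT_MAX from pg_encoding_mblen_or_incomplete()
 * means "too few bytes remain for a whole character"; clamping against the
 * remaining length turns that, and any over-long result for invalid data,
 * into "consume the rest of the buffer".
 */
static void
walk_buffer(int encoding, const char *buf, size_t buflen)
{
	const char *s = buf;

	while (s < buf + buflen)
	{
		size_t		remaining = buflen - (s - buf);
		int			charlen;

		charlen = pg_encoding_mblen_or_incomplete(encoding, s, remaining);
		if ((size_t) charlen > remaining)
			charlen = (int) remaining;	/* incomplete trailing character */

		/* ... examine the charlen bytes starting at s ... */

		s += charlen;
	}
}

The libpq escaping paths changed below apply the same idea, except that they
treat an incomplete or over-long character as invalid input and bail out
rather than consuming it.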


@@ -554,6 +554,8 @@ extern int pg_valid_server_encoding_id(int encoding);
*/
extern void pg_encoding_set_invalid(int encoding, char *dst);
extern int pg_encoding_mblen(int encoding, const char *mbstr);
extern int pg_encoding_mblen_or_incomplete(int encoding, const char *mbstr,
size_t remaining);
extern int pg_encoding_mblen_bounded(int encoding, const char *mbstr);
extern int pg_encoding_dsplen(int encoding, const char *mbstr);
extern int pg_encoding_verifymb(int encoding, const char *mbstr, int len);


@@ -3373,7 +3373,8 @@ PQescapeStringInternal(PGconn *conn,
}
/* Slow path for possible multibyte characters */
charlen = pg_encoding_mblen(encoding, source);
charlen = pg_encoding_mblen_or_incomplete(encoding,
source, remaining);
if (remaining < charlen ||
pg_encoding_verifymbchar(encoding, source, charlen) == -1)
@@ -3513,7 +3514,8 @@ PQescapeInternal(PGconn *conn, const char *str, size_t len, bool as_ident)
int charlen;
/* Slow path for possible multibyte characters */
charlen = pg_encoding_mblen(conn->client_encoding, s);
charlen = pg_encoding_mblen_or_incomplete(conn->client_encoding,
s, remaining);
if (charlen > remaining)
{


@@ -1227,8 +1227,9 @@ pqSocketPoll(int sock, int forRead, int forWrite, time_t end_time)
*/
/*
* returns the byte length of the character beginning at s, using the
* specified encoding.
* Like pg_encoding_mblen(). Use this in callers that want the
* dynamically-linked libpq's stance on encodings, even if that means
* different behavior in different startups of the executable.
*/
int
PQmblen(const char *s, int encoding)


@@ -12,6 +12,7 @@
#include <string.h>
#include <stdio.h>
#include "common/jsonapi.h"
#include "fe_utils/psqlscan.h"
#include "fe_utils/string_utils.h"
#include "getopt_long.h"
@@ -164,6 +165,91 @@ encoding_conflicts_ascii(int encoding)
}
/*
* Confirm escaping doesn't read past the end of an allocation. Consider the
* result of malloc(4096), in the absence of freelist entries satisfying the
* allocation. On OpenBSD, reading one byte past the end of that object
* yields SIGSEGV.
*
* Run this test before the program's other tests, so freelists are minimal.
* len=4096 didn't SIGSEGV, likely due to free() calls in libpq. len=8192
* did. Use 128 KiB, to somewhat insulate the outcome from distant new free()
* calls and libc changes.
*/
static void
test_gb18030_page_multiple(pe_test_config *tc)
{
PQExpBuffer testname;
size_t input_len = 0x20000;
char *input;
/* prepare input */
input = pg_malloc(input_len);
memset(input, '-', input_len - 1);
input[input_len - 1] = 0xfe;
/* name to describe the test */
testname = createPQExpBuffer();
appendPQExpBuffer(testname, ">repeat(%c, %zu)", input[0], input_len - 1);
escapify(testname, input + input_len - 1, 1);
appendPQExpBuffer(testname, "< - GB18030 - PQescapeLiteral");
/* test itself */
PQsetClientEncoding(tc->conn, "GB18030");
report_result(tc, PQescapeLiteral(tc->conn, input, input_len) == NULL,
testname->data, "",
"input validity vs escape success", "ok");
destroyPQExpBuffer(testname);
pg_free(input);
}
/*
* Confirm json parsing doesn't read past the end of an allocation. This
* exercises wchar.c infrastructure like the true "escape" tests do, but this
* isn't an "escape" test.
*/
static void
test_gb18030_json(pe_test_config *tc)
{
PQExpBuffer raw_buf;
PQExpBuffer testname;
const char input[] = "{\"\\u\xFE";
size_t input_len = sizeof(input) - 1;
JsonLexContext *lex;
JsonSemAction sem = {0}; /* no callbacks */
JsonParseErrorType json_error;
char *error_str;
/* prepare input like test_one_vector_escape() does */
raw_buf = createPQExpBuffer();
appendBinaryPQExpBuffer(raw_buf, input, input_len);
appendPQExpBufferStr(raw_buf, NEVER_ACCESS_STR);
VALGRIND_MAKE_MEM_NOACCESS(&raw_buf->data[input_len],
raw_buf->len - input_len);
/* name to describe the test */
testname = createPQExpBuffer();
appendPQExpBuffer(testname, ">");
escapify(testname, input, input_len);
appendPQExpBuffer(testname, "< - GB18030 - pg_parse_json");
/* test itself */
lex = makeJsonLexContextCstringLen(raw_buf->data, input_len,
PG_GB18030, false);
json_error = pg_parse_json(lex, &sem);
error_str = psprintf("JsonParseErrorType %d", json_error);
report_result(tc, json_error == JSON_UNICODE_ESCAPE_FORMAT,
testname->data, "",
"diagnosed", error_str);
pfree(error_str);
pfree(lex);
destroyPQExpBuffer(testname);
destroyPQExpBuffer(raw_buf);
}
static bool
escape_literal(PGconn *conn, PQExpBuffer target,
const char *unescaped, size_t unescaped_len,
@@ -454,8 +540,18 @@ static pe_test_vector pe_test_vectors[] =
* Testcases that are not null terminated for the specified input length.
* That's interesting to verify that escape functions don't read beyond
* the intended input length.
*
* One interesting special case is GB18030, which has the odd behaviour of
* needing to read beyond the first byte to determine the length of a
* multi-byte character.
*/
TV_LEN("gbk", "\x80", 1),
TV_LEN("GB18030", "\x80", 1),
TV_LEN("GB18030", "\x80\0", 2),
TV_LEN("GB18030", "\x80\x30", 2),
TV_LEN("GB18030", "\x80\x30\0", 3),
TV_LEN("GB18030", "\x80\x30\x30", 3),
TV_LEN("GB18030", "\x80\x30\x30\0", 4),
TV_LEN("UTF-8", "\xC3\xb6 ", 1),
TV_LEN("UTF-8", "\xC3\xb6 ", 2),
};
@@ -864,6 +960,9 @@ main(int argc, char *argv[])
exit(1);
}
test_gb18030_page_multiple(&tc);
test_gb18030_json(&tc);
for (int i = 0; i < lengthof(pe_test_vectors); i++)
{
test_one_vector(&tc, &pe_test_vectors[i]);