Compare commits


No commits in common. "036decbba2af60dfed4afe564689a0475ef844ab" and "a912118c6055f683d51763546ee3c14b6e732893" have entirely different histories.

4 changed files with 65 additions and 134 deletions

contrib/pgstattuple/pgstattuple.c

@@ -422,7 +422,7 @@ pgstat_btree_page(pgstattuple_type *stat, Relation rel, BlockNumber blkno,
         /* fully empty page */
         stat->free_space += BLCKSZ;
     }
-    else if (PageGetSpecialSize(page) == MAXALIGN(sizeof(BTPageOpaqueData)))
+    else
     {
         BTPageOpaque opaque;
@@ -456,16 +456,10 @@ pgstat_hash_page(pgstattuple_type *stat, Relation rel, BlockNumber blkno,
     Buffer      buf;
     Page        page;
 
-    buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
-    LockBuffer(buf, HASH_READ);
+    buf = _hash_getbuf_with_strategy(rel, blkno, HASH_READ, 0, bstrategy);
     page = BufferGetPage(buf);
 
-    if (PageIsNew(page))
-    {
-        /* fully empty page */
-        stat->free_space += BLCKSZ;
-    }
-    else if (PageGetSpecialSize(page) == MAXALIGN(sizeof(HashPageOpaqueData)))
+    if (PageGetSpecialSize(page) == MAXALIGN(sizeof(HashPageOpaqueData)))
     {
         HashPageOpaque opaque;
@@ -506,23 +506,17 @@ pgstat_gist_page(pgstattuple_type *stat, Relation rel, BlockNumber blkno,
     buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
     LockBuffer(buf, GIST_SHARE);
+    gistcheckpage(rel, buf);
     page = BufferGetPage(buf);
 
-    if (PageIsNew(page))
+    if (GistPageIsLeaf(page))
     {
-        /* fully empty page */
-        stat->free_space += BLCKSZ;
+        pgstat_index_page(stat, page, FirstOffsetNumber,
+                          PageGetMaxOffsetNumber(page));
     }
-    else if (PageGetSpecialSize(page) == MAXALIGN(sizeof(GISTPageOpaqueData)))
+    else
     {
-        if (GistPageIsLeaf(page))
-        {
-            pgstat_index_page(stat, page, FirstOffsetNumber,
-                              PageGetMaxOffsetNumber(page));
-        }
-        else
-        {
-            /* root or node */
-        }
+        /* root or node */
     }
     UnlockReleaseBuffer(buf);
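
(For context: the hash hunk above trades a raw buffer read for the hash AM's
checked helper. The sketch below is illustrative only and not part of the diff;
the function name is invented, and it contrasts the two idioms using only calls
that appear in the hunks.)

    static void
    hash_page_access_sketch(Relation rel, BlockNumber blkno,
                            BufferAccessStrategy bstrategy)
    {
        Buffer      buf;

        /*
         * Left side: raw read plus share lock.  No page sanity checks run,
         * so even new or oddly-formed pages can still be examined.
         */
        buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
        LockBuffer(buf, HASH_READ);
        UnlockReleaseBuffer(buf);

        /*
         * Right side: the hash AM helper.  It takes the same lock but also
         * runs _hash_checkpage(), which errors out on pages that do not look
         * like valid hash pages (a zero flags argument skips only the
         * page-type check, not the basic sanity checks).
         */
        buf = _hash_getbuf_with_strategy(rel, blkno, HASH_READ, 0, bstrategy);
        UnlockReleaseBuffer(buf);
    }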

src/test/modules/test_json_parser/README

@@ -6,12 +6,10 @@ This module contains two programs for testing the json parsers.
 - `test_json_parser_incremental` is for testing the incremental parser, It
   reads in a file and passes it in very small chunks (default is 60 bytes at a
   time) to the incremental parser. It's not meant to be a speed test but to
-  test the accuracy of the incremental parser. The option "-c nn" specifies an
-  alternative chunk size, "-r nn" runs a range of chunk sizes down to one byte
-  on the same input (with output separated by null bytes), and "-s" specifies
-  using semantic routines. The semantic routines re-output the json, although
-  not in a very pretty form. The required non-option argument is the input file
-  name.
+  test the accuracy of the incremental parser. There are two option arguments,
+  "-c nn" specifies an alternative chunk size, and "-s" specifies using
+  semantic routines. The semantic routines re-output the json, although not in
+  a very pretty form. The required non-option argument is the input file name.
 - `test_json_parser_perf` is for speed testing both the standard
   recursive descent parser and the non-recursive incremental
   parser. If given the `-i` flag it uses the non-recursive parser,
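
(Illustrative usage, not taken from the diff: the right-hand description
corresponds to invocations such as `test_json_parser_incremental -c 32 -s
input.json`, while the left-hand one additionally allows
`test_json_parser_incremental -r 60 input.json` to sweep every chunk size from
60 down to 1 in a single run, with each run's output separated by null bytes.)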

src/test/modules/test_json_parser/t/002_inline.pl

@@ -33,37 +33,23 @@ sub test
     print $fh "$json";
     close($fh);
 
-    # The -r mode runs the parser in a loop, with output separated by nulls.
-    # Unpack that as a list of null-terminated ASCII strings (Z*) and check that
-    # each run produces the same result.
-    my ($all_stdout, $all_stderr) =
-      run_command([ $exe, "-r", $chunk, $fname ]);
-
-    my @stdout = unpack("(Z*)*", $all_stdout);
-    my @stderr = unpack("(Z*)*", $all_stderr);
-
-    is(scalar @stdout, $chunk, "$name: stdout has correct number of entries");
-    is(scalar @stderr, $chunk, "$name: stderr has correct number of entries");
-
-    my $i = 0;
     foreach my $size (reverse(1 .. $chunk))
     {
+        my ($stdout, $stderr) = run_command([ $exe, "-c", $size, $fname ]);
+
         if (defined($params{error}))
         {
-            unlike($stdout[$i], qr/SUCCESS/,
+            unlike($stdout, qr/SUCCESS/,
                 "$name, chunk size $size: test fails");
-            like($stderr[$i], $params{error},
+            like($stderr, $params{error},
                 "$name, chunk size $size: correct error output");
         }
         else
         {
-            like($stdout[$i], qr/SUCCESS/,
+            like($stdout, qr/SUCCESS/,
                 "$name, chunk size $size: test succeeds");
-            is($stderr[$i], "", "$name, chunk size $size: no error output");
+            is($stderr, "", "$name, chunk size $size: no error output");
         }
-        $i++;
     }
 }
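
(The removed Perl code depends on -r mode emitting one output run per chunk
size, each terminated by a null byte, which unpack("(Z*)*", ...) splits back
into a list. A standalone C sketch of that framing convention, with invented
sample data:)

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        /* two runs separated by NUL terminators, as -r mode would emit */
        const char  buf[] = "SUCCESS!\0SUCCESS!\0";
        size_t      len = sizeof(buf) - 1; /* drop the literal's own NUL */
        const char *p = buf;

        while (p < buf + len)
        {
            printf("entry: %s\n", p);   /* each run is NUL-terminated */
            p += strlen(p) + 1;         /* step over the terminator */
        }
        return 0;
    }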

src/test/modules/test_json_parser/test_json_parser_incremental.c

@@ -12,14 +12,9 @@
  * the parser in very small chunks. In practice you would normally use
  * much larger chunks, but doing this makes it more likely that the
  * full range of increment handling, especially in the lexer, is exercised.
  *
  * If the "-c SIZE" option is provided, that chunk size is used instead
  * of the default of 60.
  *
- * If the "-r SIZE" option is provided, a range of chunk sizes from SIZE down to
- * 1 are run sequentially. A null byte is printed to the streams after each
- * iteration.
- *
  * If the -s flag is given, the program does semantic processing. This should
  * just mirror back the json, albeit with white space changes.
  *
@@ -87,24 +82,20 @@ main(int argc, char **argv)
     StringInfoData json;
     int         n_read;
     size_t      chunk_size = DEFAULT_CHUNK_SIZE;
-    bool        run_chunk_ranges = false;
     struct stat statbuf;
+    off_t       bytes_left;
     JsonSemAction *testsem = &nullSemAction;
     char       *testfile;
     int         c;
     bool        need_strings = false;
-    int         ret = 0;
 
     pg_logging_init(argv[0]);
 
-    while ((c = getopt(argc, argv, "r:c:s")) != -1)
+    while ((c = getopt(argc, argv, "c:s")) != -1)
     {
         switch (c)
         {
-            case 'r':           /* chunk range */
-                run_chunk_ranges = true;
-                /* fall through */
-            case 'c':           /* chunk size */
+            case 'c':           /* chunksize */
                 chunk_size = strtou64(optarg, NULL, 10);
                 if (chunk_size > BUFSIZE)
                     pg_fatal("chunk size cannot exceed %d", BUFSIZE);
@@ -130,6 +121,7 @@ main(int argc, char **argv)
         exit(1);
     }
 
+    makeJsonLexContextIncremental(&lex, PG_UTF8, need_strings);
     initStringInfo(&json);
 
     if ((json_file = fopen(testfile, PG_BINARY_R)) == NULL)
@@ -138,90 +130,57 @@ main(int argc, char **argv)
     if (fstat(fileno(json_file), &statbuf) != 0)
         pg_fatal("error statting input: %m");
 
-    do
-    {
-        /*
-         * This outer loop only repeats in -r mode. Reset the parse state and
-         * our position in the input file for the inner loop, which performs
-         * the incremental parsing.
-         */
-        off_t       bytes_left = statbuf.st_size;
-        size_t      to_read = chunk_size;
-
-        makeJsonLexContextIncremental(&lex, PG_UTF8, need_strings);
-        rewind(json_file);
-        resetStringInfo(&json);
-
-        for (;;)
-        {
-            /* We will break when there's nothing left to read */
-            if (bytes_left < to_read)
-                to_read = bytes_left;
-
-            n_read = fread(buff, 1, to_read, json_file);
-            if (n_read < to_read)
-                pg_fatal("error reading input file: %d", ferror(json_file));
-
-            appendBinaryStringInfo(&json, buff, n_read);
-
-            /*
-             * Append some trailing junk to the buffer passed to the parser.
-             * This helps us ensure that the parser does the right thing even
-             * if the chunk isn't terminated with a '\0'.
-             */
-            appendStringInfoString(&json, "1+23 trailing junk");
-            bytes_left -= n_read;
-
-            if (bytes_left > 0)
-            {
-                result = pg_parse_json_incremental(&lex, testsem,
-                                                   json.data, n_read,
-                                                   false);
-                if (result != JSON_INCOMPLETE)
-                {
-                    fprintf(stderr, "%s\n", json_errdetail(result, &lex));
-                    ret = 1;
-                    goto cleanup;
-                }
-                resetStringInfo(&json);
-            }
-            else
-            {
-                result = pg_parse_json_incremental(&lex, testsem,
-                                                   json.data, n_read,
-                                                   true);
-                if (result != JSON_SUCCESS)
-                {
-                    fprintf(stderr, "%s\n", json_errdetail(result, &lex));
-                    ret = 1;
-                    goto cleanup;
-                }
-                if (!need_strings)
-                    printf("SUCCESS!\n");
-                break;
-            }
-        }
-
-cleanup:
-        freeJsonLexContext(&lex);
-
-        /*
-         * In -r mode, separate output with nulls so that the calling test can
-         * split it up, decrement the chunk size, and loop back to the top.
-         * All other modes immediately fall out of the loop and exit.
-         */
-        if (run_chunk_ranges)
-        {
-            fputc('\0', stdout);
-            fputc('\0', stderr);
-        }
-    } while (run_chunk_ranges && (--chunk_size > 0));
+    bytes_left = statbuf.st_size;
+
+    for (;;)
+    {
+        /* We will break when there's nothing left to read */
+        if (bytes_left < chunk_size)
+            chunk_size = bytes_left;
+
+        n_read = fread(buff, 1, chunk_size, json_file);
+        if (n_read < chunk_size)
+            pg_fatal("error reading input file: %d", ferror(json_file));
+
+        appendBinaryStringInfo(&json, buff, n_read);
+
+        /*
+         * Append some trailing junk to the buffer passed to the parser. This
+         * helps us ensure that the parser does the right thing even if the
+         * chunk isn't terminated with a '\0'.
+         */
+        appendStringInfoString(&json, "1+23 trailing junk");
+        bytes_left -= n_read;
+
+        if (bytes_left > 0)
+        {
+            result = pg_parse_json_incremental(&lex, testsem,
+                                               json.data, n_read,
+                                               false);
+            if (result != JSON_INCOMPLETE)
+            {
+                fprintf(stderr, "%s\n", json_errdetail(result, &lex));
+                exit(1);
+            }
+            resetStringInfo(&json);
+        }
+        else
+        {
+            result = pg_parse_json_incremental(&lex, testsem,
+                                               json.data, n_read,
+                                               true);
+            if (result != JSON_SUCCESS)
+            {
+                fprintf(stderr, "%s\n", json_errdetail(result, &lex));
+                exit(1);
+            }
+            if (!need_strings)
+                printf("SUCCESS!\n");
+            break;
+        }
+    }
 
     fclose(json_file);
     free(json.data);
-    return ret;
+    exit(0);
 }
 
 /*
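
(To make the removed left-side control flow concrete, here is a standalone
sketch of the -r driver pattern: re-run one incremental consumer over the same
input at every chunk size from N down to 1, writing a null byte to both streams
after each run so a caller can split the runs apart. The parser is replaced by
a trivial stand-in; everything below is illustrative, not PostgreSQL code.)

    #include <stdio.h>
    #include <string.h>

    /* stand-in for pg_parse_json_incremental(); always succeeds here */
    static int
    consume_chunk(const char *chunk, size_t len, int is_last)
    {
        (void) chunk;
        (void) len;
        if (is_last)
            printf("SUCCESS!\n");
        return 0;
    }

    int
    main(void)
    {
        const char  input[] = "{\"key\": [1, 2, 3]}";
        size_t      total = strlen(input);

        for (size_t chunk_size = 5; chunk_size > 0; chunk_size--)
        {
            size_t      off = 0;

            while (off < total)
            {
                size_t      n = (total - off < chunk_size) ? total - off : chunk_size;

                if (consume_chunk(input + off, n, off + n == total) != 0)
                    return 1;
                off += n;
            }

            /* separate this run's output from the next, as -r mode does */
            fputc('\0', stdout);
            fputc('\0', stderr);
        }
        return 0;
    }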