Compare commits


No commits in common. "2f35c14cfb3dadede883a7d8f458e5a15f13a97b" and "65c5864d7fac46516f17ee89085e349a87ee5bd7" have entirely different histories.

11 changed files with 43 additions and 39 deletions

View File

@@ -53,7 +53,6 @@ tests += {
   'bd': meson.current_build_dir(),
   'regress': {
     'sql': [
-      'security',
       'seg',
     ],
   },

View File

@@ -381,7 +381,7 @@ pgxml_xpath(text *document, xmlChar *xpath, xpath_workspace *workspace)
 {
     workspace->doctree = xmlReadMemory((char *) VARDATA_ANY(document),
                                        docsize, NULL, NULL,
-                                       XML_PARSE_HUGE | XML_PARSE_NOENT);
+                                       XML_PARSE_NOENT);
     if (workspace->doctree != NULL)
     {
         workspace->ctxt = xmlXPathNewContext(workspace->doctree);
@@ -626,7 +626,7 @@ xpath_table(PG_FUNCTION_ARGS)
     if (xmldoc)
         doctree = xmlReadMemory(xmldoc, strlen(xmldoc),
                                 NULL, NULL,
-                                XML_PARSE_HUGE | XML_PARSE_NOENT);
+                                XML_PARSE_NOENT);
     else                        /* treat NULL as not well-formed */
         doctree = NULL;
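
A hedged aside on what dropping the flag changes: without XML_PARSE_HUGE, libxml2 re-enables its default hardening limits (for instance the cap on the size of a single text node), so oversized input is rejected instead of parsed, while ordinary documents parse as before. A minimal standalone sketch, not part of the patch, exercising the same option set:

#include <stdio.h>
#include <string.h>
#include <libxml/parser.h>

int
main(void)
{
    const char *doc = "<root>hello</root>";
    /* Same flags the patched call uses: substitute entities, no HUGE. */
    xmlDocPtr   d = xmlReadMemory(doc, (int) strlen(doc), NULL, NULL,
                                  XML_PARSE_NOENT);

    printf("parse %s\n", d ? "ok" : "failed");
    if (d)
        xmlFreeDoc(d);
    return 0;
}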

View File

@@ -87,7 +87,7 @@ xslt_process(PG_FUNCTION_ARGS)
     /* Parse document */
     doctree = xmlReadMemory((char *) VARDATA_ANY(doct),
                             VARSIZE_ANY_EXHDR(doct), NULL, NULL,
-                            XML_PARSE_HUGE | XML_PARSE_NOENT);
+                            XML_PARSE_NOENT);
 
     if (doctree == NULL)
         xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
@@ -96,7 +96,7 @@ xslt_process(PG_FUNCTION_ARGS)
     /* Same for stylesheet */
     ssdoc = xmlReadMemory((char *) VARDATA_ANY(ssheet),
                           VARSIZE_ANY_EXHDR(ssheet), NULL, NULL,
-                          XML_PARSE_HUGE | XML_PARSE_NOENT);
+                          XML_PARSE_NOENT);
 
     if (ssdoc == NULL)
         xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,

View File

@@ -1310,7 +1310,7 @@ CopyFrom(CopyFromState cstate)
     if (cstate->opts.save_error_to != COPY_SAVE_ERROR_TO_ERROR &&
         cstate->num_errors > 0)
         ereport(NOTICE,
-                errmsg_plural("%llu row were skipped due to data type incompatibility",
+                errmsg_plural("%llu row was skipped due to data type incompatibility",
                               "%llu rows were skipped due to data type incompatibility",
                               (unsigned long long) cstate->num_errors,
                               (unsigned long long) cstate->num_errors));
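
The order of the two format strings matters: errmsg_plural(), like ngettext(), uses the first string when the count is one and the second otherwise, so the n == 1 form must read "row was". A toy illustration of the selection rule (pick_plural is a made-up stand-in, not PostgreSQL's API):

#include <stdio.h>

static const char *
pick_plural(const char *singular, const char *plural, unsigned long long n)
{
    return (n == 1) ? singular : plural;    /* English-only rule */
}

int
main(void)
{
    unsigned long long n = 1;

    printf(pick_plural("%llu row was skipped\n",
                       "%llu rows were skipped\n", n), n);
    return 0;
}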

View File

@@ -303,6 +303,7 @@ pgwin32_socket(int af, int type, int protocol)
     if (ioctlsocket(s, FIONBIO, &on))
     {
         TranslateSocketError();
+        closesocket(s);
         return INVALID_SOCKET;
     }
     errno = 0;
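
The added closesocket() plugs a descriptor leak: once the socket has been created, every early error return must release it. A POSIX-flavored sketch of the same idiom (stand-in names, not the actual Windows code):

#include <fcntl.h>
#include <unistd.h>
#include <sys/socket.h>

static int
make_nonblocking_socket(void)
{
    int     s = socket(AF_INET, SOCK_STREAM, 0);

    if (s < 0)
        return -1;
    if (fcntl(s, F_SETFL, O_NONBLOCK) < 0)
    {
        close(s);               /* the cleanup the patch adds via closesocket() */
        return -1;
    }
    return s;
}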

View File

@@ -890,7 +890,7 @@ DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
 /*
  * Parse XLOG_HEAP_INSERT (not MULTI_INSERT!) records into tuplebufs.
  *
- * Deletes can contain the new tuple.
+ * Inserts can contain the new tuple.
  */
 static void
 DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)

View File

@@ -263,9 +263,9 @@ pgstat_beinit(void)
          * Assign the MyBEEntry for an auxiliary process. Since it doesn't
          * have a BackendId, the slot is statically allocated based on the
          * auxiliary process type (MyAuxProcType). Backends use slots indexed
-         * in the range from 1 to MaxBackends (inclusive), so we use
-         * MaxBackends + AuxBackendType + 1 as the index of the slot for an
-         * auxiliary process.
+         * in the range from 0 to MaxBackends (exclusive), so we use
+         * MaxBackends + AuxProcType as the index of the slot for an auxiliary
+         * process.
          */
         MyBEEntry = &BackendStatusArray[MaxBackends + MyAuxProcType];
     }
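
A toy sketch of the slot arithmetic the corrected comment describes (the constants are illustrative, not PostgreSQL's): backends occupy array slots 0 through MaxBackends - 1, so the auxiliary slots simply follow them.

#include <stdio.h>

#define MAX_BACKENDS 100        /* assumed value for illustration */

int
main(void)
{
    int     backend_id = 1;     /* BackendIds are 1-based */
    int     aux_type = 2;       /* e.g. third value of an AuxProcType-like enum */

    printf("backend slot: %d\n", backend_id - 1);           /* 0 .. 99 */
    printf("aux slot:     %d\n", MAX_BACKENDS + aux_type);  /* 100 .. */
    return 0;
}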

View File

@@ -1688,8 +1688,8 @@ xml_doctype_in_content(const xmlChar *str)
  * xmloption_arg, but a DOCTYPE node in the input can force DOCUMENT mode).
  *
  * If parsed_nodes isn't NULL and the input is not an XML document, the list
- * of parsed nodes from the xmlParseInNodeContext call will be returned to
- * *parsed_nodes.
+ * of parsed nodes from the xmlParseBalancedChunkMemory call will be returned
+ * to *parsed_nodes.
  *
  * Errors normally result in ereport(ERROR), but if escontext is an
  * ErrorSaveContext, then "safe" errors are reported there instead, and the
@@ -1795,7 +1795,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
         doc = xmlCtxtReadDoc(ctxt, utf8string,
                              NULL,
                              "UTF-8",
-                             XML_PARSE_NOENT | XML_PARSE_DTDATTR | XML_PARSE_HUGE
+                             XML_PARSE_NOENT | XML_PARSE_DTDATTR
                              | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS));
         if (doc == NULL || xmlerrcxt->err_occurred)
         {
@@ -1828,30 +1828,10 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
         /* allow empty content */
         if (*(utf8string + count))
         {
-            const char *data;
-            xmlNodePtr  root;
-            xmlNodePtr  lst;
-            xmlParserErrors xml_error;
-
-            data = (const char *) (utf8string + count);
-
-            /*
-             * Create a fake root node. The xmlNewDoc() function creates
-             * an XML document without any nodes, and this is required for
-             * xmlParseInNodeContext() that is able to handle
-             * XML_PARSE_HUGE.
-             */
-            root = xmlNewNode(NULL, (const xmlChar *) "content-root");
-            if (root == NULL || xmlerrcxt->err_occurred)
-                xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
-                            "could not allocate xml node");
-            xmlDocSetRootElement(doc, root);
-
-            /* Try to parse string with using root node context. */
-            xml_error = xmlParseInNodeContext(root, data, strlen(data),
-                                              XML_PARSE_HUGE,
-                                              parsed_nodes ? parsed_nodes : &lst);
-            if (xml_error != XML_ERR_OK || xmlerrcxt->err_occurred)
+            res_code = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0,
+                                                   utf8string + count,
+                                                   parsed_nodes);
+            if (res_code != 0 || xmlerrcxt->err_occurred)
             {
                 xml_errsave(escontext, xmlerrcxt,
                             ERRCODE_INVALID_XML_CONTENT,
@@ -4364,7 +4344,7 @@ xpath_internal(text *xpath_expr_text, xmltype *data, ArrayType *namespaces,
         xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
                     "could not allocate parser context");
     doc = xmlCtxtReadMemory(ctxt, (char *) string + xmldecl_len,
-                            len - xmldecl_len, NULL, NULL, XML_PARSE_HUGE);
+                            len - xmldecl_len, NULL, NULL, 0);
     if (doc == NULL || xmlerrcxt->err_occurred)
         xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_XML_DOCUMENT,
                     "could not parse XML document");
@@ -4695,7 +4675,7 @@ XmlTableSetDocument(TableFuncScanState *state, Datum value)
     PG_TRY();
     {
-        doc = xmlCtxtReadMemory(xtCxt->ctxt, (char *) xstr, length, NULL, NULL, XML_PARSE_HUGE);
+        doc = xmlCtxtReadMemory(xtCxt->ctxt, (char *) xstr, length, NULL, NULL, 0);
         if (doc == NULL || xtCxt->xmlerrcxt->err_occurred)
             xml_ereport(xtCxt->xmlerrcxt, ERROR, ERRCODE_INVALID_XML_DOCUMENT,
                         "could not parse XML document");

View File

@@ -3247,6 +3247,23 @@ PQsendPipelineSync(PGconn *conn)
 /*
  * Workhorse function for PQpipelineSync and PQsendPipelineSync.
  *
+ * It's legal to start submitting more commands in the pipeline immediately,
+ * without waiting for the results of the current pipeline. There's no need to
+ * end pipeline mode and start it again.
+ *
+ * If a command in a pipeline fails, every subsequent command up to and
+ * including the result to the Sync message sent by pqPipelineSyncInternal
+ * gets set to PGRES_PIPELINE_ABORTED state. If the whole pipeline is
+ * processed without error, a PGresult with PGRES_PIPELINE_SYNC is produced.
+ *
+ * Queries can already have been sent before pqPipelineSyncInternal is called,
+ * but pqPipelineSyncInternal needs to be called before retrieving command
+ * results.
+ *
+ * The connection will remain in pipeline mode and unavailable for new
+ * synchronous command execution functions until all results from the pipeline
+ * are processed by the client.
+ *
  * immediate_flush controls if the flush happens immediately after sending the
  * Sync message or not.
  */
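
A minimal client-side sketch of the contract spelled out above (placeholder connection string, error handling trimmed to the essentials):

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn     *conn = PQconnectdb("dbname=postgres");  /* placeholder DSN */
    PGresult   *res;

    if (PQstatus(conn) != CONNECTION_OK || !PQenterPipelineMode(conn))
        return 1;

    /* Queue two queries, then mark the pipeline boundary. */
    PQsendQueryParams(conn, "SELECT 1", 0, NULL, NULL, NULL, NULL, 0);
    PQsendQueryParams(conn, "SELECT 2", 0, NULL, NULL, NULL, NULL, 0);
    PQpipelineSync(conn);       /* also flushes the buffered commands */

    /* Drain results; NULL separates queries, PGRES_PIPELINE_SYNC ends it. */
    while (PQstatus(conn) == CONNECTION_OK)
    {
        res = PQgetResult(conn);
        if (res == NULL)
            continue;
        if (PQresultStatus(res) == PGRES_PIPELINE_SYNC)
        {
            PQclear(res);
            break;
        }
        PQclear(res);
    }
    PQexitPipelineMode(conn);
    PQfinish(conn);
    return 0;
}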

View File

@@ -37,3 +37,9 @@ DROP TABLE user_logins;
 DROP EVENT TRIGGER on_login_trigger;
 DROP FUNCTION on_login_proc();
 \c
+SELECT dathasloginevt FROM pg_database WHERE datname= :'DBNAME';
+ dathasloginevt 
+----------------
+ f
+(1 row)
+

View File

@@ -22,3 +22,4 @@ DROP TABLE user_logins;
 DROP EVENT TRIGGER on_login_trigger;
 DROP FUNCTION on_login_proc();
 \c
+SELECT dathasloginevt FROM pg_database WHERE datname= :'DBNAME';