fixed -Wconversion for lib/decompress/zstd_decompress_block.c

This commit is contained in:
Yann Collet 2025-02-26 10:01:05 -08:00
parent 2413f17322
commit db2d205ada
4 changed files with 71 additions and 63 deletions

View File

@ -357,9 +357,9 @@ ptrdiff_t ZSTD_wrappedPtrDiff(unsigned char const* lhs, unsigned char const* rhs
*/ */
MEM_STATIC MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
unsigned char const* ZSTD_wrappedPtrAdd(unsigned char const* ptr, ptrdiff_t add) const void* ZSTD_wrappedPtrAdd(const void* ptr, ptrdiff_t add)
{ {
return ptr + add; return (const char*)ptr + add;
} }
/** /**
@ -370,9 +370,9 @@ unsigned char const* ZSTD_wrappedPtrAdd(unsigned char const* ptr, ptrdiff_t add)
*/ */
MEM_STATIC MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
unsigned char const* ZSTD_wrappedPtrSub(unsigned char const* ptr, ptrdiff_t sub) const void* ZSTD_wrappedPtrSub(const void* ptr, ptrdiff_t sub)
{ {
return ptr - sub; return (const char*)ptr - sub;
} }
/** /**
@ -382,9 +382,9 @@ unsigned char const* ZSTD_wrappedPtrSub(unsigned char const* ptr, ptrdiff_t sub)
* @returns `ptr + add` except it defines `NULL + 0 == NULL`. * @returns `ptr + add` except it defines `NULL + 0 == NULL`.
*/ */
MEM_STATIC MEM_STATIC
unsigned char* ZSTD_maybeNullPtrAdd(unsigned char* ptr, ptrdiff_t add) void* ZSTD_maybeNullPtrAdd(void* ptr, ptrdiff_t add)
{ {
return add > 0 ? ptr + add : ptr; return add > 0 ? (char*)ptr + add : ptr;
} }
/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an /* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an

View File

@ -213,7 +213,7 @@ typedef enum {
* The src buffer must be before the dst buffer. * The src buffer must be before the dst buffer.
*/ */
MEM_STATIC FORCE_INLINE_ATTR MEM_STATIC FORCE_INLINE_ATTR
void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype) void ZSTD_wildcopy(void* dst, const void* src, size_t length, ZSTD_overlap_e const ovtype)
{ {
ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src; ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
const BYTE* ip = (const BYTE*)src; const BYTE* ip = (const BYTE*)src;

View File

@ -707,7 +707,7 @@ ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE con
{ {
assert(iend > ilimit_w); assert(iend > ilimit_w);
if (ip <= ilimit_w) { if (ip <= ilimit_w) {
ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap); ZSTD_wildcopy(op, ip, (size_t)(ilimit_w - ip), ZSTD_no_overlap);
op += ilimit_w - ip; op += ilimit_w - ip;
ip = ilimit_w; ip = ilimit_w;
} }
@ -800,7 +800,7 @@ ZSTD_storeSeq(SeqStore_t* seqStorePtr,
ZSTD_STATIC_ASSERT(WILDCOPY_OVERLENGTH >= 16); ZSTD_STATIC_ASSERT(WILDCOPY_OVERLENGTH >= 16);
ZSTD_copy16(seqStorePtr->lit, literals); ZSTD_copy16(seqStorePtr->lit, literals);
if (litLength > 16) { if (litLength > 16) {
ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap); ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, litLength-16, ZSTD_no_overlap);
} }
} else { } else {
ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w); ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);

View File

@ -16,14 +16,13 @@
*********************************************************/ *********************************************************/
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
#include "../common/compiler.h" /* prefetch */ #include "../common/compiler.h" /* prefetch */
#include "../common/cpu.h" /* bmi2 */
#include "../common/mem.h" /* low level memory routines */ #include "../common/mem.h" /* low level memory routines */
#include <stddef.h>
#define FSE_STATIC_LINKING_ONLY #define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h" #include "../common/fse.h"
#include "../common/huf.h" #include "../common/huf.h"
#include "../common/zstd_internal.h" #include "../common/zstd_internal.h"
#include "zstd_decompress_internal.h" /* ZSTD_DCtx */ #include "zstd_decompress_internal.h" /* ZSTD_DCtx */
#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
#include "zstd_decompress_block.h" #include "zstd_decompress_block.h"
#include "../common/bits.h" /* ZSTD_highbit32 */ #include "../common/bits.h" /* ZSTD_highbit32 */
@ -734,9 +733,10 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ip++; ip++;
/* Build DTables */ /* Build DTables */
assert(ip <= iend);
{ size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
LLtype, MaxLL, LLFSELog, LLtype, MaxLL, LLFSELog,
ip, iend-ip, ip, (size_t)(iend-ip),
LL_base, LL_bits, LL_base, LL_bits,
LL_defaultDTable, dctx->fseEntropy, LL_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq, dctx->ddictIsCold, nbSeq,
@ -746,9 +746,10 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ip += llhSize; ip += llhSize;
} }
assert(ip <= iend);
{ size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
OFtype, MaxOff, OffFSELog, OFtype, MaxOff, OffFSELog,
ip, iend-ip, ip, (size_t)(iend-ip),
OF_base, OF_bits, OF_base, OF_bits,
OF_defaultDTable, dctx->fseEntropy, OF_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq, dctx->ddictIsCold, nbSeq,
@ -758,9 +759,10 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ip += ofhSize; ip += ofhSize;
} }
assert(ip <= iend);
{ size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
MLtype, MaxML, MLFSELog, MLtype, MaxML, MLFSELog,
ip, iend-ip, ip, (size_t)(iend-ip),
ML_base, ML_bits, ML_base, ML_bits,
ML_defaultDTable, dctx->fseEntropy, ML_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq, dctx->ddictIsCold, nbSeq,
@ -771,7 +773,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
} }
} }
return ip-istart; return (size_t)(ip-istart);
} }
@ -801,7 +803,8 @@ typedef struct {
* Precondition: *ip <= *op * Precondition: *ip <= *op
* Postcondition: *op - *ip >= 8 * Postcondition: *op - *ip >= 8
*/ */
HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) { HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset)
{
assert(*ip <= *op); assert(*ip <= *op);
if (offset < 8) { if (offset < 8) {
/* close range match, overlap */ /* close range match, overlap */
@ -834,7 +837,9 @@ HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
* - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart. * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
* The src buffer must be before the dst buffer. * The src buffer must be before the dst buffer.
*/ */
static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) { static void
ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, size_t length, ZSTD_overlap_e ovtype)
{
ptrdiff_t const diff = op - ip; ptrdiff_t const diff = op - ip;
BYTE* const oend = op + length; BYTE* const oend = op + length;
@ -849,7 +854,8 @@ static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, pt
if (ovtype == ZSTD_overlap_src_before_dst) { if (ovtype == ZSTD_overlap_src_before_dst) {
/* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */ /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
assert(length >= 8); assert(length >= 8);
ZSTD_overlapCopy8(&op, &ip, diff); assert(diff > 0);
ZSTD_overlapCopy8(&op, &ip, (size_t)diff);
length -= 8; length -= 8;
assert(op - ip >= 8); assert(op - ip >= 8);
assert(op <= oend); assert(op <= oend);
@ -863,7 +869,7 @@ static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, pt
if (op <= oend_w) { if (op <= oend_w) {
/* Wildcopy until we get close to the end. */ /* Wildcopy until we get close to the end. */
assert(oend > oend_w); assert(oend > oend_w);
ZSTD_wildcopy(op, ip, oend_w - op, ovtype); ZSTD_wildcopy(op, ip, (size_t)(oend_w - op), ovtype);
ip += oend_w - op; ip += oend_w - op;
op += oend_w - op; op += oend_w - op;
} }
@ -874,7 +880,8 @@ static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, pt
/* ZSTD_safecopyDstBeforeSrc(): /* ZSTD_safecopyDstBeforeSrc():
* This version allows overlap with dst before src, or handles the non-overlap case with dst after src * This version allows overlap with dst before src, or handles the non-overlap case with dst after src
* Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */ * Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */
static void ZSTD_safecopyDstBeforeSrc(BYTE* op, const BYTE* ip, ptrdiff_t length) { static void ZSTD_safecopyDstBeforeSrc(BYTE* op, const BYTE* ip, size_t length)
{
ptrdiff_t const diff = op - ip; ptrdiff_t const diff = op - ip;
BYTE* const oend = op + length; BYTE* const oend = op + length;
@ -885,7 +892,7 @@ static void ZSTD_safecopyDstBeforeSrc(BYTE* op, const BYTE* ip, ptrdiff_t length
} }
if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) { if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) {
ZSTD_wildcopy(op, ip, oend - WILDCOPY_OVERLENGTH - op, ZSTD_no_overlap); ZSTD_wildcopy(op, ip, (size_t)(oend - WILDCOPY_OVERLENGTH - op), ZSTD_no_overlap);
ip += oend - WILDCOPY_OVERLENGTH - op; ip += oend - WILDCOPY_OVERLENGTH - op;
op += oend - WILDCOPY_OVERLENGTH - op; op += oend - WILDCOPY_OVERLENGTH - op;
} }
@ -936,7 +943,7 @@ size_t ZSTD_execSequenceEnd(BYTE* op,
return sequenceLength; return sequenceLength;
} }
/* span extDict & currentPrefixSegment */ /* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match; { size_t const length1 = (size_t)(dictEnd - match);
ZSTD_memmove(oLitEnd, match, length1); ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1; op = oLitEnd + length1;
sequence.matchLength -= length1; sequence.matchLength -= length1;
@ -985,7 +992,7 @@ size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op,
return sequenceLength; return sequenceLength;
} }
/* span extDict & currentPrefixSegment */ /* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match; { size_t const length1 = (size_t)(dictEnd - match);
ZSTD_memmove(oLitEnd, match, length1); ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1; op = oLitEnd + length1;
sequence.matchLength -= length1; sequence.matchLength -= length1;
@ -1058,7 +1065,7 @@ size_t ZSTD_execSequence(BYTE* op,
return sequenceLength; return sequenceLength;
} }
/* span extDict & currentPrefixSegment */ /* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match; { size_t const length1 = (size_t)(dictEnd - match);
ZSTD_memmove(oLitEnd, match, length1); ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1; op = oLitEnd + length1;
sequence.matchLength -= length1; sequence.matchLength -= length1;
@ -1079,7 +1086,7 @@ size_t ZSTD_execSequence(BYTE* op,
* longer than literals (in general). In silesia, ~10% of matches are longer * longer than literals (in general). In silesia, ~10% of matches are longer
* than 16 bytes. * than 16 bytes.
*/ */
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap); ZSTD_wildcopy(op, match, sequence.matchLength, ZSTD_no_overlap);
return sequenceLength; return sequenceLength;
} }
assert(sequence.offset < WILDCOPY_VECLEN); assert(sequence.offset < WILDCOPY_VECLEN);
@ -1090,7 +1097,7 @@ size_t ZSTD_execSequence(BYTE* op,
/* If the match length is > 8 bytes, then continue with the wildcopy. */ /* If the match length is > 8 bytes, then continue with the wildcopy. */
if (sequence.matchLength > 8) { if (sequence.matchLength > 8) {
assert(op < oMatchEnd); assert(op < oMatchEnd);
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8, ZSTD_overlap_src_before_dst); ZSTD_wildcopy(op, match, sequence.matchLength - 8, ZSTD_overlap_src_before_dst);
} }
return sequenceLength; return sequenceLength;
} }
@ -1151,7 +1158,7 @@ size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op,
return sequenceLength; return sequenceLength;
} }
/* span extDict & currentPrefixSegment */ /* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match; { size_t const length1 = (size_t)(dictEnd - match);
ZSTD_memmove(oLitEnd, match, length1); ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1; op = oLitEnd + length1;
sequence.matchLength -= length1; sequence.matchLength -= length1;
@ -1171,7 +1178,7 @@ size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op,
* longer than literals (in general). In silesia, ~10% of matches are longer * longer than literals (in general). In silesia, ~10% of matches are longer
* than 16 bytes. * than 16 bytes.
*/ */
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap); ZSTD_wildcopy(op, match, sequence.matchLength, ZSTD_no_overlap);
return sequenceLength; return sequenceLength;
} }
assert(sequence.offset < WILDCOPY_VECLEN); assert(sequence.offset < WILDCOPY_VECLEN);
@ -1182,7 +1189,7 @@ size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op,
/* If the match length is > 8 bytes, then continue with the wildcopy. */ /* If the match length is > 8 bytes, then continue with the wildcopy. */
if (sequence.matchLength > 8) { if (sequence.matchLength > 8) {
assert(op < oMatchEnd); assert(op < oMatchEnd);
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst); ZSTD_wildcopy(op, match, sequence.matchLength-8, ZSTD_overlap_src_before_dst);
} }
return sequenceLength; return sequenceLength;
} }
@ -1405,10 +1412,8 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
const void* seqStart, size_t seqSize, int nbSeq, const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset) const ZSTD_longOffset_e isLongOffset)
{ {
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst; BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, maxDstSize); BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)maxDstSize);
BYTE* op = ostart; BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr; const BYTE* litPtr = dctx->litPtr;
const BYTE* litBufferEnd = dctx->litBufferEnd; const BYTE* litBufferEnd = dctx->litBufferEnd;
@ -1423,7 +1428,7 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
dctx->fseEntropy = 1; dctx->fseEntropy = 1;
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
RETURN_ERROR_IF( RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), ERR_isError(BIT_initDStream(&seqState.DStream, seqStart, seqSize)),
corruption_detected, ""); corruption_detected, "");
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
@ -1515,7 +1520,8 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
/* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */ /* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */
if (nbSeq > 0) { if (nbSeq > 0) {
const size_t leftoverLit = dctx->litBufferEnd - litPtr; const size_t leftoverLit = (size_t)(dctx->litBufferEnd - litPtr);
assert(dctx->litBufferEnd >= litPtr);
DEBUGLOG(6, "There are %i sequences left, and %zu/%zu literals left in buffer", nbSeq, leftoverLit, sequence.litLength); DEBUGLOG(6, "There are %i sequences left, and %zu/%zu literals left in buffer", nbSeq, leftoverLit, sequence.litLength);
if (leftoverLit) { if (leftoverLit) {
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
@ -1617,10 +1623,10 @@ ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
const void* seqStart, size_t seqSize, int nbSeq, const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset) const ZSTD_longOffset_e isLongOffset)
{ {
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst; BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ZSTD_maybeNullPtrAdd(ostart, maxDstSize) : dctx->litBuffer; BYTE* const oend = (dctx->litBufferLocation == ZSTD_not_in_dst) ?
ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)maxDstSize) :
dctx->litBuffer;
BYTE* op = ostart; BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr; const BYTE* litPtr = dctx->litPtr;
const BYTE* const litEnd = litPtr + dctx->litSize; const BYTE* const litEnd = litPtr + dctx->litSize;
@ -1635,7 +1641,7 @@ ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
dctx->fseEntropy = 1; dctx->fseEntropy = 1;
{ U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } { U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
RETURN_ERROR_IF( RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend - ip)), ERR_isError(BIT_initDStream(&seqState.DStream, seqStart, seqSize)),
corruption_detected, ""); corruption_detected, "");
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
@ -1719,7 +1725,7 @@ size_t ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
{ const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart; { const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart;
/* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
* No consequence though : memory address is only used for prefetching, not for dereferencing */ * No consequence though : memory address is only used for prefetching, not for dereferencing */
const BYTE* const match = ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, prefetchPos), sequence.offset); const BYTE* const match = ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, (ptrdiff_t)prefetchPos), (ptrdiff_t)sequence.offset);
PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */ PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
} }
return prefetchPos + sequence.matchLength; return prefetchPos + sequence.matchLength;
@ -1736,10 +1742,10 @@ ZSTD_decompressSequencesLong_body(
const void* seqStart, size_t seqSize, int nbSeq, const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset) const ZSTD_longOffset_e isLongOffset)
{ {
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst; BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ZSTD_maybeNullPtrAdd(ostart, maxDstSize); BYTE* const oend = (dctx->litBufferLocation == ZSTD_in_dst) ?
dctx->litBuffer :
ZSTD_maybeNullPtrAdd(ostart, (ptrdiff_t)maxDstSize);
BYTE* op = ostart; BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr; const BYTE* litPtr = dctx->litPtr;
const BYTE* litBufferEnd = dctx->litBufferEnd; const BYTE* litBufferEnd = dctx->litBufferEnd;
@ -1761,9 +1767,8 @@ ZSTD_decompressSequencesLong_body(
dctx->fseEntropy = 1; dctx->fseEntropy = 1;
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
assert(dst != NULL); assert(dst != NULL);
assert(iend >= ip);
RETURN_ERROR_IF( RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), ERR_isError(BIT_initDStream(&seqState.DStream, seqStart, seqSize)),
corruption_detected, ""); corruption_detected, "");
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
@ -1782,9 +1787,9 @@ ZSTD_decompressSequencesLong_body(
if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd) { if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd) {
/* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */ /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */
const size_t leftoverLit = dctx->litBufferEnd - litPtr; const size_t leftoverLit = (size_t)(dctx->litBufferEnd - litPtr);
if (leftoverLit) assert(dctx->litBufferEnd >= litPtr);
{ if (leftoverLit) {
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit; sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit;
@ -1828,7 +1833,8 @@ ZSTD_decompressSequencesLong_body(
for ( ; seqNb<nbSeq ; seqNb++) { for ( ; seqNb<nbSeq ; seqNb++) {
seq_t *sequence = &(sequences[seqNb&STORED_SEQS_MASK]); seq_t *sequence = &(sequences[seqNb&STORED_SEQS_MASK]);
if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) { if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) {
const size_t leftoverLit = dctx->litBufferEnd - litPtr; const size_t leftoverLit = (size_t)(dctx->litBufferEnd - litPtr);
assert(dctx->litBufferEnd >= litPtr);
if (leftoverLit) { if (leftoverLit) {
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
@ -1867,7 +1873,8 @@ ZSTD_decompressSequencesLong_body(
/* last literal segment */ /* last literal segment */
if (dctx->litBufferLocation == ZSTD_split) { /* first deplete literal buffer in dst, then copy litExtraBuffer */ if (dctx->litBufferLocation == ZSTD_split) { /* first deplete literal buffer in dst, then copy litExtraBuffer */
size_t const lastLLSize = litBufferEnd - litPtr; size_t const lastLLSize = (size_t)(litBufferEnd - litPtr);
assert(litBufferEnd >= litPtr);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, ""); RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
if (op != NULL) { if (op != NULL) {
ZSTD_memmove(op, litPtr, lastLLSize); ZSTD_memmove(op, litPtr, lastLLSize);
@ -1876,7 +1883,8 @@ ZSTD_decompressSequencesLong_body(
litPtr = dctx->litExtraBuffer; litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
} }
{ size_t const lastLLSize = litBufferEnd - litPtr; { size_t const lastLLSize = (size_t)(litBufferEnd - litPtr);
assert(litBufferEnd >= litPtr);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
if (op != NULL) { if (op != NULL) {
ZSTD_memmove(op, litPtr, lastLLSize); ZSTD_memmove(op, litPtr, lastLLSize);
@ -2094,7 +2102,7 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
* Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t. * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t.
*/ */
size_t const blockSizeMax = MIN(dstCapacity, ZSTD_blockSizeMax(dctx)); size_t const blockSizeMax = MIN(dstCapacity, ZSTD_blockSizeMax(dctx));
size_t const totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd((BYTE*)dst, blockSizeMax), (BYTE const*)dctx->virtualStart); size_t const totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd(dst, (ptrdiff_t)blockSizeMax), (BYTE const*)dctx->virtualStart);
/* isLongOffset must be true if there are long offsets. /* isLongOffset must be true if there are long offsets.
* Offsets are long if they are larger than ZSTD_maxShortOffset(). * Offsets are long if they are larger than ZSTD_maxShortOffset().
* We don't expect that to be the case in 64-bit mode. * We don't expect that to be the case in 64-bit mode.