Import lz4 v1.9.3 (#3148)
Upstream: git@github.com:lz4/lz4.git
Version: d44371841a2f1728a3f36839fd4b7e872d0927d3

librdkafka modifications:
 - rename xxhash.[ch] -> rdxxhash.[ch]

Parent: 541c4aa869
Commit: 0fc3eb7403
@@ -31,6 +31,8 @@ and the sticky consumer group partition assignor.
 * Added `assignor` debug context for troubleshooting consumer partition
   assignments.
 * Updated to OpenSSL v1.1.1h when building dependencies.
+* Update bundled lz4 (used when `./configure --disable-lz4-ext`) to v1.9.3
+  which has vast performance improvements.
 * Added `rd_kafka_conf_get_default_topic_conf()` to retrieve the
   default topic configuration object from a global configuration object.
 
 555  src/lz4.c   (file diff not shown because of its large size)

 170  src/lz4.h
@@ -100,7 +100,7 @@ extern "C" {
 /*------   Version   ------*/
 #define LZ4_VERSION_MAJOR    1    /* for breaking interface changes  */
 #define LZ4_VERSION_MINOR    9    /* for new (non-breaking) interface capabilities */
-#define LZ4_VERSION_RELEASE  2    /* for tweaks, bug-fixes, or development */
+#define LZ4_VERSION_RELEASE  3    /* for tweaks, bug-fixes, or development */
 
 #define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
 
@@ -186,7 +186,8 @@ LZ4LIB_API int LZ4_compressBound(int inputSize);
     The larger the acceleration value, the faster the algorithm, but also the lesser the compression.
     It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed.
     An acceleration value of "1" is the same as regular LZ4_compress_default()
-    Values <= 0 will be replaced by ACCELERATION_DEFAULT (currently == 1, see lz4.c).
+    Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c).
+    Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c).
 */
 LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
 
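For context on the acceleration parameter documented in the hunk above, a minimal usage sketch follows. It is not part of this commit; the helper name and buffer handling are illustrative only.

    #include <stdlib.h>
    #include "lz4.h"

    /* Compress srcSize bytes of src with LZ4_compress_fast().
     * Per the header comment: accel <= 0 behaves like 1, and (since v1.9.3)
     * values above LZ4_ACCELERATION_MAX are clamped. */
    static int compress_fast_example(const char *src, int srcSize, int accel,
                                     char **outBuf, int *outSize)
    {
        int const bound = LZ4_compressBound(srcSize);   /* worst-case output size */
        char *dst = malloc((size_t)bound);
        if (dst == NULL) return -1;
        *outSize = LZ4_compress_fast(src, dst, srcSize, bound, accel);
        if (*outSize <= 0) { free(dst); return -1; }    /* 0 means compression failed */
        *outBuf = dst;
        return 0;
    }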
@@ -212,7 +213,18 @@ LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* d
 *  New value is necessarily <= input value.
 * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
 *           or 0 if compression fails.
-*/
+ *
+ * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
+ *        the produced compressed content could, in specific circumstances,
+ *        require to be decompressed into a destination buffer larger
+ *        by at least 1 byte than the content to decompress.
+ *        If an application uses `LZ4_compress_destSize()`,
+ *        it's highly recommended to update liblz4 to v1.9.2 or better.
+ *        If this can't be done or ensured,
+ *        the receiving decompression function should provide
+ *        a dstCapacity which is > decompressedSize, by at least 1 byte.
+ *        See https://github.com/lz4/lz4/issues/859 for details
+*/
 LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);
 
 
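The compatibility note above (output of a pre-v1.9.2 LZ4_compress_destSize() may need one extra byte of destination capacity when decompressed) translates to roughly the following sketch. It is not part of this commit and the names are illustrative.

    #include <stdlib.h>
    #include "lz4.h"

    /* Fill a fixed-size dst with as much compressed data as fits, then decompress.
     * The +1 on the decompression buffer is the workaround described above for
     * blocks produced by liblz4 v1.8.2 - v1.9.1. */
    static int destsize_roundtrip_example(const char *src, int srcSize,
                                          char *dst, int dstCapacity)
    {
        int consumed = srcSize;                      /* in: available, out: bytes actually compressed */
        int const cSize = LZ4_compress_destSize(src, dst, &consumed, dstCapacity);
        if (cSize <= 0) return -1;

        char *round = malloc((size_t)consumed + 1);  /* decompressedSize + 1 byte of margin */
        if (round == NULL) return -1;
        int const dSize = LZ4_decompress_safe(dst, round, cSize, consumed + 1);
        free(round);
        return (dSize == consumed) ? 0 : -1;
    }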
@@ -220,25 +232,35 @@ LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePt
 *  Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
 *  into destination buffer 'dst' of size 'dstCapacity'.
 *  Up to 'targetOutputSize' bytes will be decoded.
-*  The function stops decoding on reaching this objective,
-*  which can boost performance when only the beginning of a block is required.
+*  The function stops decoding on reaching this objective.
+*  This can be useful to boost performance
+*  whenever only the beginning of a block is required.
 *
-* @return : the number of bytes decoded in `dst` (necessarily <= dstCapacity)
+* @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize)
 *           If source stream is detected malformed, function returns a negative result.
 *
-* Note : @return can be < targetOutputSize, if compressed block contains less data.
+* Note 1 : @return can be < targetOutputSize, if compressed block contains less data.
 *
-* Note 2 : this function features 2 parameters, targetOutputSize and dstCapacity,
-*          and expects targetOutputSize <= dstCapacity.
-*          It effectively stops decoding on reaching targetOutputSize,
+* Note 2 : targetOutputSize must be <= dstCapacity
+*
+* Note 3 : this function effectively stops decoding on reaching targetOutputSize,
 *          so dstCapacity is kind of redundant.
-*          This is because in a previous version of this function,
-*          decoding operation would not "break" a sequence in the middle.
-*          As a consequence, there was no guarantee that decoding would stop at exactly targetOutputSize,
+*          This is because in older versions of this function,
+*          decoding operation would still write complete sequences.
+*          Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize,
 *          it could write more bytes, though only up to dstCapacity.
 *          Some "margin" used to be required for this operation to work properly.
-*          This is no longer necessary.
-*          The function nonetheless keeps its signature, in an effort to not break API.
+*          Thankfully, this is no longer necessary.
+*          The function nonetheless keeps the same signature, in an effort to preserve API compatibility.
+*
+* Note 4 : If srcSize is the exact size of the block,
+*          then targetOutputSize can be any value,
+*          including larger than the block's decompressed size.
+*          The function will, at most, generate block's decompressed size.
+*
+* Note 5 : If srcSize is _larger_ than block's compressed size,
+*          then targetOutputSize **MUST** be <= block's decompressed size.
+*          Otherwise, *silent corruption will occur*.
 */
 LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);
 
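A hedged sketch of how the partial-decode contract above might be exercised (not from this commit; names and sizes are made up): only the first targetOutputSize bytes of the block are regenerated.

    #include "lz4.h"

    /* Decode only the first `want` bytes of a compressed block.
     * Per the notes above, targetOutputSize <= dstCapacity must hold, and the
     * return value may be smaller than `want` if the block holds less data. */
    static int peek_block_prefix(const char *cSrc, int cSize,
                                 char *dst, int dstCapacity, int want)
    {
        if (want > dstCapacity) want = dstCapacity;
        int const got = LZ4_decompress_safe_partial(cSrc, dst, cSize, want, dstCapacity);
        return got;   /* negative on malformed input */
    }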
@ -547,74 +569,64 @@ LZ4LIB_STATIC_API void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const
|
|||
#define LZ4_H_98237428734687
|
||||
|
||||
/*-************************************************************
|
||||
* PRIVATE DEFINITIONS
|
||||
* Private Definitions
|
||||
**************************************************************
|
||||
* Do not use these definitions directly.
|
||||
* They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
|
||||
* Accessing members will expose code to API and/or ABI break in future versions of the library.
|
||||
* Accessing members will expose user code to API and/or ABI break in future versions of the library.
|
||||
**************************************************************/
|
||||
#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
|
||||
#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
|
||||
#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
|
||||
|
||||
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
|
||||
#include <stdint.h>
|
||||
|
||||
typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
|
||||
struct LZ4_stream_t_internal {
|
||||
uint32_t hashTable[LZ4_HASH_SIZE_U32];
|
||||
uint32_t currentOffset;
|
||||
uint16_t dirty;
|
||||
uint16_t tableType;
|
||||
const uint8_t* dictionary;
|
||||
const LZ4_stream_t_internal* dictCtx;
|
||||
uint32_t dictSize;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
const uint8_t* externalDict;
|
||||
size_t extDictSize;
|
||||
const uint8_t* prefixEnd;
|
||||
size_t prefixSize;
|
||||
} LZ4_streamDecode_t_internal;
|
||||
|
||||
# include <stdint.h>
|
||||
typedef int8_t LZ4_i8;
|
||||
typedef uint8_t LZ4_byte;
|
||||
typedef uint16_t LZ4_u16;
|
||||
typedef uint32_t LZ4_u32;
|
||||
#else
|
||||
|
||||
typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
|
||||
struct LZ4_stream_t_internal {
|
||||
unsigned int hashTable[LZ4_HASH_SIZE_U32];
|
||||
unsigned int currentOffset;
|
||||
unsigned short dirty;
|
||||
unsigned short tableType;
|
||||
const unsigned char* dictionary;
|
||||
const LZ4_stream_t_internal* dictCtx;
|
||||
unsigned int dictSize;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
const unsigned char* externalDict;
|
||||
const unsigned char* prefixEnd;
|
||||
size_t extDictSize;
|
||||
size_t prefixSize;
|
||||
} LZ4_streamDecode_t_internal;
|
||||
|
||||
typedef signed char LZ4_i8;
|
||||
typedef unsigned char LZ4_byte;
|
||||
typedef unsigned short LZ4_u16;
|
||||
typedef unsigned int LZ4_u32;
|
||||
#endif
|
||||
|
||||
typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
|
||||
struct LZ4_stream_t_internal {
|
||||
LZ4_u32 hashTable[LZ4_HASH_SIZE_U32];
|
||||
LZ4_u32 currentOffset;
|
||||
LZ4_u32 tableType;
|
||||
const LZ4_byte* dictionary;
|
||||
const LZ4_stream_t_internal* dictCtx;
|
||||
LZ4_u32 dictSize;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
const LZ4_byte* externalDict;
|
||||
size_t extDictSize;
|
||||
const LZ4_byte* prefixEnd;
|
||||
size_t prefixSize;
|
||||
} LZ4_streamDecode_t_internal;
|
||||
|
||||
|
||||
/*! LZ4_stream_t :
|
||||
* information structure to track an LZ4 stream.
|
||||
* Do not use below internal definitions directly !
|
||||
* Declare or allocate an LZ4_stream_t instead.
|
||||
* LZ4_stream_t can also be created using LZ4_createStream(), which is recommended.
|
||||
* The structure definition can be convenient for static allocation
|
||||
* (on stack, or as part of larger structure).
|
||||
* Init this structure with LZ4_initStream() before first use.
|
||||
* note : only use this definition in association with static linking !
|
||||
* this definition is not API/ABI safe, and may change in a future version.
|
||||
* this definition is not API/ABI safe, and may change in future versions.
|
||||
*/
|
||||
#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4 + ((sizeof(void*)==16) ? 4 : 0) /*AS-400*/ )
|
||||
#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long))
|
||||
#define LZ4_STREAMSIZE 16416 /* static size, for inter-version compatibility */
|
||||
#define LZ4_STREAMSIZE_VOIDP (LZ4_STREAMSIZE / sizeof(void*))
|
||||
union LZ4_stream_u {
|
||||
unsigned long long table[LZ4_STREAMSIZE_U64];
|
||||
void* table[LZ4_STREAMSIZE_VOIDP];
|
||||
LZ4_stream_t_internal internal_donotuse;
|
||||
} ; /* previously typedef'd to LZ4_stream_t */
|
||||
}; /* previously typedef'd to LZ4_stream_t */
|
||||
|
||||
|
||||
/*! LZ4_initStream() : v1.9.0+
|
||||
* An LZ4_stream_t structure must be initialized at least once.
|
||||
|
@@ -667,22 +679,21 @@ union LZ4_streamDecode_u {
 #ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
 #  define LZ4_DEPRECATED(message)   /* disable deprecation warnings */
 #else
-#  define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
 #  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
 #    define LZ4_DEPRECATED(message) [[deprecated(message)]]
-#  elif (LZ4_GCC_VERSION >= 405) || defined(__clang__)
-#    define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
-#  elif (LZ4_GCC_VERSION >= 301)
-#    define LZ4_DEPRECATED(message) __attribute__((deprecated))
 #  elif defined(_MSC_VER)
 #    define LZ4_DEPRECATED(message) __declspec(deprecated(message))
+#  elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45))
+#    define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
+#  elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31)
+#    define LZ4_DEPRECATED(message) __attribute__((deprecated))
 #  else
-#    pragma message("WARNING: You need to implement LZ4_DEPRECATED for this compiler")
-#    define LZ4_DEPRECATED(message)
+#    pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler")
+#    define LZ4_DEPRECATED(message)   /* disabled */
 #  endif
 #endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
 
-/* Obsolete compression functions */
+/*! Obsolete compression functions (since v1.7.3) */
 LZ4_DEPRECATED("use LZ4_compress_default() instead")       LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize);
 LZ4_DEPRECATED("use LZ4_compress_default() instead")       LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize);
 LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
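A minimal illustration of how the macro above is meant to be consumed (hypothetical declaration, not taken from the diff):

    #include "lz4.h"

    /* Tagging a prototype with LZ4_DEPRECATED makes supported compilers emit a
     * warning (with the message where the toolchain allows it) at each call site. */
    LZ4_DEPRECATED("use new_api() instead") int old_api(const char *src, int srcSize);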
@@ -690,11 +701,12 @@ LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_co
 LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
 LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
 
-/* Obsolete decompression functions */
+/*! Obsolete decompression functions (since v1.8.0) */
 LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize);
 LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
 
-/* Obsolete streaming functions; degraded functionality; do not use!
+/* Obsolete streaming functions (since v1.7.0)
+ * degraded functionality; do not use!
  *
  * In order to perform streaming compression, these functions depended on data
  * that is no longer tracked in the state. They have been preserved as well as
@@ -708,23 +720,22 @@ LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStre
 LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int  LZ4_resetStreamState(void* state, char* inputBuffer);
 LZ4_DEPRECATED("Use LZ4_saveDict() instead")    LZ4LIB_API char* LZ4_slideInputBuffer (void* state);
 
-/* Obsolete streaming decoding functions */
+/*! Obsolete streaming decoding functions (since v1.7.0) */
 LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
 LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
 
-/*! LZ4_decompress_fast() : **unsafe!**
+/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) :
  *  These functions used to be faster than LZ4_decompress_safe(),
- *  but it has changed, and they are now slower than LZ4_decompress_safe().
+ *  but this is no longer the case. They are now slower.
  *  This is because LZ4_decompress_fast() doesn't know the input size,
- *  and therefore must progress more cautiously in the input buffer to not read beyond the end of block.
+ *  and therefore must progress more cautiously into the input buffer to not read beyond the end of block.
  *  On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability.
  *  As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated.
  *
  *  The last remaining LZ4_decompress_fast() specificity is that
  *  it can decompress a block without knowing its compressed size.
- *  Such functionality could be achieved in a more secure manner,
- *  by also providing the maximum size of input buffer,
- *  but it would require new prototypes, and adaptation of the implementation to this new use case.
+ *  Such functionality can be achieved in a more secure manner
+ *  by employing LZ4_decompress_safe_partial().
 *
 *  Parameters:
 *  originalSize : is the uncompressed size to regenerate.
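As a concrete illustration of the migration the comment above recommends (an assumed example, not part of the diff): replace LZ4_decompress_fast() with LZ4_decompress_safe(), which takes the compressed size and validates the input.

    #include "lz4.h"

    /* Old, unsafe pattern (deprecated):
     *     LZ4_decompress_fast(cSrc, dst, originalSize);
     * Safer replacement: pass the compressed size so the decoder can bound its reads. */
    static int decompress_block_safely(const char *cSrc, int cSize,
                                       char *dst, int dstCapacity)
    {
        int const n = LZ4_decompress_safe(cSrc, dst, cSize, dstCapacity);
        return n;   /* number of decompressed bytes, or < 0 on malformed input */
    }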
@@ -739,7 +750,6 @@ LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4
 *  But they may happen if input data is invalid (error or intentional tampering).
 *  As a consequence, use these functions in trusted environments with trusted data **only**.
 */
-
 LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead")
 LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
 LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead")
 149  src/lz4frame.c
@@ -71,8 +71,8 @@
 * towards another library or solution of their choice
 * by modifying below section.
 */
#include <stdlib.h>   /* malloc, calloc, free */
#ifndef LZ4_SRC_INCLUDED   /* avoid redefinition when sources are coalesced */
#  include <stdlib.h>   /* malloc, calloc, free */
#  define ALLOC(s)          malloc(s)
#  define ALLOC_AND_ZERO(s) calloc(1,(s))
#  define FREEMEM(p)        free(p)
@@ -533,7 +533,7 @@ void LZ4F_freeCDict(LZ4F_CDict* cdict)
 *  If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
 *  Object can release its memory using LZ4F_freeCompressionContext();
 */
-LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_compressionContext_t* LZ4F_compressionContextPtr, unsigned version)
+LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
 {
     LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)ALLOC_AND_ZERO(sizeof(LZ4F_cctx_t));
     if (cctxPtr==NULL) return err0r(LZ4F_ERROR_allocation_failed);
@@ -541,20 +541,18 @@ LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_compressionContext_t* LZ4F_c
     cctxPtr->version = version;
     cctxPtr->cStage = 0;   /* Next stage : init stream */
 
-    *LZ4F_compressionContextPtr = (LZ4F_compressionContext_t)cctxPtr;
+    *LZ4F_compressionContextPtr = cctxPtr;
 
     return LZ4F_OK_NoError;
 }
 
 
-LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_compressionContext_t LZ4F_compressionContext)
+LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
 {
-    LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)LZ4F_compressionContext;
-
     if (cctxPtr != NULL) {  /* support free on NULL */
-        FREEMEM(cctxPtr->lz4CtxPtr);  /* works because LZ4_streamHC_t and LZ4_stream_t are simple POD types */
+        FREEMEM(cctxPtr->lz4CtxPtr);  /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
         FREEMEM(cctxPtr->tmpBuff);
-        FREEMEM(LZ4F_compressionContext);
+        FREEMEM(cctxPtr);
     }
 
     return LZ4F_OK_NoError;
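For orientation, the typical create/use/free cycle of the context managed by the two functions above looks roughly like this (assumed usage, not part of this commit):

    #include "lz4frame.h"

    /* Allocate a compression context, hand it to the caller, release on failure. */
    static LZ4F_cctx *make_cctx(void)
    {
        LZ4F_cctx *cctx = NULL;
        LZ4F_errorCode_t const err = LZ4F_createCompressionContext(&cctx, LZ4F_VERSION);
        if (LZ4F_isError(err)) {
            (void)LZ4F_freeCompressionContext(cctx);  /* free on NULL is supported */
            return NULL;
        }
        return cctx;
    }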
@@ -725,6 +723,9 @@ size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr,
  */
 size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
 {
+    if (preferencesPtr && preferencesPtr->autoFlush) {
+        return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0);
+    }
     return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1);
 }
 
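As context for the LZ4F_compressBound() change above (with autoFlush enabled the bound no longer accounts for internal buffering), a hedged sizing sketch follows; it is not part of this commit and the names are illustrative.

    #include <stdlib.h>
    #include "lz4frame.h"

    /* Size the destination buffer for LZ4F_compressUpdate() calls of at most
     * `maxChunk` bytes, honouring the caller's preferences (autoFlush affects
     * whether internal buffering has to be accounted for). */
    static void *alloc_update_buffer(size_t maxChunk,
                                     const LZ4F_preferences_t *prefs,
                                     size_t *dstCapacity)
    {
        *dstCapacity = LZ4F_compressBound(maxChunk, prefs);
        return malloc(*dstCapacity);
    }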
@ -747,6 +748,7 @@ static size_t LZ4F_makeBlock(void* dst,
|
|||
(int)(srcSize), (int)(srcSize-1),
|
||||
level, cdict);
|
||||
if (cSize == 0) { /* compression failed */
|
||||
DEBUGLOG(5, "LZ4F_makeBlock: compression failed, creating a raw block (size %u)", (U32)srcSize);
|
||||
cSize = (U32)srcSize;
|
||||
LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
|
||||
memcpy(cSizePtr+BHSize, src, srcSize);
|
||||
|
@ -834,7 +836,7 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
|
|||
LZ4F_lastBlockStatus lastBlockCompressed = notDone;
|
||||
compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
|
||||
|
||||
DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%"PRIusz")", srcSize);
|
||||
DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize);
|
||||
|
||||
if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
|
||||
if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize))
|
||||
|
@ -989,6 +991,7 @@ size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
|
|||
BYTE* dstPtr = dstStart;
|
||||
|
||||
size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
|
||||
DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
|
||||
if (LZ4F_isError(flushSize)) return flushSize;
|
||||
dstPtr += flushSize;
|
||||
|
||||
|
@ -1002,6 +1005,7 @@ size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
|
|||
if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
|
||||
U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
|
||||
if (dstCapacity < 8) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
|
||||
DEBUGLOG(5,"Writing 32-bit content checksum");
|
||||
LZ4F_writeLE32(dstPtr, xxh);
|
||||
dstPtr+=4; /* content Checksum */
|
||||
}
|
||||
|
@ -1112,6 +1116,7 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
|
|||
size_t frameHeaderSize;
|
||||
const BYTE* srcPtr = (const BYTE*)src;
|
||||
|
||||
DEBUGLOG(5, "LZ4F_decodeHeader");
|
||||
/* need to decode header to get frameInfo */
|
||||
if (srcSize < minFHSize) return err0r(LZ4F_ERROR_frameHeader_incomplete); /* minimal frame header size */
|
||||
MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));
|
||||
|
@ -1132,8 +1137,10 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
|
|||
|
||||
/* control magic number */
|
||||
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
|
||||
if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER)
|
||||
if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
|
||||
DEBUGLOG(4, "frame header error : unknown magic number");
|
||||
return err0r(LZ4F_ERROR_frameType_unknown);
|
||||
}
|
||||
#endif
|
||||
dctx->frameInfo.frameType = LZ4F_frame;
|
||||
|
||||
|
@ -1282,15 +1289,20 @@ LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
|
|||
|
||||
|
||||
/* LZ4F_updateDict() :
|
||||
* only used for LZ4F_blockLinked mode */
|
||||
* only used for LZ4F_blockLinked mode
|
||||
* Condition : dstPtr != NULL
|
||||
*/
|
||||
static void LZ4F_updateDict(LZ4F_dctx* dctx,
|
||||
const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
|
||||
unsigned withinTmp)
|
||||
{
|
||||
if (dctx->dictSize==0)
|
||||
dctx->dict = (const BYTE*)dstPtr; /* priority to dictionary continuity */
|
||||
assert(dstPtr != NULL);
|
||||
if (dctx->dictSize==0) {
|
||||
dctx->dict = (const BYTE*)dstPtr; /* priority to prefix mode */
|
||||
}
|
||||
assert(dctx->dict != NULL);
|
||||
|
||||
if (dctx->dict + dctx->dictSize == dstPtr) { /* dictionary continuity, directly within dstBuffer */
|
||||
if (dctx->dict + dctx->dictSize == dstPtr) { /* prefix mode, everything within dstBuffer */
|
||||
dctx->dictSize += dstSize;
|
||||
return;
|
||||
}
|
||||
|
@ -1304,9 +1316,10 @@ static void LZ4F_updateDict(LZ4F_dctx* dctx,
|
|||
|
||||
assert(dstSize < 64 KB); /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */
|
||||
|
||||
/* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOut */
|
||||
/* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
|
||||
assert(dctx->tmpOutBuffer != NULL);
|
||||
|
||||
if ((withinTmp) && (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */
|
||||
if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */
|
||||
/* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
|
||||
assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart);
|
||||
dctx->dictSize += dstSize;
|
||||
|
@ -1378,17 +1391,21 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
const BYTE* const srcEnd = srcStart + *srcSizePtr;
|
||||
const BYTE* srcPtr = srcStart;
|
||||
BYTE* const dstStart = (BYTE*)dstBuffer;
|
||||
BYTE* const dstEnd = dstStart + *dstSizePtr;
|
||||
BYTE* const dstEnd = dstStart ? dstStart + *dstSizePtr : NULL;
|
||||
BYTE* dstPtr = dstStart;
|
||||
const BYTE* selectedIn = NULL;
|
||||
unsigned doAnotherStage = 1;
|
||||
size_t nextSrcSizeHint = 1;
|
||||
|
||||
|
||||
DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u",
|
||||
srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
|
||||
if (dstBuffer == NULL) assert(*dstSizePtr == 0);
|
||||
MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
|
||||
if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull;
|
||||
*srcSizePtr = 0;
|
||||
*dstSizePtr = 0;
|
||||
assert(dctx != NULL);
|
||||
|
||||
/* behaves as a state machine */
|
||||
|
||||
|
@ -1398,6 +1415,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
{
|
||||
|
||||
case dstage_getFrameHeader:
|
||||
DEBUGLOG(6, "dstage_getFrameHeader");
|
||||
if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */
|
||||
size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr)); /* will update dStage appropriately */
|
||||
if (LZ4F_isError(hSize)) return hSize;
|
||||
|
@ -1411,6 +1429,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
/* fall-through */
|
||||
|
||||
case dstage_storeFrameHeader:
|
||||
DEBUGLOG(6, "dstage_storeFrameHeader");
|
||||
{ size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr));
|
||||
memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
|
||||
dctx->tmpInSize += sizeToCopy;
|
||||
|
@ -1427,6 +1446,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
break;
|
||||
|
||||
case dstage_init:
|
||||
DEBUGLOG(6, "dstage_init");
|
||||
if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0);
|
||||
/* internal buffers allocation */
|
||||
{ size_t const bufferNeeded = dctx->maxBlockSize
|
||||
|
@ -1480,17 +1500,21 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
} /* if (dctx->dStage == dstage_storeBlockHeader) */
|
||||
|
||||
/* decode block header */
|
||||
{ size_t const nextCBlockSize = LZ4F_readLE32(selectedIn) & 0x7FFFFFFFU;
|
||||
{ U32 const blockHeader = LZ4F_readLE32(selectedIn);
|
||||
size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU;
|
||||
size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize;
|
||||
if (nextCBlockSize==0) { /* frameEnd signal, no more block */
|
||||
if (blockHeader==0) { /* frameEnd signal, no more block */
|
||||
DEBUGLOG(5, "end of frame");
|
||||
dctx->dStage = dstage_getSuffix;
|
||||
break;
|
||||
}
|
||||
if (nextCBlockSize > dctx->maxBlockSize)
|
||||
if (nextCBlockSize > dctx->maxBlockSize) {
|
||||
return err0r(LZ4F_ERROR_maxBlockSize_invalid);
|
||||
if (LZ4F_readLE32(selectedIn) & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
|
||||
}
|
||||
if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
|
||||
/* next block is uncompressed */
|
||||
dctx->tmpInTarget = nextCBlockSize;
|
||||
DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize);
|
||||
if (dctx->frameInfo.blockChecksumFlag) {
|
||||
(void)XXH32_reset(&dctx->blockChecksum, 0);
|
||||
}
|
||||
|
@ -1508,20 +1532,26 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
}
|
||||
|
||||
case dstage_copyDirect: /* uncompressed block */
|
||||
{ size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
|
||||
size_t const sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
|
||||
memcpy(dstPtr, srcPtr, sizeToCopy);
|
||||
if (dctx->frameInfo.blockChecksumFlag) {
|
||||
(void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
|
||||
}
|
||||
if (dctx->frameInfo.contentChecksumFlag)
|
||||
(void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
|
||||
if (dctx->frameInfo.contentSize)
|
||||
dctx->frameRemainingSize -= sizeToCopy;
|
||||
DEBUGLOG(6, "dstage_copyDirect");
|
||||
{ size_t sizeToCopy;
|
||||
if (dstPtr == NULL) {
|
||||
sizeToCopy = 0;
|
||||
} else {
|
||||
size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
|
||||
sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
|
||||
memcpy(dstPtr, srcPtr, sizeToCopy);
|
||||
if (dctx->frameInfo.blockChecksumFlag) {
|
||||
(void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
|
||||
}
|
||||
if (dctx->frameInfo.contentChecksumFlag)
|
||||
(void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
|
||||
if (dctx->frameInfo.contentSize)
|
||||
dctx->frameRemainingSize -= sizeToCopy;
|
||||
|
||||
/* history management (linked blocks only)*/
|
||||
if (dctx->frameInfo.blockMode == LZ4F_blockLinked)
|
||||
LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
|
||||
/* history management (linked blocks only)*/
|
||||
if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
|
||||
LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
|
||||
} }
|
||||
|
||||
srcPtr += sizeToCopy;
|
||||
dstPtr += sizeToCopy;
|
||||
|
@ -1534,15 +1564,16 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
break;
|
||||
}
|
||||
dctx->tmpInTarget -= sizeToCopy; /* need to copy more */
|
||||
nextSrcSizeHint = dctx->tmpInTarget +
|
||||
+(dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
|
||||
+ BHSize /* next header size */;
|
||||
doAnotherStage = 0;
|
||||
break;
|
||||
}
|
||||
nextSrcSizeHint = dctx->tmpInTarget +
|
||||
+(dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
|
||||
+ BHSize /* next header size */;
|
||||
doAnotherStage = 0;
|
||||
break;
|
||||
|
||||
/* check block checksum for recently transferred uncompressed block */
|
||||
case dstage_getBlockChecksum:
|
||||
DEBUGLOG(6, "dstage_getBlockChecksum");
|
||||
{ const void* crcSrc;
|
||||
if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) {
|
||||
crcSrc = srcPtr;
|
||||
|
@ -1562,8 +1593,12 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
{ U32 const readCRC = LZ4F_readLE32(crcSrc);
|
||||
U32 const calcCRC = XXH32_digest(&dctx->blockChecksum);
|
||||
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
|
||||
if (readCRC != calcCRC)
|
||||
DEBUGLOG(6, "compare block checksum");
|
||||
if (readCRC != calcCRC) {
|
||||
DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
|
||||
readCRC, calcCRC);
|
||||
return err0r(LZ4F_ERROR_blockChecksum_invalid);
|
||||
}
|
||||
#else
|
||||
(void)readCRC;
|
||||
(void)calcCRC;
|
||||
|
@ -1573,6 +1608,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
break;
|
||||
|
||||
case dstage_getCBlock:
|
||||
DEBUGLOG(6, "dstage_getCBlock");
|
||||
if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) {
|
||||
dctx->tmpInSize = 0;
|
||||
dctx->dStage = dstage_storeCBlock;
|
||||
|
@ -1582,7 +1618,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
selectedIn = srcPtr;
|
||||
srcPtr += dctx->tmpInTarget;
|
||||
|
||||
if (0) /* jump over next block */
|
||||
if (0) /* always jump over next block */
|
||||
case dstage_storeCBlock:
|
||||
{ size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize;
|
||||
size_t const inputLeft = (size_t)(srcEnd-srcPtr);
|
||||
|
@ -1619,6 +1655,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
const char* dict = (const char*)dctx->dict;
|
||||
size_t dictSize = dctx->dictSize;
|
||||
int decodedSize;
|
||||
assert(dstPtr != NULL);
|
||||
if (dict && dictSize > 1 GB) {
|
||||
/* the dictSize param is an int, avoid truncation / sign issues */
|
||||
dict += dictSize - 64 KB;
|
||||
|
@ -1636,8 +1673,9 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
dctx->frameRemainingSize -= (size_t)decodedSize;
|
||||
|
||||
/* dictionary management */
|
||||
if (dctx->frameInfo.blockMode==LZ4F_blockLinked)
|
||||
if (dctx->frameInfo.blockMode==LZ4F_blockLinked) {
|
||||
LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0);
|
||||
}
|
||||
|
||||
dstPtr += decodedSize;
|
||||
dctx->dStage = dstage_getBlockHeader;
|
||||
|
@ -1684,7 +1722,9 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
/* fall-through */
|
||||
|
||||
case dstage_flushOut: /* flush decoded data from tmpOut to dstBuffer */
|
||||
{ size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
|
||||
DEBUGLOG(6, "dstage_flushOut");
|
||||
if (dstPtr != NULL) {
|
||||
size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
|
||||
memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy);
|
||||
|
||||
/* dictionary management */
|
||||
|
@ -1693,16 +1733,15 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
|
||||
dctx->tmpOutStart += sizeToCopy;
|
||||
dstPtr += sizeToCopy;
|
||||
|
||||
if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */
|
||||
dctx->dStage = dstage_getBlockHeader; /* get next block */
|
||||
break;
|
||||
}
|
||||
/* could not flush everything : stop there, just request a block header */
|
||||
doAnotherStage = 0;
|
||||
nextSrcSizeHint = BHSize;
|
||||
}
|
||||
if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */
|
||||
dctx->dStage = dstage_getBlockHeader; /* get next block */
|
||||
break;
|
||||
}
|
||||
/* could not flush everything : stop there, just request a block header */
|
||||
doAnotherStage = 0;
|
||||
nextSrcSizeHint = BHSize;
|
||||
break;
|
||||
|
||||
case dstage_getSuffix:
|
||||
if (dctx->frameRemainingSize)
|
||||
|
@ -1806,6 +1845,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2);
|
||||
if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */
|
||||
&& (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */
|
||||
&& (dctx->dict != NULL) /* dictionary exists */
|
||||
&& (!decompressOptionsPtr->stableDst) /* cannot rely on dst data to remain there for next call */
|
||||
&& ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) ) /* valid stages : [init ... getSuffix[ */
|
||||
{
|
||||
|
@ -1815,9 +1855,9 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
|
||||
if (dctx->tmpOutSize > 64 KB) copySize = 0;
|
||||
if (copySize > preserveSize) copySize = preserveSize;
|
||||
assert(dctx->tmpOutBuffer != NULL);
|
||||
|
||||
if (copySize > 0)
|
||||
memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
|
||||
memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
|
||||
|
||||
dctx->dict = dctx->tmpOutBuffer;
|
||||
dctx->dictSize = preserveSize + dctx->tmpOutStart;
|
||||
|
@ -1825,8 +1865,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
|
|||
const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize;
|
||||
size_t const newDictSize = MIN(dctx->dictSize, 64 KB);
|
||||
|
||||
if (newDictSize > 0)
|
||||
memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);
|
||||
memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);
|
||||
|
||||
dctx->dict = dctx->tmpOutBuffer;
|
||||
dctx->dictSize = newDictSize;
|
||||
|
|
|
@@ -66,17 +66,22 @@ extern "C" {
 *****************************************************************/
 /*  LZ4_DLL_EXPORT :
 *   Enable exporting of functions when building a Windows DLL
-*   LZ4FLIB_API :
+*   LZ4FLIB_VISIBILITY :
 *   Control library symbols visibility.
 */
+#ifndef LZ4FLIB_VISIBILITY
+#  if defined(__GNUC__) && (__GNUC__ >= 4)
+#    define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default")))
+#  else
+#    define LZ4FLIB_VISIBILITY
+#  endif
+#endif
 #if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
-#  define LZ4FLIB_API __declspec(dllexport)
+#  define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY
 #elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
-#  define LZ4FLIB_API __declspec(dllimport)
-#elif defined(__GNUC__) && (__GNUC__ >= 4)
-#  define LZ4FLIB_API __attribute__ ((__visibility__ ("default")))
+#  define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY
 #else
-#  define LZ4FLIB_API
+#  define LZ4FLIB_API LZ4FLIB_VISIBILITY
 #endif
 
 #ifdef LZ4F_DISABLE_DEPRECATE_WARNINGS
@ -103,7 +108,7 @@ LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return
|
|||
|
||||
/*-************************************
|
||||
* Frame compression types
|
||||
**************************************/
|
||||
************************************* */
|
||||
/* #define LZ4F_ENABLE_OBSOLETE_ENUMS // uncomment to enable obsolete enums */
|
||||
#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS
|
||||
# define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x
|
||||
|
@ -113,7 +118,8 @@ LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return
|
|||
|
||||
/* The larger the block size, the (slightly) better the compression ratio,
|
||||
* though there are diminishing returns.
|
||||
* Larger blocks also increase memory usage on both compression and decompression sides. */
|
||||
* Larger blocks also increase memory usage on both compression and decompression sides.
|
||||
*/
|
||||
typedef enum {
|
||||
LZ4F_default=0,
|
||||
LZ4F_max64KB=4,
|
||||
|
@ -284,7 +290,7 @@ LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
|
|||
* @return is always the same for a srcSize and prefsPtr.
|
||||
* prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario.
|
||||
* tech details :
|
||||
* @return includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes.
|
||||
* @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes.
|
||||
* It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd().
|
||||
* @return doesn't include frame header, as it was already generated by LZ4F_compressBegin().
|
||||
*/
|
||||
|
@ -376,7 +382,7 @@ LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx);
|
|||
* note : Frame header size is variable, but is guaranteed to be
|
||||
* >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes.
|
||||
*/
|
||||
size_t LZ4F_headerSize(const void* src, size_t srcSize);
|
||||
LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize);
|
||||
|
||||
/*! LZ4F_getFrameInfo() :
|
||||
* This function extracts frame parameters (max blockSize, dictID, etc.).
|
||||
|
@@ -426,8 +432,10 @@ LZ4FLIB_API size_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
                                      const void* srcBuffer, size_t* srcSizePtr);
 
 /*! LZ4F_decompress() :
- *  Call this function repetitively to regenerate compressed data from `srcBuffer`.
- *  The function will read up to *srcSizePtr bytes from srcBuffer,
+ *  Call this function repetitively to regenerate data compressed in `srcBuffer`.
+ *
+ *  The function requires a valid dctx state.
+ *  It will read up to *srcSizePtr bytes from srcBuffer,
  *  and decompress data into dstBuffer, of capacity *dstSizePtr.
 *
 *  The nb of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value).
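To make the calling convention above concrete, here is a rough decompression sketch (assumed usage, not part of this commit; error handling abbreviated):

    #include "lz4frame.h"

    /* Decompress one chunk of an LZ4 frame. Returns 0 when the frame is complete,
     * a positive size hint when more input is expected, or an LZ4F error code. */
    static size_t decompress_chunk(LZ4F_dctx *dctx,
                                   const void *src, size_t srcSize,
                                   void *dst, size_t dstCapacity)
    {
        size_t consumed = srcSize;     /* in: available, out: bytes read from src */
        size_t produced = dstCapacity; /* in: capacity,  out: bytes written to dst */
        size_t const hint = LZ4F_decompress(dctx, dst, &produced, src, &consumed, NULL);
        if (LZ4F_isError(hint)) return hint;
        /* the caller should feed the remaining (srcSize - consumed) bytes on the next call */
        return hint;
    }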
@@ -493,9 +501,9 @@ extern "C" {
 *  Use at your own risk.
 */
 #ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS
-#define LZ4FLIB_STATIC_API LZ4FLIB_API
+# define LZ4FLIB_STATIC_API LZ4FLIB_API
 #else
-#define LZ4FLIB_STATIC_API
+# define LZ4FLIB_STATIC_API
 #endif
 
 
|
307
src/lz4hc.c
307
src/lz4hc.c
|
@@ -53,7 +53,7 @@
 #include "lz4hc.h"
 
 
-/*===   Common LZ4 definitions   ===*/
+/*===   Common definitions   ===*/
 #if defined(__GNUC__)
 #  pragma GCC diagnostic ignored "-Wunused-function"
 #endif
@ -61,15 +61,16 @@
|
|||
# pragma clang diagnostic ignored "-Wunused-function"
|
||||
#endif
|
||||
|
||||
/*=== Enums ===*/
|
||||
typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
|
||||
|
||||
|
||||
#define LZ4_COMMONDEFS_ONLY
|
||||
#ifndef LZ4_SRC_INCLUDED
|
||||
#include "lz4.c" /* LZ4_count, constants, mem */
|
||||
#endif
|
||||
|
||||
|
||||
/*=== Enums ===*/
|
||||
typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
|
||||
|
||||
|
||||
/*=== Constants ===*/
|
||||
#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
|
||||
#define LZ4_OPT_NUM (1<<12)
|
||||
|
@ -92,7 +93,7 @@ static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)
|
|||
**************************************/
|
||||
static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
|
||||
{
|
||||
MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
|
||||
MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
|
||||
MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
|
||||
}
|
||||
|
||||
|
@ -161,8 +162,7 @@ int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
|
|||
static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
|
||||
{
|
||||
size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
|
||||
if (bitsToRotate == 0)
|
||||
return pattern;
|
||||
if (bitsToRotate == 0) return pattern;
|
||||
return LZ4HC_rotl32(pattern, (int)bitsToRotate);
|
||||
}
|
||||
|
||||
|
@ -172,7 +172,8 @@ static unsigned
|
|||
LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
|
||||
{
|
||||
const BYTE* const iStart = ip;
|
||||
reg_t const pattern = (sizeof(pattern)==8) ? (reg_t)pattern32 + (((reg_t)pattern32) << 32) : pattern32;
|
||||
reg_t const pattern = (sizeof(pattern)==8) ?
|
||||
(reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32;
|
||||
|
||||
while (likely(ip < iEnd-(sizeof(pattern)-1))) {
|
||||
reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
|
||||
|
@ -270,7 +271,7 @@ LZ4HC_InsertAndGetWiderMatch (
|
|||
DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)",
|
||||
matchIndex, lowestMatchIndex);
|
||||
|
||||
while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) {
|
||||
while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
|
||||
int matchLength=0;
|
||||
nbAttempts--;
|
||||
assert(matchIndex < ipIndex);
|
||||
|
@ -389,8 +390,8 @@ LZ4HC_InsertAndGetWiderMatch (
|
|||
if (lookBackLength==0) { /* no back possible */
|
||||
size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
|
||||
if ((size_t)longest < maxML) {
|
||||
assert(base + matchIndex < ip);
|
||||
if (ip - (base+matchIndex) > LZ4_DISTANCE_MAX) break;
|
||||
assert(base + matchIndex != ip);
|
||||
if ((size_t)(ip - base) - matchIndex > LZ4_DISTANCE_MAX) break;
|
||||
assert(maxML < 2 GB);
|
||||
longest = (int)maxML;
|
||||
*matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
|
||||
|
@ -410,7 +411,7 @@ LZ4HC_InsertAndGetWiderMatch (
|
|||
} /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */
|
||||
|
||||
if ( dict == usingDictCtxHc
|
||||
&& nbAttempts
|
||||
&& nbAttempts > 0
|
||||
&& ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) {
|
||||
size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->base);
|
||||
U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
|
||||
|
@ -460,74 +461,90 @@ int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index tabl
|
|||
* @return : 0 if ok,
|
||||
* 1 if buffer issue detected */
|
||||
LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
|
||||
const BYTE** ip,
|
||||
BYTE** op,
|
||||
const BYTE** anchor,
|
||||
const BYTE** _ip,
|
||||
BYTE** _op,
|
||||
const BYTE** _anchor,
|
||||
int matchLength,
|
||||
const BYTE* const match,
|
||||
limitedOutput_directive limit,
|
||||
BYTE* oend)
|
||||
{
|
||||
#define ip (*_ip)
|
||||
#define op (*_op)
|
||||
#define anchor (*_anchor)
|
||||
|
||||
size_t length;
|
||||
BYTE* const token = (*op)++;
|
||||
BYTE* const token = op++;
|
||||
|
||||
#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
|
||||
static const BYTE* start = NULL;
|
||||
static U32 totalCost = 0;
|
||||
U32 const pos = (start==NULL) ? 0 : (U32)(*anchor - start);
|
||||
U32 const ll = (U32)(*ip - *anchor);
|
||||
U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
|
||||
U32 const ll = (U32)(ip - anchor);
|
||||
U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
|
||||
U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
|
||||
U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
|
||||
if (start==NULL) start = *anchor; /* only works for single segment */
|
||||
if (start==NULL) start = anchor; /* only works for single segment */
|
||||
/* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
|
||||
DEBUGLOG(6, "pos:%7u -- literals:%3u, match:%4i, offset:%5u, cost:%3u + %u",
|
||||
DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u",
|
||||
pos,
|
||||
(U32)(*ip - *anchor), matchLength, (U32)(*ip-match),
|
||||
(U32)(ip - anchor), matchLength, (U32)(ip-match),
|
||||
cost, totalCost);
|
||||
totalCost += cost;
|
||||
#endif
|
||||
|
||||
/* Encode Literal length */
|
||||
length = (size_t)(*ip - *anchor);
|
||||
if ((limit) && ((*op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) return 1; /* Check output limit */
|
||||
length = (size_t)(ip - anchor);
|
||||
LZ4_STATIC_ASSERT(notLimited == 0);
|
||||
/* Check output limit */
|
||||
if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
|
||||
DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
|
||||
(int)length, (int)(oend - op));
|
||||
return 1;
|
||||
}
|
||||
if (length >= RUN_MASK) {
|
||||
size_t len = length - RUN_MASK;
|
||||
*token = (RUN_MASK << ML_BITS);
|
||||
for(; len >= 255 ; len -= 255) *(*op)++ = 255;
|
||||
*(*op)++ = (BYTE)len;
|
||||
for(; len >= 255 ; len -= 255) *op++ = 255;
|
||||
*op++ = (BYTE)len;
|
||||
} else {
|
||||
*token = (BYTE)(length << ML_BITS);
|
||||
}
|
||||
|
||||
/* Copy Literals */
|
||||
LZ4_wildCopy8(*op, *anchor, (*op) + length);
|
||||
*op += length;
|
||||
LZ4_wildCopy8(op, anchor, op + length);
|
||||
op += length;
|
||||
|
||||
/* Encode Offset */
|
||||
assert( (*ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */
|
||||
LZ4_writeLE16(*op, (U16)(*ip-match)); *op += 2;
|
||||
assert( (ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */
|
||||
LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
|
||||
|
||||
/* Encode MatchLength */
|
||||
assert(matchLength >= MINMATCH);
|
||||
length = (size_t)matchLength - MINMATCH;
|
||||
if ((limit) && (*op + (length / 255) + (1 + LASTLITERALS) > oend)) return 1; /* Check output limit */
|
||||
if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
|
||||
DEBUGLOG(6, "Not enough room to write match length");
|
||||
return 1; /* Check output limit */
|
||||
}
|
||||
if (length >= ML_MASK) {
|
||||
*token += ML_MASK;
|
||||
length -= ML_MASK;
|
||||
for(; length >= 510 ; length -= 510) { *(*op)++ = 255; *(*op)++ = 255; }
|
||||
if (length >= 255) { length -= 255; *(*op)++ = 255; }
|
||||
*(*op)++ = (BYTE)length;
|
||||
for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
|
||||
if (length >= 255) { length -= 255; *op++ = 255; }
|
||||
*op++ = (BYTE)length;
|
||||
} else {
|
||||
*token += (BYTE)(length);
|
||||
}
|
||||
|
||||
/* Prepare next loop */
|
||||
*ip += matchLength;
|
||||
*anchor = *ip;
|
||||
ip += matchLength;
|
||||
anchor = ip;
|
||||
|
||||
return 0;
|
||||
}
|
||||
#undef ip
|
||||
#undef op
|
||||
#undef anchor
|
||||
|
||||
LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
|
||||
LZ4HC_CCtx_internal* const ctx,
|
||||
|
@ -535,7 +552,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
|
|||
char* const dest,
|
||||
int* srcSizePtr,
|
||||
int const maxOutputSize,
|
||||
unsigned maxNbAttempts,
|
||||
int maxNbAttempts,
|
||||
const limitedOutput_directive limit,
|
||||
const dictCtx_directive dict
|
||||
)
|
||||
|
@ -565,7 +582,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
|
|||
/* init */
|
||||
*srcSizePtr = 0;
|
||||
if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
|
||||
if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
|
||||
if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
|
||||
|
||||
/* Main Loop */
|
||||
while (ip <= mflimit) {
|
||||
|
@ -637,7 +654,11 @@ _Search3:
|
|||
if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
|
||||
ip = start2;
|
||||
optr = op;
|
||||
if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) goto _dest_overflow;
|
||||
if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) {
|
||||
ml = ml2;
|
||||
ref = ref2;
|
||||
goto _dest_overflow;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -709,17 +730,18 @@ _Search3:
|
|||
_last_literals:
|
||||
/* Encode Last Literals */
|
||||
{ size_t lastRunSize = (size_t)(iend - anchor); /* literals */
|
||||
size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
|
||||
size_t const totalSize = 1 + litLength + lastRunSize;
|
||||
size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
|
||||
size_t const totalSize = 1 + llAdd + lastRunSize;
|
||||
if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
|
||||
if (limit && (op + totalSize > oend)) {
|
||||
if (limit == limitedOutput) return 0; /* Check output limit */
|
||||
if (limit == limitedOutput) return 0;
|
||||
/* adapt lastRunSize to fill 'dest' */
|
||||
lastRunSize = (size_t)(oend - op) - 1;
|
||||
litLength = (lastRunSize + 255 - RUN_MASK) / 255;
|
||||
lastRunSize -= litLength;
|
||||
lastRunSize = (size_t)(oend - op) - 1 /*token*/;
|
||||
llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
|
||||
lastRunSize -= llAdd;
|
||||
}
|
||||
ip = anchor + lastRunSize;
|
||||
DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
|
||||
ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
|
||||
|
||||
if (lastRunSize >= RUN_MASK) {
|
||||
size_t accumulator = lastRunSize - RUN_MASK;
|
||||
|
@ -739,9 +761,25 @@ _last_literals:
|
|||
|
||||
_dest_overflow:
|
||||
if (limit == fillOutput) {
|
||||
/* Assumption : ip, anchor, ml and ref must be set correctly */
|
||||
size_t const ll = (size_t)(ip - anchor);
|
||||
size_t const ll_addbytes = (ll + 240) / 255;
|
||||
size_t const ll_totalCost = 1 + ll_addbytes + ll;
|
||||
BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
|
||||
DEBUGLOG(6, "Last sequence overflowing");
|
||||
op = optr; /* restore correct out pointer */
|
||||
if (op + ll_totalCost <= maxLitPos) {
|
||||
/* ll validated; now adjust match length */
|
||||
size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
|
||||
size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
|
||||
assert(maxMlSize < INT_MAX); assert(ml >= 0);
|
||||
if ((size_t)ml > maxMlSize) ml = (int)maxMlSize;
|
||||
if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) {
|
||||
LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend);
|
||||
} }
|
||||
goto _last_literals;
|
||||
}
|
||||
/* compression failed */
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -752,7 +790,7 @@ static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
|
|||
int const nbSearches, size_t sufficient_len,
|
||||
const limitedOutput_directive limit, int const fullUpdate,
|
||||
const dictCtx_directive dict,
|
||||
HCfavor_e favorDecSpeed);
|
||||
const HCfavor_e favorDecSpeed);
|
||||
|
||||
|
||||
LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
|
||||
|
@ -769,7 +807,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
|
|||
typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
|
||||
typedef struct {
|
||||
lz4hc_strat_e strat;
|
||||
U32 nbSearches;
|
||||
int nbSearches;
|
||||
U32 targetLength;
|
||||
} cParams_t;
|
||||
static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = {
|
||||
|
@ -788,7 +826,8 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
|
|||
{ lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
|
||||
};
|
||||
|
||||
DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d)", ctx, src, *srcSizePtr);
|
||||
DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
|
||||
ctx, src, *srcSizePtr, limit);
|
||||
|
||||
if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */
|
||||
if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */
|
||||
|
@ -808,7 +847,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
|
|||
assert(cParam.strat == lz4opt);
|
||||
result = LZ4HC_compress_optimal(ctx,
|
||||
src, dst, srcSizePtr, dstCapacity,
|
||||
(int)cParam.nbSearches, cParam.targetLength, limit,
|
||||
cParam.nbSearches, cParam.targetLength, limit,
|
||||
cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */
|
||||
dict, favor);
|
||||
}
|
||||
|
@ -881,27 +920,22 @@ LZ4HC_compress_generic (
|
|||
|
||||
int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }
|
||||
|
||||
#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 :
|
||||
* it reports an aligment of 8-bytes,
|
||||
* while actually aligning LZ4_streamHC_t on 4 bytes. */
|
||||
static size_t LZ4_streamHC_t_alignment(void)
|
||||
{
|
||||
struct { char c; LZ4_streamHC_t t; } t_a;
|
||||
return sizeof(t_a) - sizeof(t_a.t);
|
||||
}
|
||||
#if LZ4_ALIGN_TEST
|
||||
typedef struct { char c; LZ4_streamHC_t t; } t_a;
|
||||
return sizeof(t_a) - sizeof(LZ4_streamHC_t);
|
||||
#else
|
||||
return 1; /* effectively disabled */
|
||||
#endif
|
||||
}
|
||||
|
||||
/* state is presumed correctly initialized,
|
||||
* in which case its size and alignment have already been validate */
|
||||
int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
|
||||
{
|
||||
LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
|
||||
#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 :
|
||||
* it reports an aligment of 8-bytes,
|
||||
* while actually aligning LZ4_streamHC_t on 4 bytes. */
|
||||
assert(((size_t)state & (LZ4_streamHC_t_alignment() - 1)) == 0); /* check alignment */
|
||||
#endif
|
||||
if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */
|
||||
if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
|
||||
LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
|
||||
LZ4HC_init_internal (ctx, (const BYTE*)src);
|
||||
if (dstCapacity < LZ4_compressBound(srcSize))
|
||||
|
@ -950,10 +984,11 @@ int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* s
|
|||
/* allocation */
|
||||
LZ4_streamHC_t* LZ4_createStreamHC(void)
|
||||
{
|
||||
LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
|
||||
if (LZ4_streamHCPtr==NULL) return NULL;
|
||||
LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); /* full initialization, malloc'ed buffer can be full of garbage */
|
||||
return LZ4_streamHCPtr;
|
||||
LZ4_streamHC_t* const state =
|
||||
(LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
|
||||
if (state == NULL) return NULL;
|
||||
LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
|
||||
return state;
|
||||
}

int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)

@@ -968,22 +1003,16 @@ int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)

LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
{
    LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
    if (buffer == NULL) return NULL;
    if (size < sizeof(LZ4_streamHC_t)) return NULL;
#ifndef _MSC_VER  /* for some reason, Visual fails the aligment test on 32-bit x86 :
                   * it reports an aligment of 8-bytes,
                   * while actually aligning LZ4_streamHC_t on 4 bytes. */
    if (((size_t)buffer) & (LZ4_streamHC_t_alignment() - 1)) return NULL;  /* alignment check */
#endif
    /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
    LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= LZ4_STREAMHCSIZE);
    DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", LZ4_streamHCPtr, (unsigned)size);
    /* end-base will trigger a clearTable on starting compression */
    LZ4_streamHCPtr->internal_donotuse.end = (const BYTE *)(ptrdiff_t)-1;
    LZ4_streamHCPtr->internal_donotuse.base = NULL;
    LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
    LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = 0;
    LZ4_streamHCPtr->internal_donotuse.dirty = 0;
    DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
    /* check conditions */
    if (buffer == NULL) return NULL;
    if (size < sizeof(LZ4_streamHC_t)) return NULL;
    if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL;
    /* init */
    { LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse);
      MEM_INIT(hcstate, 0, sizeof(*hcstate)); }
    LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
    return LZ4_streamHCPtr;
}
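These size and alignment checks guard the caller-provided-storage path. A sketch of that usage follows; the stack variable and buffer sizes are assumptions, while `LZ4_initStreamHC`, `LZ4_compress_HC_extStateHC` and `LZ4HC_CLEVEL_DEFAULT` are the public lz4hc.h names.

```c
#include <stdio.h>
#include "lz4.h"
#include "lz4hc.h"

int main(void)
{
    /* The stream lives in caller storage; no malloc involved. */
    LZ4_streamHC_t state;
    if (LZ4_initStreamHC(&state, sizeof(state)) == NULL) {
        /* would only fail if the buffer were too small or misaligned */
        return 1;
    }

    const char src[] = "one-shot HC compression using external state";
    char dst[256];  /* assumed >= LZ4_compressBound(srcSize) */
    int const csize = LZ4_compress_HC_extStateHC(&state, src, dst,
                                                 (int)sizeof(src), (int)sizeof(dst),
                                                 LZ4HC_CLEVEL_DEFAULT);
    printf("compressed %d -> %d bytes\n", (int)sizeof(src), csize);
    return csize > 0 ? 0 : 1;
}
```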

@@ -1028,7 +1057,7 @@ int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
              const char* dictionary, int dictSize)
{
    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    DEBUGLOG(4, "LZ4_loadDictHC(%p, %p, %d)", LZ4_streamHCPtr, dictionary, dictSize);
    DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize);
    assert(LZ4_streamHCPtr != NULL);
    if (dictSize > 64 KB) {
        dictionary += (size_t)dictSize - 64 KB;

@@ -1069,14 +1098,15 @@ static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBl
        ctxPtr->dictCtx = NULL;
    }

static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
                                            const char* src, char* dst,
                                            int* srcSizePtr, int dstCapacity,
                                            limitedOutput_directive limit)
static int
LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
                                 const char* src, char* dst,
                                 int* srcSizePtr, int dstCapacity,
                                 limitedOutput_directive limit)
{
    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    DEBUGLOG(4, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d)",
             LZ4_streamHCPtr, src, *srcSizePtr);
    DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
             LZ4_streamHCPtr, src, *srcSizePtr, limit);
    assert(ctxPtr != NULL);
    /* auto-init if forgotten */
    if (ctxPtr->base == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);

@@ -1100,8 +1130,7 @@ static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
            if (sourceEnd > dictEnd) sourceEnd = dictEnd;
            ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
            if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
        }
    }
    }   }

    return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
}

@@ -1121,23 +1150,30 @@ int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const ch


/* dictionary saving */

/* LZ4_saveDictHC :
 * save history content
 * into a user-provided buffer
 * which is then used to continue compression
 */
int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
{
    LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
    int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
    DEBUGLOG(4, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
    DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
    assert(prefixSize >= 0);
    if (dictSize > 64 KB) dictSize = 64 KB;
    if (dictSize < 4) dictSize = 0;
    if (dictSize > prefixSize) dictSize = prefixSize;
    memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
    if (safeBuffer == NULL) assert(dictSize == 0);
    if (dictSize > 0)
        memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
    { U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
      streamPtr->end = (const BYTE*)safeBuffer + dictSize;
      streamPtr->base = streamPtr->end - endIndex;
      streamPtr->dictLimit = endIndex - (U32)dictSize;
      streamPtr->lowLimit = endIndex - (U32)dictSize;
      if (streamPtr->nextToUpdate < streamPtr->dictLimit) streamPtr->nextToUpdate = streamPtr->dictLimit;
      if (streamPtr->nextToUpdate < streamPtr->dictLimit)
          streamPtr->nextToUpdate = streamPtr->dictLimit;
    }
    return dictSize;
}
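LZ4_saveDictHC re-anchors the stream on the saved window, which is what makes the following streaming pattern safe when the input buffer is reused between blocks. The sketch is illustrative: chunk sizes, buffer names and the synthetic input are assumptions, while `LZ4_compress_HC_continue` and `LZ4_saveDictHC` are the public streaming HC API.

```c
#include <stdio.h>
#include <string.h>
#include "lz4.h"
#include "lz4hc.h"

#define CHUNK 4096

int main(void)
{
    LZ4_streamHC_t* const hc = LZ4_createStreamHC();
    char inbuf[CHUNK];                      /* reused for every block */
    char dict[64 * 1024];                   /* stable copy of the compression window */
    char outbuf[LZ4_COMPRESSBOUND(CHUNK)];
    int block;

    if (hc == NULL) return 1;

    for (block = 0; block < 2; block++) {
        /* pretend this is the next chunk read from a file or socket */
        memset(inbuf, 'a' + block, sizeof(inbuf));

        int const csize = LZ4_compress_HC_continue(hc, inbuf, outbuf,
                                                   (int)sizeof(inbuf), (int)sizeof(outbuf));
        printf("block %d: %d -> %d bytes\n", block, (int)sizeof(inbuf), csize);

        /* inbuf is about to be overwritten, so move the history window
         * into 'dict' and let the stream continue from there */
        LZ4_saveDictHC(hc, dict, (int)sizeof(dict));
    }

    LZ4_freeStreamHC(hc);
    return 0;
}
```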

@@ -1287,8 +1323,13 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
                                    const dictCtx_directive dict,
                                    const HCfavor_e favorDecSpeed)
{
    int retval = 0;
#define TRAILING_LITERALS 3
#ifdef LZ4HC_HEAPMODE
    LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
#else
    LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS];   /* ~64 KB, which is a bit large for stack... */
#endif

    const BYTE* ip = (const BYTE*) source;
    const BYTE* anchor = ip;

@@ -1298,15 +1339,19 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
    BYTE* op = (BYTE*) dst;
    BYTE* opSaved = (BYTE*) dst;
    BYTE* oend = op + dstCapacity;
    int ovml = MINMATCH;  /* overflow - last sequence */
    const BYTE* ovref = NULL;

    /* init */
#ifdef LZ4HC_HEAPMODE
    if (opt == NULL) goto _return_label;
#endif
    DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
    *srcSizePtr = 0;
    if (limit == fillOutput) oend -= LASTLITERALS;   /* Hack for support LZ4 format restriction */
    if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;

    /* Main Loop */
    assert(ip - anchor < LZ4_MAX_INPUT_SIZE);
    while (ip <= mflimit) {
        int const llen = (int)(ip - anchor);
        int best_mlen, best_off;

@@ -1320,8 +1365,11 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
            int const firstML = firstMatch.len;
            const BYTE* const matchPos = ip - firstMatch.off;
            opSaved = op;
            if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) )   /* updates ip, op and anchor */
            if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) {  /* updates ip, op and anchor */
                ovml = firstML;
                ovref = matchPos;
                goto _dest_overflow;
            }
            continue;
        }

@@ -1463,7 +1511,7 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
        best_off = opt[last_match_pos].off;
        cur = last_match_pos - best_mlen;

encode: /* cur, last_match_pos, best_mlen, best_off must be set */
encode: /* cur, last_match_pos, best_mlen, best_off must be set */
        assert(cur < LZ4_OPT_NUM);
        assert(last_match_pos >= 1);   /* == 1 when only one candidate */
        DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);

@@ -1493,25 +1541,31 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
                assert(ml >= MINMATCH);
                assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
                opSaved = op;
                if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) )   /* updates ip, op and anchor */
                if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) {  /* updates ip, op and anchor */
                    ovml = ml;
                    ovref = ip - offset;
                    goto _dest_overflow;
            } }
        } } }
    }  /* while (ip <= mflimit) */

_last_literals:
_last_literals:
    /* Encode Last Literals */
    {   size_t lastRunSize = (size_t)(iend - anchor);  /* literals */
        size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
        size_t const totalSize = 1 + litLength + lastRunSize;
        size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
        size_t const totalSize = 1 + llAdd + lastRunSize;
        if (limit == fillOutput) oend += LASTLITERALS;   /* restore correct value */
        if (limit && (op + totalSize > oend)) {
            if (limit == limitedOutput) return 0;   /* Check output limit */
            if (limit == limitedOutput) {   /* Check output limit */
                retval = 0;
                goto _return_label;
            }
            /* adapt lastRunSize to fill 'dst' */
            lastRunSize = (size_t)(oend - op) - 1;
            litLength = (lastRunSize + 255 - RUN_MASK) / 255;
            lastRunSize -= litLength;
            lastRunSize = (size_t)(oend - op) - 1 /*token*/;
            llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
            lastRunSize -= llAdd;
        }
        ip = anchor + lastRunSize;
        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
        ip = anchor + lastRunSize;  /* can be != iend if limit==fillOutput */

        if (lastRunSize >= RUN_MASK) {
            size_t accumulator = lastRunSize - RUN_MASK;

@@ -1527,12 +1581,35 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,

    /* End */
    *srcSizePtr = (int) (((const char*)ip) - source);
    return (int) ((char*)op-dst);
    retval = (int) ((char*)op-dst);
    goto _return_label;

_dest_overflow:
    if (limit == fillOutput) {
        op = opSaved;  /* restore correct out pointer */
        goto _last_literals;
    }
    return 0;
}
_dest_overflow:
    if (limit == fillOutput) {
        /* Assumption : ip, anchor, ovml and ovref must be set correctly */
        size_t const ll = (size_t)(ip - anchor);
        size_t const ll_addbytes = (ll + 240) / 255;
        size_t const ll_totalCost = 1 + ll_addbytes + ll;
        BYTE* const maxLitPos = oend - 3;  /* 2 for offset, 1 for token */
        DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
        op = opSaved;  /* restore correct out pointer */
        if (op + ll_totalCost <= maxLitPos) {
            /* ll validated; now adjust match length */
            size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
            size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
            assert(maxMlSize < INT_MAX); assert(ovml >= 0);
            if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
            if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
                DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
                DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
                LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend);
                DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
        }   }
        goto _last_literals;
    }
_return_label:
#ifdef LZ4HC_HEAPMODE
    FREEMEM(opt);
#endif
    return retval;
}

55
src/lz4hc.h

@@ -198,57 +198,32 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in
#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)


#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#include <stdint.h>

typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
struct LZ4HC_CCtx_internal
{
    uint32_t hashTable[LZ4HC_HASHTABLESIZE];
    uint16_t chainTable[LZ4HC_MAXD];
    const uint8_t* end;        /* next block here to continue on current prefix */
    const uint8_t* base;       /* All index relative to this position */
    const uint8_t* dictBase;   /* alternate base for extDict */
    uint32_t dictLimit;        /* below that point, need extDict */
    uint32_t lowLimit;         /* below that point, no more dict */
    uint32_t nextToUpdate;     /* index from which to continue dictionary update */
    short compressionLevel;
    int8_t favorDecSpeed;      /* favor decompression speed if this flag set,
                                  otherwise, favor compression ratio */
    int8_t dirty;              /* stream has to be fully reset if this flag is set */
    LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
    LZ4_u16 chainTable[LZ4HC_MAXD];
    const LZ4_byte* end;       /* next block here to continue on current prefix */
    const LZ4_byte* base;      /* All index relative to this position */
    const LZ4_byte* dictBase;  /* alternate base for extDict */
    LZ4_u32 dictLimit;         /* below that point, need extDict */
    LZ4_u32 lowLimit;          /* below that point, no more dict */
    LZ4_u32 nextToUpdate;      /* index from which to continue dictionary update */
    short compressionLevel;
    LZ4_i8 favorDecSpeed;      /* favor decompression speed if this flag set,
                                  otherwise, favor compression ratio */
    LZ4_i8 dirty;              /* stream has to be fully reset if this flag is set */
    const LZ4HC_CCtx_internal* dictCtx;
};

#else

typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
struct LZ4HC_CCtx_internal
{
    unsigned int hashTable[LZ4HC_HASHTABLESIZE];
    unsigned short chainTable[LZ4HC_MAXD];
    const unsigned char* end;      /* next block here to continue on current prefix */
    const unsigned char* base;     /* All index relative to this position */
    const unsigned char* dictBase; /* alternate base for extDict */
    unsigned int dictLimit;        /* below that point, need extDict */
    unsigned int lowLimit;         /* below that point, no more dict */
    unsigned int nextToUpdate;     /* index from which to continue dictionary update */
    short compressionLevel;
    char favorDecSpeed;            /* favor decompression speed if this flag set,
                                      otherwise, favor compression ratio */
    char dirty;                    /* stream has to be fully reset if this flag is set */
    const LZ4HC_CCtx_internal* dictCtx;
};

#endif


/* Do not use these definitions directly !
 * Declare or allocate an LZ4_streamHC_t instead.
 */
#define LZ4_STREAMHCSIZE       (4*LZ4HC_HASHTABLESIZE + 2*LZ4HC_MAXD + 56 + ((sizeof(void*)==16) ? 56 : 0) /* AS400*/ )   /* 262200 or 262256*/
#define LZ4_STREAMHCSIZE_SIZET (LZ4_STREAMHCSIZE / sizeof(size_t))
#define LZ4_STREAMHCSIZE       262200  /* static size, for inter-version compatibility */
#define LZ4_STREAMHCSIZE_VOIDP (LZ4_STREAMHCSIZE / sizeof(void*))
union LZ4_streamHC_u {
    size_t table[LZ4_STREAMHCSIZE_SIZET];
    void* table[LZ4_STREAMHCSIZE_VOIDP];
    LZ4HC_CCtx_internal internal_donotuse;
};  /* previously typedef'd to LZ4_streamHC_t */
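With LZ4_STREAMHCSIZE now fixed at 262200 bytes for inter-version compatibility, a small sanity check can confirm that a statically reserved LZ4_streamHC_t matches the advertised size. The sketch below is illustrative only (the variable name is an assumption); it relies on the public `LZ4_sizeofStateHC()` shown earlier, `LZ4_initStreamHC()` and the LZ4_STREAMHCSIZE macro.

```c
#include <stdio.h>
#include "lz4hc.h"

/* Static storage for one HC stream; its size is known at compile time. */
static LZ4_streamHC_t g_hcState;

int main(void)
{
    printf("sizeof(LZ4_streamHC_t) = %u\n", (unsigned)sizeof(g_hcState));
    printf("LZ4_STREAMHCSIZE       = %u\n", (unsigned)LZ4_STREAMHCSIZE);
    printf("LZ4_sizeofStateHC()    = %d\n", LZ4_sizeofStateHC());

    /* the union must still be explicitly initialized before use */
    if (LZ4_initStreamHC(&g_hcState, sizeof(g_hcState)) == NULL) return 1;
    return 0;
}
```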