zstd_internal.h

/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CCOMMON_H_MODULE
#define ZSTD_CCOMMON_H_MODULE

/* this module contains definitions which must be identical
 * across compression, decompression and dictBuilder.
 * It also contains a few functions useful to at least 2 of them
 * and which benefit from being inlined */
/*-*************************************
*  Dependencies
***************************************/
#include "compiler.h"
#include "mem.h"
#include "debug.h"          /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
#include "error_private.h"
#define ZSTD_STATIC_LINKING_ONLY
#include <linux/zstd.h>
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include <linux/xxhash.h>   /* XXH_reset, update, digest */
#define ZSTD_TRACE 0

/* ---- static assert (debug) --- */
#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
#define ZSTD_isError ERR_isError   /* for inlining */
#define FSE_isError  ERR_isError
#define HUF_isError  ERR_isError
/*-*************************************
*  shared macros
***************************************/
#undef MIN
#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))

/*
 * Ignore: this is an internal helper.
 *
 * This is a helper function to help force C99-correctness during compilation.
 * Under strict compilation modes, variadic macro arguments can't be empty.
 * However, variadic function arguments can be. Using a function therefore lets
 * us statically check that at least one (string) argument was passed,
 * independent of the compilation flags.
 */
static INLINE_KEYWORD UNUSED_ATTR
void _force_has_format_string(const char *format, ...) {
  (void)format;
}

/*
 * Ignore: this is an internal helper.
 *
 * We want to force this function invocation to be syntactically correct, but
 * we don't want to force runtime evaluation of its arguments.
 */
#define _FORCE_HAS_FORMAT_STRING(...) \
  if (0) { \
    _force_has_format_string(__VA_ARGS__); \
  }
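
/*
 * Illustrative note (not part of the upstream header): because the macro
 * expands to a real variadic function call guarded by `if (0)`, the call is
 * type-checked at compile time but never executed at runtime. For example,
 * _FORCE_HAS_FORMAT_STRING("bad value %u", value) compiles (with `value` a
 * hypothetical variable), whereas an invocation with no arguments fails,
 * since _force_has_format_string() requires at least its `format` parameter.
 */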
/*
 * Return the specified error if the condition evaluates to true.
 *
 * In debug modes, prints additional information.
 * In order to do that (particularly, printing the conditional that failed),
 * this can't just wrap RETURN_ERROR().
 */
#define RETURN_ERROR_IF(cond, err, ...) \
  if (cond) { \
    RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
           __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
    RAWLOG(3, ": " __VA_ARGS__); \
    RAWLOG(3, "\n"); \
    return ERROR(err); \
  }
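
/*
 * Usage sketch (illustrative only; ZSTD_exampleCheck() is a hypothetical
 * function, not something defined in this header):
 *
 *   size_t ZSTD_exampleCheck(const void* src, size_t srcSize)
 *   {
 *       RETURN_ERROR_IF(srcSize < ZSTD_BLOCKHEADERSIZE, srcSize_wrong,
 *                       "input is only %u bytes", (unsigned)srcSize);
 *       (void)src;
 *       return srcSize;
 *   }
 */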
/*
 * Unconditionally return the specified error.
 *
 * In debug modes, prints additional information.
 */
#define RETURN_ERROR(err, ...) \
  do { \
    RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
           __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
    RAWLOG(3, ": " __VA_ARGS__); \
    RAWLOG(3, "\n"); \
    return ERROR(err); \
  } while(0);

/*
 * If the provided expression evaluates to an error code, returns that error code.
 *
 * In debug modes, prints additional information.
 */
#define FORWARD_IF_ERROR(err, ...) \
  do { \
    size_t const err_code = (err); \
    if (ERR_isError(err_code)) { \
      RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
             __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
      _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
      RAWLOG(3, ": " __VA_ARGS__); \
      RAWLOG(3, "\n"); \
      return err_code; \
    } \
  } while(0);
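
/*
 * Usage sketch (illustrative only; ZSTD_exampleWriteHeader() is a
 * hypothetical helper used just to show error forwarding):
 *
 *   size_t ZSTD_exampleCompressStep(void* dst, size_t dstCapacity)
 *   {
 *       size_t const hSize = ZSTD_exampleWriteHeader(dst, dstCapacity);
 *       FORWARD_IF_ERROR(hSize, "header serialization failed");
 *       return hSize;
 *   }
 */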
/*-*************************************
*  Common constants
***************************************/
#define ZSTD_OPT_NUM   (1<<12)

#define ZSTD_REP_NUM   3   /* number of repcodes */
#define ZSTD_REP_MOVE  (ZSTD_REP_NUM-1)
static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)
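
/*
 * Note: KB/MB/GB are postfix-style unit macros; the numeric literal comes
 * first, e.g. (128 KB) expands to (128 *(1 <<10)) == 131072 bytes.
 */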
#define BIT7 128
#define BIT6  64
#define BIT5  32
#define BIT4  16
#define BIT1   2
#define BIT0   1

#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };

#define ZSTD_FRAMEIDSIZE 4   /* magic number size */

#define ZSTD_BLOCKHEADERSIZE 3   /* the C standard doesn't allow a `static const` variable to be initialized from another `static const` variable */
static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;

#define ZSTD_FRAMECHECKSUMSIZE 4

#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */

#define HufLog 12
typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;

#define LONGNBSEQ 0x7F00

#define MINMATCH 3

#define Litbits  8
#define MaxLit ((1<<Litbits) - 1)
#define MaxML   52
#define MaxLL   35
#define DefaultMaxOff 28
#define MaxOff  31
#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog    9
#define LLFSELog    9
#define OffFSELog   8
#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
/* Each table cannot take more than #symbols * FSELog bits */
#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
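/*
 * Worked out with the constants above:
 * ((MaxML+1)*MLFSELog + (MaxLL+1)*LLFSELog + (MaxOff+1)*OffFSELog + 7) / 8
 *   = (53*9 + 36*9 + 32*8 + 7) / 8
 *   = (477 + 324 + 256 + 7) / 8
 *   = 133 bytes.
 */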
static UNUSED_ATTR const U32 LL_bits[MaxLL+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 6, 7, 8, 9,10,11,12,
    13,14,15,16
};
static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
     4, 3, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 1, 1, 1,
     2, 2, 2, 2, 2, 2, 2, 2,
     2, 3, 2, 1, 1, 1, 1, 1,
    -1,-1,-1,-1
};
#define LL_DEFAULTNORMLOG 6  /* for static allocation */
static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;

static UNUSED_ATTR const U32 ML_bits[MaxML+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 4, 5, 7, 8, 9,10,11,
    12,13,14,15,16
};
static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
     1, 4, 3, 2, 2, 2, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1,-1,-1,
    -1,-1,-1,-1,-1
};
#define ML_DEFAULTNORMLOG 6  /* for static allocation */
static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;

static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
     1, 1, 1, 1, 1, 1, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
    -1,-1,-1,-1,-1
};
#define OF_DEFAULTNORMLOG 5  /* for static allocation */
static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
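
/*
 * Sanity note on the tables above: in an FSE normalized distribution, an
 * entry of -1 marks a "low probability" symbol that still occupies exactly
 * one slot, so each default table sums to 1 << its *_DEFAULTNORMLOG
 * (64 for LL and ML, 32 for OF).
 */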
/*-*******************************************
*  Shared functions to include for inlining
*********************************************/
static void ZSTD_copy8(void* dst, const void* src) {
    ZSTD_memcpy(dst, src, 8);
}
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }

static void ZSTD_copy16(void* dst, const void* src) {
    ZSTD_memcpy(dst, src, 16);
}
#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }

#define WILDCOPY_OVERLENGTH 32
#define WILDCOPY_VECLEN 16

typedef enum {
    ZSTD_no_overlap,
    ZSTD_overlap_src_before_dst
    /*  ZSTD_overlap_dst_before_src, */
} ZSTD_overlap_e;

/*! ZSTD_wildcopy() :
 *  Custom version of ZSTD_memcpy(); it may over-read/write up to WILDCOPY_OVERLENGTH bytes (if length==0).
 *  @param ovtype controls the overlap detection
 *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
 *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
 *           The src buffer must be before the dst buffer.
 */
MEM_STATIC FORCE_INLINE_ATTR
void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
{
    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + length;

    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));

    if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
        /* Handle short offset copies. */
        do {
            COPY8(op, ip)
        } while (op < oend);
    } else {
        assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
        /* Separate out the first COPY16() call because the copy length is
         * almost certain to be short, so the branches have different
         * probabilities. Since it is almost certain to be short, only do
         * one COPY16() in the first call. Then, do two calls per loop since
         * at that point it is more likely to have a high trip count.
         */
#ifdef __aarch64__
        do {
            COPY16(op, ip);
        }
        while (op < oend);
#else
        ZSTD_copy16(op, ip);
        if (16 >= length) return;
        op += 16;
        ip += 16;
        do {
            COPY16(op, ip);
            COPY16(op, ip);
        }
        while (op < oend);
#endif
    }
}
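
/*
 * Usage sketch (illustrative only): callers are expected to reserve
 * WILDCOPY_OVERLENGTH bytes of slack past the destination end, because the
 * copy proceeds in 16-byte strides. A typical pattern (names hypothetical):
 *
 *   BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;  // last safe position
 *   if (op + copyLength <= oend_w) {
 *       ZSTD_wildcopy(op, ip, copyLength, ZSTD_no_overlap);
 *   } else {
 *       ZSTD_memcpy(op, ip, copyLength);  // exact copy near the buffer end
 *   }
 */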
MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    size_t const length = MIN(dstCapacity, srcSize);
    if (length > 0) {
        ZSTD_memcpy(dst, src, length);
    }
    return length;
}

/* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3

/* When the workspace is too large for at least this many consecutive invocations,
 * the context's memory usage is considered wasteful,
 * because it's sized to handle a worst-case scenario which rarely happens.
 * In that case, resize it down to free some memory. */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128

/* Controls whether the input/output buffer is buffered or stable. */
typedef enum {
    ZSTD_bm_buffered = 0,  /* Buffer the input/output */
    ZSTD_bm_stable = 1     /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
} ZSTD_bufferMode_e;


/*-*******************************************
*  Private declarations
*********************************************/
typedef struct seqDef_s {
    U32 offset;        /* Offset code of the sequence */
    U16 litLength;
    U16 matchLength;
} seqDef;

typedef struct {
    seqDef* sequencesStart;
    seqDef* sequences;      /* ptr to end of sequences */
    BYTE* litStart;
    BYTE* lit;              /* ptr to end of literals */
    BYTE* llCode;
    BYTE* mlCode;
    BYTE* ofCode;
    size_t maxNbSeq;
    size_t maxNbLit;

    /* longLengthPos and longLengthID allow us to represent either a single litLength or matchLength
     * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
     * the existing value of the litLength or matchLength by 0x10000.
     */
    U32 longLengthID;   /* 0 == no longLength; 1 == Represent the long literal; 2 == Represent the long match; */
    U32 longLengthPos;  /* Index of the sequence to apply long length modification to */
} seqStore_t;
typedef struct {
    U32 litLength;
    U32 matchLength;
} ZSTD_sequenceLength;

/*
 * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences
 * indicated by longLengthPos and longLengthID, and adds MINMATCH back to matchLength.
 */
MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
{
    ZSTD_sequenceLength seqLen;
    seqLen.litLength = seq->litLength;
    seqLen.matchLength = seq->matchLength + MINMATCH;
    if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
        if (seqStore->longLengthID == 1) {
            seqLen.litLength += 0xFFFF;
        }
        if (seqStore->longLengthID == 2) {
            seqLen.matchLength += 0xFFFF;
        }
    }
    return seqLen;
}
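
/*
 * Usage sketch (illustrative only): iterating a populated seqStore_t and
 * recovering full lengths, including the single long-length correction:
 *
 *   size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
 *   size_t n;
 *   for (n = 0; n < nbSeqs; n++) {
 *       ZSTD_sequenceLength const seqLen =
 *           ZSTD_getSequenceLength(seqStore, seqStore->sequencesStart + n);
 *       // seqLen.litLength literals are followed by a match of seqLen.matchLength bytes
 *   }
 */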
/*
 * Contains the compressed frame size and an upper-bound for the decompressed frame size.
 * Note: before using `compressedSize`, check for errors using ZSTD_isError().
 * Similarly, before using `decompressedBound`, check for errors using:
 *     `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
 */
typedef struct {
    size_t compressedSize;
    unsigned long long decompressedBound;
} ZSTD_frameSizeInfo;   /* decompress & legacy */

const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */

/* custom memory allocation functions */
void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);


MEM_STATIC U32 ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus */
{
    assert(val != 0);
    {
#   if (__GNUC__ >= 3)   /* GCC Intrinsic */
        return __builtin_clz (val) ^ 31;
#   else   /* Software version */
        static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
        U32 v = val;
        v |= v >> 1;
        v |= v >> 2;
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16;
        return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
#   endif
    }
}
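
/*
 * Examples: ZSTD_highbit32(1) == 0, ZSTD_highbit32(32) == 5,
 * ZSTD_highbit32(0xFFFFFFFFu) == 31; i.e. the index of the highest set bit,
 * equivalent to floor(log2(val)) for val != 0.
 */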
/* ZSTD_invalidateRepCodes() :
 * ensures next compression will not use repcodes from previous block.
 * Note : only works with regular variant;
 *        do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */

typedef struct {
    blockType_e blockType;
    U32 lastBlock;
    U32 origSize;
} blockProperties_t;   /* declared here for decompress and fullbench */

/*! ZSTD_getcBlockSize() :
 *  Provides the size of compressed block from block header `src` */
/*  Used by: decompress, fullbench (does not get its definition from here) */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
                          blockProperties_t* bpPtr);

/*! ZSTD_decodeSeqHeaders() :
 *  decode sequence header from src */
/*  Used by: decompress, fullbench (does not get its definition from here) */
size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                             const void* src, size_t srcSize);

#endif   /* ZSTD_CCOMMON_H_MODULE */