zstd_compress.c 215 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109
  1. /*
  2. * Copyright (c) Yann Collet, Facebook, Inc.
  3. * All rights reserved.
  4. *
  5. * This source code is licensed under both the BSD-style license (found in the
  6. * LICENSE file in the root directory of this source tree) and the GPLv2 (found
  7. * in the COPYING file in the root directory of this source tree).
  8. * You may select, at your option, one of the above-listed licenses.
  9. */
  10. /*-*************************************
  11. * Dependencies
  12. ***************************************/
  13. #include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
  14. #include "../common/cpu.h"
  15. #include "../common/mem.h"
  16. #include "hist.h" /* HIST_countFast_wksp */
  17. #define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
  18. #include "../common/fse.h"
  19. #define HUF_STATIC_LINKING_ONLY
  20. #include "../common/huf.h"
  21. #include "zstd_compress_internal.h"
  22. #include "zstd_compress_sequences.h"
  23. #include "zstd_compress_literals.h"
  24. #include "zstd_fast.h"
  25. #include "zstd_double_fast.h"
  26. #include "zstd_lazy.h"
  27. #include "zstd_opt.h"
  28. #include "zstd_ldm.h"
  29. #include "zstd_compress_superblock.h"
  30. /* ***************************************************************
  31. * Tuning parameters
  32. *****************************************************************/
  33. /*!
  34. * COMPRESS_HEAPMODE :
  35. * Select how default compression function ZSTD_compress() allocates its context,
  36. * on stack (0, default), or into heap (1).
  37. * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
  38. */
  39. /*-*************************************
  40. * Helper functions
  41. ***************************************/
  42. /* ZSTD_compressBound()
  43. * Note that the result from this function is only compatible with the "normal"
  44. * full-block strategy.
  45. * When there are a lot of small blocks due to frequent flush in streaming mode
  46. * the overhead of headers can make the compressed data to be larger than the
  47. * return value of ZSTD_compressBound().
  48. */
  49. size_t ZSTD_compressBound(size_t srcSize) {
  50. return ZSTD_COMPRESSBOUND(srcSize);
  51. }
  52. /*-*************************************
  53. * Context memory management
  54. ***************************************/
/* Digested dictionary : built once, reusable across many compressions. */
struct ZSTD_CDict_s {
    const void* dictContent;                 /* dictionary payload */
    size_t dictContentSize;                  /* size of dictContent, in bytes */
    ZSTD_dictContentType_e dictContentType;  /* The dictContentType the CDict was created with */
    U32* entropyWorkspace;                   /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
    ZSTD_cwksp workspace;                    /* arena from which the internal buffers are carved */
    ZSTD_matchState_t matchState;
    ZSTD_compressedBlockState_t cBlockState;
    ZSTD_customMem customMem;                /* allocator used to create (and later free) this CDict */
    U32 dictID;                              /* NOTE(review): presumably the ID from the dictionary header, 0 when absent — confirm */
    int compressionLevel;                    /* 0 indicates that advanced API was used to select CDict params */
};  /* typedef'd to ZSTD_CDict within "zstd.h" */
  67. ZSTD_CCtx* ZSTD_createCCtx(void)
  68. {
  69. return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
  70. }
/* Initialize a freshly allocated CCtx :
 * zeroes the structure, records the allocator, probes BMI2 support,
 * then applies a full parameter reset. */
static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
{
    assert(cctx != NULL);
    ZSTD_memset(cctx, 0, sizeof(*cctx));
    cctx->customMem = memManager;
    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
    { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
    assert(!ZSTD_isError(err));  /* reset is expected to always succeed on a zeroed context */
    (void)err;                   /* silence unused-variable warning in release builds */
    }
}
  82. ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
  83. {
  84. ZSTD_STATIC_ASSERT(zcss_init==0);
  85. ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
  86. if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
  87. { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
  88. if (!cctx) return NULL;
  89. ZSTD_initCCtx(cctx, customMem);
  90. return cctx;
  91. }
  92. }
/* ZSTD_initStaticCCtx() :
 * Builds a CCtx entirely inside a caller-provided, fixed-size buffer.
 * No dynamic allocation will ever happen for this context.
 * @return NULL if the buffer is too small or misaligned. */
ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
{
    ZSTD_cwksp ws;
    ZSTD_CCtx* cctx;
    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
    if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
    ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
    /* the CCtx struct itself is carved out of the caller's buffer */
    cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
    if (cctx == NULL) return NULL;
    ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
    ZSTD_cwksp_move(&cctx->workspace, &ws);
    cctx->staticSize = workspaceSize;  /* non-zero staticSize marks the context as static (see ZSTD_freeCCtx) */
    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
    if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
    cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
    cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
    return cctx;
}
  113. /*
  114. * Clears and frees all of the dictionaries in the CCtx.
  115. */
  116. static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
  117. {
  118. ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
  119. ZSTD_freeCDict(cctx->localDict.cdict);
  120. ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
  121. ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
  122. cctx->cdict = NULL;
  123. }
  124. static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
  125. {
  126. size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
  127. size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
  128. return bufferSize + cdictSize;
  129. }
/* Frees everything the CCtx owns (dictionaries, then the workspace),
 * but not the CCtx structure itself.
 * Only valid for dynamically allocated contexts. */
static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
{
    assert(cctx != NULL);
    assert(cctx->staticSize == 0);  /* static contexts own nothing freeable */
    ZSTD_clearAllDicts(cctx);
    ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
}
  137. size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
  138. {
  139. if (cctx==NULL) return 0; /* support free on NULL */
  140. RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
  141. "not compatible with static CCtx");
  142. {
  143. int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
  144. ZSTD_freeCCtxContent(cctx);
  145. if (!cctxInWorkspace) {
  146. ZSTD_customFree(cctx, cctx->customMem);
  147. }
  148. }
  149. return 0;
  150. }
  151. static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
  152. {
  153. (void)cctx;
  154. return 0;
  155. }
  156. size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
  157. {
  158. if (cctx==NULL) return 0; /* support sizeof on NULL */
  159. /* cctx may be in the workspace */
  160. return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
  161. + ZSTD_cwksp_sizeof(&cctx->workspace)
  162. + ZSTD_sizeof_localDict(cctx->localDict)
  163. + ZSTD_sizeof_mtctx(cctx);
  164. }
  165. size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
  166. {
  167. return ZSTD_sizeof_CCtx(zcs); /* same object */
  168. }
  169. /* private API call, for dictBuilder only */
  170. const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
  171. /* Returns 1 if compression parameters are such that we should
  172. * enable long distance matching (wlog >= 27, strategy >= btopt).
  173. * Returns 0 otherwise.
  174. */
  175. static U32 ZSTD_CParams_shouldEnableLdm(const ZSTD_compressionParameters* const cParams) {
  176. return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27;
  177. }
/* Builds a full ZSTD_CCtx_params from bare (pre-validated) compression
 * parameters, defaulting every other field, and turning on long-distance
 * matching when the window/strategy combination warrants it. */
static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params cctxParams;
    /* should not matter, as all cParams are presumed properly defined */
    ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
    cctxParams.cParams = cParams;
    if (ZSTD_CParams_shouldEnableLdm(&cParams)) {
        DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including LDM into cctx params");
        cctxParams.ldmParams.enableLdm = 1;
        /* LDM is enabled by default for optimal parser and window size >= 128MB */
        ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
        assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
        assert(cctxParams.ldmParams.hashRateLog < 32);
    }
    assert(!ZSTD_checkCParams(cParams));  /* caller must pass validated cParams */
    return cctxParams;
}
  196. static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
  197. ZSTD_customMem customMem)
  198. {
  199. ZSTD_CCtx_params* params;
  200. if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
  201. params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
  202. sizeof(ZSTD_CCtx_params), customMem);
  203. if (!params) { return NULL; }
  204. ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
  205. params->customMem = customMem;
  206. return params;
  207. }
  208. ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
  209. {
  210. return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
  211. }
  212. size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
  213. {
  214. if (params == NULL) { return 0; }
  215. ZSTD_customFree(params, params->customMem);
  216. return 0;
  217. }
  218. size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
  219. {
  220. return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
  221. }
/* ZSTD_CCtxParams_init() :
 * Zeroes *cctxParams and records the requested compression level.
 * @return 0 on success, an error code if cctxParams is NULL. */
size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
    cctxParams->compressionLevel = compressionLevel;
    cctxParams->fParams.contentSizeFlag = 1;  /* content size is written into the frame header by default */
    return 0;
}
  229. #define ZSTD_NO_CLEVEL 0
  230. /*
  231. * Initializes the cctxParams from params and compressionLevel.
  232. * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
  233. */
static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
{
    assert(!ZSTD_checkCParams(params->cParams));  /* caller guarantees validated cParams */
    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
    cctxParams->cParams = params->cParams;
    cctxParams->fParams = params->fParams;
    /* Should not matter, as all cParams are presumed properly defined.
     * But, set it for tracing anyway.
     */
    cctxParams->compressionLevel = compressionLevel;
}
/* ZSTD_CCtxParams_init_advanced() :
 * Initializes *cctxParams from explicit ZSTD_parameters, after validating them.
 * @return 0 on success, an error code if cctxParams is NULL or params are invalid. */
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
{
    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
    /* reject out-of-range compression parameters before committing them */
    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
    /* not derived from a compression level : record ZSTD_NO_CLEVEL */
    ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
    return 0;
}
  252. /*
  253. * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
  254. * @param param Validated zstd parameters.
  255. */
  256. static void ZSTD_CCtxParams_setZstdParams(
  257. ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
  258. {
  259. assert(!ZSTD_checkCParams(params->cParams));
  260. cctxParams->cParams = params->cParams;
  261. cctxParams->fParams = params->fParams;
  262. /* Should not matter, as all cParams are presumed properly defined.
  263. * But, set it for tracing anyway.
  264. */
  265. cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
  266. }
/* ZSTD_cParam_getBounds() :
 * @return : the valid range [lowerBound, upperBound] for @param,
 *           or a ZSTD_bounds with .error set if @param is unknown.
 * Note : multithreading parameters (nbWorkers, jobSize, overlapLog)
 *        report a [0,0] range here : this build appears to be compiled
 *        without MT support (see ZSTD_CCtxParams_setParameter()). */
ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
{
    ZSTD_bounds bounds = { 0, 0, 0 };
    switch(param)
    {
    case ZSTD_c_compressionLevel:
        bounds.lowerBound = ZSTD_minCLevel();
        bounds.upperBound = ZSTD_maxCLevel();
        return bounds;
    case ZSTD_c_windowLog:
        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
        bounds.upperBound = ZSTD_WINDOWLOG_MAX;
        return bounds;
    case ZSTD_c_hashLog:
        bounds.lowerBound = ZSTD_HASHLOG_MIN;
        bounds.upperBound = ZSTD_HASHLOG_MAX;
        return bounds;
    case ZSTD_c_chainLog:
        bounds.lowerBound = ZSTD_CHAINLOG_MIN;
        bounds.upperBound = ZSTD_CHAINLOG_MAX;
        return bounds;
    case ZSTD_c_searchLog:
        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
        bounds.upperBound = ZSTD_SEARCHLOG_MAX;
        return bounds;
    case ZSTD_c_minMatch:
        bounds.lowerBound = ZSTD_MINMATCH_MIN;
        bounds.upperBound = ZSTD_MINMATCH_MAX;
        return bounds;
    case ZSTD_c_targetLength:
        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
        return bounds;
    case ZSTD_c_strategy:
        bounds.lowerBound = ZSTD_STRATEGY_MIN;
        bounds.upperBound = ZSTD_STRATEGY_MAX;
        return bounds;
    /* boolean flags : [0,1] */
    case ZSTD_c_contentSizeFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    case ZSTD_c_checksumFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    case ZSTD_c_dictIDFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    /* multithreading parameters : no MT support in this build => [0,0] */
    case ZSTD_c_nbWorkers:
        bounds.lowerBound = 0;
        bounds.upperBound = 0;
        return bounds;
    case ZSTD_c_jobSize:
        bounds.lowerBound = 0;
        bounds.upperBound = 0;
        return bounds;
    case ZSTD_c_overlapLog:
        bounds.lowerBound = 0;
        bounds.upperBound = 0;
        return bounds;
    case ZSTD_c_enableDedicatedDictSearch:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    case ZSTD_c_enableLongDistanceMatching:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;
    case ZSTD_c_ldmHashLog:
        bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
        bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
        return bounds;
    case ZSTD_c_ldmMinMatch:
        bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
        bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
        return bounds;
    case ZSTD_c_ldmBucketSizeLog:
        bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
        bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
        return bounds;
    case ZSTD_c_ldmHashRateLog:
        bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
        bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
        return bounds;

    /* experimental parameters */
    case ZSTD_c_rsyncable:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_forceMaxWindow :
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_format:
        ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
        bounds.lowerBound = ZSTD_f_zstd1;
        bounds.upperBound = ZSTD_f_zstd1_magicless;   /* note : how to ensure at compile time that this is the highest value enum ? */
        return bounds;

    case ZSTD_c_forceAttachDict:
        ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
        bounds.lowerBound = ZSTD_dictDefaultAttach;
        bounds.upperBound = ZSTD_dictForceLoad;       /* note : how to ensure at compile time that this is the highest value enum ? */
        return bounds;

    case ZSTD_c_literalCompressionMode:
        ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
        bounds.lowerBound = ZSTD_lcm_auto;
        bounds.upperBound = ZSTD_lcm_uncompressed;
        return bounds;

    case ZSTD_c_targetCBlockSize:
        bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
        bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
        return bounds;

    case ZSTD_c_srcSizeHint:
        bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
        bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
        return bounds;

    case ZSTD_c_stableInBuffer:
    case ZSTD_c_stableOutBuffer:
        bounds.lowerBound = (int)ZSTD_bm_buffered;
        bounds.upperBound = (int)ZSTD_bm_stable;
        return bounds;

    case ZSTD_c_blockDelimiters:
        bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
        bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
        return bounds;

    case ZSTD_c_validateSequences:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    default:
        bounds.error = ERROR(parameter_unsupported);
        return bounds;
    }
}
  402. /* ZSTD_cParam_clampBounds:
  403. * Clamps the value into the bounded range.
  404. */
  405. static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
  406. {
  407. ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
  408. if (ZSTD_isError(bounds.error)) return bounds.error;
  409. if (*value < bounds.lowerBound) *value = bounds.lowerBound;
  410. if (*value > bounds.upperBound) *value = bounds.upperBound;
  411. return 0;
  412. }
  413. #define BOUNDCHECK(cParam, val) { \
  414. RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
  415. parameter_outOfBound, "Param out of bounds"); \
  416. }
/* ZSTD_isUpdateAuthorized() :
 * @return : 1 if @param may still be modified after compression has started
 *           (streamStage != zcss_init), 0 otherwise. */
static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
{
    switch(param)
    {
    /* updatable even mid-stream */
    case ZSTD_c_compressionLevel:
    case ZSTD_c_hashLog:
    case ZSTD_c_chainLog:
    case ZSTD_c_searchLog:
    case ZSTD_c_minMatch:
    case ZSTD_c_targetLength:
    case ZSTD_c_strategy:
        return 1;

    /* everything else : only settable before compression starts.
     * Cases are listed explicitly (rather than folded into default)
     * so that compilers can warn on unhandled new enum values. */
    case ZSTD_c_format:
    case ZSTD_c_windowLog:
    case ZSTD_c_contentSizeFlag:
    case ZSTD_c_checksumFlag:
    case ZSTD_c_dictIDFlag:
    case ZSTD_c_forceMaxWindow :
    case ZSTD_c_nbWorkers:
    case ZSTD_c_jobSize:
    case ZSTD_c_overlapLog:
    case ZSTD_c_rsyncable:
    case ZSTD_c_enableDedicatedDictSearch:
    case ZSTD_c_enableLongDistanceMatching:
    case ZSTD_c_ldmHashLog:
    case ZSTD_c_ldmMinMatch:
    case ZSTD_c_ldmBucketSizeLog:
    case ZSTD_c_ldmHashRateLog:
    case ZSTD_c_forceAttachDict:
    case ZSTD_c_literalCompressionMode:
    case ZSTD_c_targetCBlockSize:
    case ZSTD_c_srcSizeHint:
    case ZSTD_c_stableInBuffer:
    case ZSTD_c_stableOutBuffer:
    case ZSTD_c_blockDelimiters:
    case ZSTD_c_validateSequences:
    default:
        return 0;
    }
}
/* ZSTD_CCtx_setParameter() :
 * Sets one compression parameter into cctx->requestedParams.
 * Most parameters can only be set while the context is in init stage;
 * a subset (see ZSTD_isUpdateAuthorized()) may also be changed after
 * streaming has started, in which case cParamsChanged is flagged.
 * @return : forwarded from ZSTD_CCtxParams_setParameter() :
 *           the value effectively set (when representable), or an error code. */
size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
    if (cctx->streamStage != zcss_init) {
        if (ZSTD_isUpdateAuthorized(param)) {
            cctx->cParamsChanged = 1;
        } else {
            RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
    }   }

    switch(param)
    {
    case ZSTD_c_nbWorkers:
        /* the only parameter needing a cctx-level check :
         * a statically-allocated cctx cannot support the extra MT allocations */
        RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
                        "MT not compatible with static alloc");
        break;

    /* all other known parameters : validated and stored by
     * ZSTD_CCtxParams_setParameter() below */
    case ZSTD_c_compressionLevel:
    case ZSTD_c_windowLog:
    case ZSTD_c_hashLog:
    case ZSTD_c_chainLog:
    case ZSTD_c_searchLog:
    case ZSTD_c_minMatch:
    case ZSTD_c_targetLength:
    case ZSTD_c_strategy:
    case ZSTD_c_ldmHashRateLog:
    case ZSTD_c_format:
    case ZSTD_c_contentSizeFlag:
    case ZSTD_c_checksumFlag:
    case ZSTD_c_dictIDFlag:
    case ZSTD_c_forceMaxWindow:
    case ZSTD_c_forceAttachDict:
    case ZSTD_c_literalCompressionMode:
    case ZSTD_c_jobSize:
    case ZSTD_c_overlapLog:
    case ZSTD_c_rsyncable:
    case ZSTD_c_enableDedicatedDictSearch:
    case ZSTD_c_enableLongDistanceMatching:
    case ZSTD_c_ldmHashLog:
    case ZSTD_c_ldmMinMatch:
    case ZSTD_c_ldmBucketSizeLog:
    case ZSTD_c_targetCBlockSize:
    case ZSTD_c_srcSizeHint:
    case ZSTD_c_stableInBuffer:
    case ZSTD_c_stableOutBuffer:
    case ZSTD_c_blockDelimiters:
    case ZSTD_c_validateSequences:
        break;

    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
    }
    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
}
/* ZSTD_CCtxParams_setParameter() :
 * Validates @value for @param, then stores it into @CCtxParams.
 * @return : the value effectively stored (which may differ from @value,
 *           e.g. after clamping, or 0 => default), or an error code.
 *           Note : a negative compressionLevel is stored but 0 is returned,
 *           since size_t cannot represent negative values. */
size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
                                    ZSTD_cParameter param, int value)
{
    DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
    switch(param)
    {
    case ZSTD_c_format :
        BOUNDCHECK(ZSTD_c_format, value);
        CCtxParams->format = (ZSTD_format_e)value;
        return (size_t)CCtxParams->format;

    case ZSTD_c_compressionLevel : {
        /* out-of-range levels are clamped, not rejected */
        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
        if (value == 0)
            CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
        else
            CCtxParams->compressionLevel = value;
        if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
        return 0;  /* return type (size_t) cannot represent negative values */
    }

    case ZSTD_c_windowLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_windowLog, value);
        CCtxParams->cParams.windowLog = (U32)value;
        return CCtxParams->cParams.windowLog;

    case ZSTD_c_hashLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_hashLog, value);
        CCtxParams->cParams.hashLog = (U32)value;
        return CCtxParams->cParams.hashLog;

    case ZSTD_c_chainLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_chainLog, value);
        CCtxParams->cParams.chainLog = (U32)value;
        return CCtxParams->cParams.chainLog;

    case ZSTD_c_searchLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_searchLog, value);
        CCtxParams->cParams.searchLog = (U32)value;
        return (size_t)value;

    case ZSTD_c_minMatch :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_minMatch, value);
        CCtxParams->cParams.minMatch = value;
        return CCtxParams->cParams.minMatch;

    case ZSTD_c_targetLength :
        /* no zero-check : 0 is within targetLength's valid range */
        BOUNDCHECK(ZSTD_c_targetLength, value);
        CCtxParams->cParams.targetLength = value;
        return CCtxParams->cParams.targetLength;

    case ZSTD_c_strategy :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_strategy, value);
        CCtxParams->cParams.strategy = (ZSTD_strategy)value;
        return (size_t)CCtxParams->cParams.strategy;

    case ZSTD_c_contentSizeFlag :
        /* Content size written in frame header _when known_ (default:1) */
        DEBUGLOG(4, "set content size flag = %u", (value!=0));
        CCtxParams->fParams.contentSizeFlag = value != 0;
        return CCtxParams->fParams.contentSizeFlag;

    case ZSTD_c_checksumFlag :
        /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
        CCtxParams->fParams.checksumFlag = value != 0;
        return CCtxParams->fParams.checksumFlag;

    case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
        DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
        /* stored inverted : the frame-param field is "noDictID" */
        CCtxParams->fParams.noDictIDFlag = !value;
        return !CCtxParams->fParams.noDictIDFlag;

    case ZSTD_c_forceMaxWindow :
        CCtxParams->forceWindow = (value != 0);
        return CCtxParams->forceWindow;

    case ZSTD_c_forceAttachDict : {
        const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
        BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
        CCtxParams->attachDictPref = pref;
        return CCtxParams->attachDictPref;
    }

    case ZSTD_c_literalCompressionMode : {
        const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
        BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
        CCtxParams->literalCompressionMode = lcm;
        return CCtxParams->literalCompressionMode;
    }

    /* multithreading parameters : only value 0 accepted in this build */
    case ZSTD_c_nbWorkers :
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;

    case ZSTD_c_jobSize :
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;

    case ZSTD_c_overlapLog :
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;

    case ZSTD_c_rsyncable :
        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
        return 0;

    case ZSTD_c_enableDedicatedDictSearch :
        CCtxParams->enableDedicatedDictSearch = (value!=0);
        return CCtxParams->enableDedicatedDictSearch;

    case ZSTD_c_enableLongDistanceMatching :
        CCtxParams->ldmParams.enableLdm = (value!=0);
        return CCtxParams->ldmParams.enableLdm;

    case ZSTD_c_ldmHashLog :
        if (value!=0)   /* 0 ==> auto */
            BOUNDCHECK(ZSTD_c_ldmHashLog, value);
        CCtxParams->ldmParams.hashLog = value;
        return CCtxParams->ldmParams.hashLog;

    case ZSTD_c_ldmMinMatch :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
        CCtxParams->ldmParams.minMatchLength = value;
        return CCtxParams->ldmParams.minMatchLength;

    case ZSTD_c_ldmBucketSizeLog :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
        CCtxParams->ldmParams.bucketSizeLog = value;
        return CCtxParams->ldmParams.bucketSizeLog;

    case ZSTD_c_ldmHashRateLog :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmHashRateLog, value);
        CCtxParams->ldmParams.hashRateLog = value;
        return CCtxParams->ldmParams.hashRateLog;

    case ZSTD_c_targetCBlockSize :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
        CCtxParams->targetCBlockSize = value;
        return CCtxParams->targetCBlockSize;

    case ZSTD_c_srcSizeHint :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_srcSizeHint, value);
        CCtxParams->srcSizeHint = value;
        return CCtxParams->srcSizeHint;

    case ZSTD_c_stableInBuffer:
        BOUNDCHECK(ZSTD_c_stableInBuffer, value);
        CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
        return CCtxParams->inBufferMode;

    case ZSTD_c_stableOutBuffer:
        BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
        CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
        return CCtxParams->outBufferMode;

    case ZSTD_c_blockDelimiters:
        BOUNDCHECK(ZSTD_c_blockDelimiters, value);
        CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
        return CCtxParams->blockDelimiters;

    case ZSTD_c_validateSequences:
        BOUNDCHECK(ZSTD_c_validateSequences, value);
        CCtxParams->validateSequences = value;
        return CCtxParams->validateSequences;

    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
    }
}
/* ZSTD_CCtx_getParameter() :
 * Reads one parameter from the cctx's requested (not-yet-applied) parameters. */
size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
{
    return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
}
/* ZSTD_CCtxParams_getParameter() :
 * Writes the current value of @param from @CCtxParams into *value.
 * @return : 0 on success, or an error code (unknown parameter, or a
 *           multithreading parameter in this non-MT build). */
size_t ZSTD_CCtxParams_getParameter(
        ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
{
    switch(param)
    {
    case ZSTD_c_format :
        *value = CCtxParams->format;
        break;
    case ZSTD_c_compressionLevel :
        *value = CCtxParams->compressionLevel;
        break;
    case ZSTD_c_windowLog :
        *value = (int)CCtxParams->cParams.windowLog;
        break;
    case ZSTD_c_hashLog :
        *value = (int)CCtxParams->cParams.hashLog;
        break;
    case ZSTD_c_chainLog :
        *value = (int)CCtxParams->cParams.chainLog;
        break;
    case ZSTD_c_searchLog :
        *value = CCtxParams->cParams.searchLog;
        break;
    case ZSTD_c_minMatch :
        *value = CCtxParams->cParams.minMatch;
        break;
    case ZSTD_c_targetLength :
        *value = CCtxParams->cParams.targetLength;
        break;
    case ZSTD_c_strategy :
        *value = (unsigned)CCtxParams->cParams.strategy;
        break;
    case ZSTD_c_contentSizeFlag :
        *value = CCtxParams->fParams.contentSizeFlag;
        break;
    case ZSTD_c_checksumFlag :
        *value = CCtxParams->fParams.checksumFlag;
        break;
    case ZSTD_c_dictIDFlag :
        /* stored inverted : field is "noDictID" */
        *value = !CCtxParams->fParams.noDictIDFlag;
        break;
    case ZSTD_c_forceMaxWindow :
        *value = CCtxParams->forceWindow;
        break;
    case ZSTD_c_forceAttachDict :
        *value = CCtxParams->attachDictPref;
        break;
    case ZSTD_c_literalCompressionMode :
        *value = CCtxParams->literalCompressionMode;
        break;
    case ZSTD_c_nbWorkers :
        assert(CCtxParams->nbWorkers == 0);   /* always 0 without MT support */
        *value = CCtxParams->nbWorkers;
        break;
    case ZSTD_c_jobSize :
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
    case ZSTD_c_overlapLog :
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
    case ZSTD_c_rsyncable :
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
    case ZSTD_c_enableDedicatedDictSearch :
        *value = CCtxParams->enableDedicatedDictSearch;
        break;
    case ZSTD_c_enableLongDistanceMatching :
        *value = CCtxParams->ldmParams.enableLdm;
        break;
    case ZSTD_c_ldmHashLog :
        *value = CCtxParams->ldmParams.hashLog;
        break;
    case ZSTD_c_ldmMinMatch :
        *value = CCtxParams->ldmParams.minMatchLength;
        break;
    case ZSTD_c_ldmBucketSizeLog :
        *value = CCtxParams->ldmParams.bucketSizeLog;
        break;
    case ZSTD_c_ldmHashRateLog :
        *value = CCtxParams->ldmParams.hashRateLog;
        break;
    case ZSTD_c_targetCBlockSize :
        *value = (int)CCtxParams->targetCBlockSize;
        break;
    case ZSTD_c_srcSizeHint :
        *value = (int)CCtxParams->srcSizeHint;
        break;
    case ZSTD_c_stableInBuffer :
        *value = (int)CCtxParams->inBufferMode;
        break;
    case ZSTD_c_stableOutBuffer :
        *value = (int)CCtxParams->outBufferMode;
        break;
    case ZSTD_c_blockDelimiters :
        *value = (int)CCtxParams->blockDelimiters;
        break;
    case ZSTD_c_validateSequences :
        *value = (int)CCtxParams->validateSequences;
        break;
    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
    }
    return 0;
}
/* ZSTD_CCtx_setParametersUsingCCtxParams() :
 * just applies `params` into `cctx`
 * no action is performed, parameters are merely stored.
 * Requirements : the context must be in init stage (no compression ongoing)
 * and must not have a cdict attached, since some parameters would have to
 * be inherited from the cdict. */
size_t ZSTD_CCtx_setParametersUsingCCtxParams(
        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "The context is in the wrong stage!");
    RETURN_ERROR_IF(cctx->cdict, stage_wrong,
                    "Can't override parameters with cdict attached (some must "
                    "be inherited from the cdict).");

    cctx->requestedParams = *params;
    return 0;
}
/* ZSTD_CCtx_setPledgedSrcSize() :
 * Declares the total size of the content for the upcoming frame.
 * Only allowed while the context is in init stage. */
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't set pledgedSrcSize when not in init stage.");
    /* stored as size+1, so that 0 keeps meaning "unknown" ;
     * note : pledging ZSTD_CONTENTSIZE_UNKNOWN (== (unsigned long long)-1)
     *        wraps back to 0, i.e. "unknown", which is the intended result */
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
    return 0;
}
  786. static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
  787. int const compressionLevel,
  788. size_t const dictSize);
  789. static int ZSTD_dedicatedDictSearch_isSupported(
  790. const ZSTD_compressionParameters* cParams);
  791. static void ZSTD_dedicatedDictSearch_revertCParams(
  792. ZSTD_compressionParameters* cParams);
/*
 * Initializes the local dict using the requested parameters.
 * NOTE: This does not use the pledged src size, because it may be used for more
 * than one compression.
 * @return : 0 on success (including "nothing to do"),
 *           or an error code if cdict creation fails.
 */
static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
{
    ZSTD_localDict* const dl = &cctx->localDict;
    if (dl->dict == NULL) {
        /* No local dictionary. */
        assert(dl->dictBuffer == NULL);
        assert(dl->cdict == NULL);
        assert(dl->dictSize == 0);
        return 0;
    }
    if (dl->cdict != NULL) {
        assert(cctx->cdict == dl->cdict);
        /* Local dictionary already initialized. */
        return 0;
    }
    assert(dl->dictSize > 0);
    assert(cctx->cdict == NULL);
    assert(cctx->prefixDict.dict == NULL);

    /* byRef : the cdict references dl->dict, which remains owned by the cctx */
    dl->cdict = ZSTD_createCDict_advanced2(
            dl->dict,
            dl->dictSize,
            ZSTD_dlm_byRef,
            dl->dictContentType,
            &cctx->requestedParams,
            cctx->customMem);
    RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
    cctx->cdict = dl->cdict;
    return 0;
}
  827. size_t ZSTD_CCtx_loadDictionary_advanced(
  828. ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
  829. ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
  830. {
  831. RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
  832. "Can't load a dictionary when ctx is not in init stage.");
  833. DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
  834. ZSTD_clearAllDicts(cctx); /* in case one already exists */
  835. if (dict == NULL || dictSize == 0) /* no dictionary mode */
  836. return 0;
  837. if (dictLoadMethod == ZSTD_dlm_byRef) {
  838. cctx->localDict.dict = dict;
  839. } else {
  840. void* dictBuffer;
  841. RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
  842. "no malloc for static CCtx");
  843. dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
  844. RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
  845. ZSTD_memcpy(dictBuffer, dict, dictSize);
  846. cctx->localDict.dictBuffer = dictBuffer;
  847. cctx->localDict.dict = dictBuffer;
  848. }
  849. cctx->localDict.dictSize = dictSize;
  850. cctx->localDict.dictContentType = dictContentType;
  851. return 0;
  852. }
/* ZSTD_CCtx_loadDictionary_byReference() :
 * Same as ZSTD_CCtx_loadDictionary(), but references the dictionary
 * content instead of copying it : caller's buffer must remain valid. */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
        ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
}
/* ZSTD_CCtx_loadDictionary() :
 * Loads a dictionary by copying its content into the cctx,
 * with auto-detection of its type (zstd dictionary or raw content). */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
}
/* ZSTD_CCtx_refCDict() :
 * References an externally-owned cdict for future compressions.
 * Replaces (and frees) any previously loaded local dictionary.
 * @cdict may be NULL, which simply un-references the current dictionary. */
size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't ref a dict when ctx not in init stage.");
    /* Free the existing local cdict (if any) to save memory. */
    ZSTD_clearAllDicts(cctx);
    cctx->cdict = cdict;
    return 0;
}
/* ZSTD_CCtx_refThreadPool() :
 * References an externally-owned thread pool; the cctx does not take ownership. */
size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                    "Can't ref a pool when ctx not in init stage.");
    cctx->pool = pool;
    return 0;
}
/* ZSTD_CCtx_refPrefix() :
 * References a prefix, treated as raw content (not a structured dictionary). */
size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
{
    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
}
  884. size_t ZSTD_CCtx_refPrefix_advanced(
  885. ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
  886. {
  887. RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
  888. "Can't ref a prefix when ctx not in init stage.");
  889. ZSTD_clearAllDicts(cctx);
  890. if (prefix != NULL && prefixSize > 0) {
  891. cctx->prefixDict.dict = prefix;
  892. cctx->prefixDict.dictSize = prefixSize;
  893. cctx->prefixDict.dictContentType = dictContentType;
  894. }
  895. return 0;
  896. }
/*! ZSTD_CCtx_reset() :
 * Resets the session, the parameters, or both.
 * Resetting parameters also drops any dictionary / prefix. */
size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
{
    if ( (reset == ZSTD_reset_session_only)
      || (reset == ZSTD_reset_session_and_parameters) ) {
        cctx->streamStage = zcss_init;
        cctx->pledgedSrcSizePlusOne = 0;   /* back to "content size unknown" */
    }
    if ( (reset == ZSTD_reset_parameters)
      || (reset == ZSTD_reset_session_and_parameters) ) {
        /* parameters can only be reset between sessions :
         * either the session was just reset above, or it never started */
        RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                        "Can't reset parameters only when not in init stage.");
        ZSTD_clearAllDicts(cctx);
        return ZSTD_CCtxParams_reset(&cctx->requestedParams);
    }
    return 0;
}
/* ZSTD_checkCParams() :
   control CParam values remain within authorized range.
   @return : 0, or an error code if one value is beyond authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
    /* each BOUNDCHECK returns parameter_outOfBound from this function on failure */
    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
    return 0;
}
/* ZSTD_clampCParams() :
 * make CParam values within valid range.
 * @return : valid CParams */
static ZSTD_compressionParameters
ZSTD_clampCParams(ZSTD_compressionParameters cParams)
{
    /* clamp @val into the [lowerBound, upperBound] range reported by
     * ZSTD_cParam_getBounds(), preserving the field's type via @type */
#   define CLAMP_TYPE(cParam, val, type) {                                \
        ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \
        if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
        else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
    }
#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
    CLAMP(ZSTD_c_windowLog, cParams.windowLog);
    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
    CLAMP(ZSTD_c_searchLog, cParams.searchLog);
    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);
    CLAMP(ZSTD_c_targetLength,cParams.targetLength);
    /* strategy is an enum, so it needs its own destination type */
    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
    return cParams;
}
  950. /* ZSTD_cycleLog() :
  951. * condition for correct operation : hashLog > 1 */
  952. U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
  953. {
  954. U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
  955. return hashLog - btScale;
  956. }
/* ZSTD_dictAndWindowLog() :
 * Returns an adjusted window log that is large enough to fit the source and the dictionary.
 * The zstd format says that the entire dictionary is valid if one byte of the dictionary
 * is within the window. So the hashLog and chainLog should be large enough to reference both
 * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
 * the hashLog and windowLog.
 * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
 */
static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
{
    const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
    /* No dictionary ==> No change */
    if (dictSize == 0) {
        return windowLog;
    }
    assert(windowLog <= ZSTD_WINDOWLOG_MAX);
    assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN);   /* Handled in ZSTD_adjustCParams_internal() */
    {
        U64 const windowSize = 1ULL << windowLog;
        U64 const dictAndWindowSize = dictSize + windowSize;
        /* If the window size is already large enough to fit both the source and the dictionary
         * then just use the window size. Otherwise adjust so that it fits the dictionary and
         * the window.
         */
        if (windowSize >= dictSize + srcSize) {
            return windowLog;   /* Window size large enough already */
        } else if (dictAndWindowSize >= maxWindowSize) {
            return ZSTD_WINDOWLOG_MAX;   /* Larger than max window log */
        } else {
            /* smallest log such that (1 << log) >= dictAndWindowSize */
            return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
        }
    }
}
/* ZSTD_adjustCParams_internal() :
 * optimize `cPar` for a specified input (`srcSize` and `dictSize`).
 * mostly downsize to reduce memory consumption and initialization latency.
 * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
 * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
 * note : `srcSize==0` means 0!
 * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
                            unsigned long long srcSize,
                            size_t dictSize,
                            ZSTD_cParamMode_e mode)
{
    const U64 minSrcSize = 513; /* (1<<9) + 1 */
    const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
    assert(ZSTD_checkCParams(cPar)==0);

    switch (mode) {
    case ZSTD_cpm_unknown:
    case ZSTD_cpm_noAttachDict:
        /* If we don't know the source size, don't make any
         * assumptions about it. We will already have selected
         * smaller parameters if a dictionary is in use.
         */
        break;
    case ZSTD_cpm_createCDict:
        /* Assume a small source size when creating a dictionary
         * with an unknown source size.
         */
        if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
            srcSize = minSrcSize;
        break;
    case ZSTD_cpm_attachDict:
        /* Dictionary has its own dedicated parameters which have
         * already been selected. We are selecting parameters
         * for only the source.
         */
        dictSize = 0;
        break;
    default:
        assert(0);
        break;
    }

    /* resize windowLog if input is small enough, to use less memory */
    if ( (srcSize < maxWindowResize)
      && (dictSize < maxWindowResize) ) {
        U32 const tSize = (U32)(srcSize + dictSize);
        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
        /* smallest windowLog whose window covers the whole input+dictionary */
        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
                           ZSTD_highbit32(tSize-1) + 1;
        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
    }
    if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
        U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
        U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
        /* downsize hashLog and chainLog to match the (dict-adjusted) window */
        if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
        if (cycleLog > dictAndWindowLog)
            cPar.chainLog -= (cycleLog - dictAndWindowLog);
    }

    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */

    return cPar;
}
/* ZSTD_adjustCParams() :
 * Public variant : clamps cPar into valid range first,
 * and maps srcSize==0 to "unknown" (contrary to the internal variant,
 * where 0 really means an empty input). */
ZSTD_compressionParameters
ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
                   unsigned long long srcSize,
                   size_t dictSize)
{
    cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */
    if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
}
  1061. static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
  1062. static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
/* ZSTD_overrideCParams() :
 * Overwrites each field of *cParams with the corresponding field of
 * *overrides when that override is non-zero (0 means "keep current value"). */
static void ZSTD_overrideCParams(
              ZSTD_compressionParameters* cParams,
        const ZSTD_compressionParameters* overrides)
{
    if (overrides->windowLog)    cParams->windowLog    = overrides->windowLog;
    if (overrides->hashLog)      cParams->hashLog      = overrides->hashLog;
    if (overrides->chainLog)     cParams->chainLog     = overrides->chainLog;
    if (overrides->searchLog)    cParams->searchLog    = overrides->searchLog;
    if (overrides->minMatch)     cParams->minMatch     = overrides->minMatch;
    if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
    if (overrides->strategy)     cParams->strategy     = overrides->strategy;
}
  1075. ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
  1076. const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
  1077. {
  1078. ZSTD_compressionParameters cParams;
  1079. if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
  1080. srcSizeHint = CCtxParams->srcSizeHint;
  1081. }
  1082. cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
  1083. if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
  1084. ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
  1085. assert(!ZSTD_checkCParams(cParams));
  1086. /* srcSizeHint == 0 means 0 */
  1087. return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
  1088. }
  1089. static size_t
  1090. ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
  1091. const U32 forCCtx)
  1092. {
  1093. size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
  1094. size_t const hSize = ((size_t)1) << cParams->hashLog;
  1095. U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
  1096. size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
  1097. /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
  1098. * surrounded by redzones in ASAN. */
  1099. size_t const tableSpace = chainSize * sizeof(U32)
  1100. + hSize * sizeof(U32)
  1101. + h3Size * sizeof(U32);
  1102. size_t const optPotentialSpace =
  1103. ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
  1104. + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
  1105. + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
  1106. + ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))
  1107. + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
  1108. + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
  1109. size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
  1110. ? optPotentialSpace
  1111. : 0;
  1112. DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
  1113. (U32)chainSize, (U32)hSize, (U32)h3Size);
  1114. return tableSpace + optSpace;
  1115. }
/* ZSTD_estimateCCtxSize_usingCCtxParams_internal() :
 * Sum the workspace required by every component of a compression context,
 * using the same ZSTD_cwksp_alloc_size() accounting as the actual
 * reservations performed in ZSTD_resetCCtx_internal().
 * @cParams : validated compression parameters
 * @ldmParams : long-distance-matching parameters (tables + sequence storage)
 * @isStatic : non-zero when the ZSTD_CCtx object itself lives in the workspace
 * @buffInSize / @buffOutSize : streaming buffer sizes (0 for one-shot)
 * @pledgedSrcSize : may shrink the effective window below 1<<windowLog
 * @return : total bytes needed */
static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
const ZSTD_compressionParameters* cParams,
const ldmParams_t* ldmParams,
const int isStatic,
const size_t buffInSize,
const size_t buffOutSize,
const U64 pledgedSrcSize)
{
/* the window never needs to cover more than the pledged source */
size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize));
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
/* minMatch==3 allows a sequence every 3 bytes, hence more per block */
U32 const divider = (cParams->minMatch==3) ? 3 : 4;
size_t const maxNbSeq = blockSize / divider;
/* literals buffer + sequence array + the 3 code arrays (ll/ml/of) */
size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
+ ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
+ 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
/* prev + next compressed block states */
size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, /* forCCtx */ 1);
size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
size_t const ldmSeqSpace = ldmParams->enableLdm ?
ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
+ ZSTD_cwksp_alloc_size(buffOutSize);
size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
size_t const neededSpace =
cctxSpace +
entropySpace +
blockStateSpace +
ldmSpace +
ldmSeqSpace +
matchStateSize +
tokenSpace +
bufferSpace;
DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
return neededSpace;
}
  1153. size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
  1154. {
  1155. ZSTD_compressionParameters const cParams =
  1156. ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
  1157. RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
  1158. /* estimateCCtxSize is for one-shot compression. So no buffers should
  1159. * be needed. However, we still allocate two 0-sized buffers, which can
  1160. * take space under ASAN. */
  1161. return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
  1162. &cParams, &params->ldmParams, 1, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
  1163. }
  1164. size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
  1165. {
  1166. ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
  1167. return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
  1168. }
  1169. static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
  1170. {
  1171. int tier = 0;
  1172. size_t largestSize = 0;
  1173. static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
  1174. for (; tier < 4; ++tier) {
  1175. /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
  1176. ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
  1177. largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
  1178. }
  1179. return largestSize;
  1180. }
  1181. size_t ZSTD_estimateCCtxSize(int compressionLevel)
  1182. {
  1183. int level;
  1184. size_t memBudget = 0;
  1185. for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
  1186. /* Ensure monotonically increasing memory usage as compression level increases */
  1187. size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
  1188. if (newMB > memBudget) memBudget = newMB;
  1189. }
  1190. return memBudget;
  1191. }
  1192. size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
  1193. {
  1194. RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
  1195. { ZSTD_compressionParameters const cParams =
  1196. ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
  1197. size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
  1198. size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
  1199. ? ((size_t)1 << cParams.windowLog) + blockSize
  1200. : 0;
  1201. size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
  1202. ? ZSTD_compressBound(blockSize) + 1
  1203. : 0;
  1204. return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
  1205. &cParams, &params->ldmParams, 1, inBuffSize, outBuffSize,
  1206. ZSTD_CONTENTSIZE_UNKNOWN);
  1207. }
  1208. }
  1209. size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
  1210. {
  1211. ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
  1212. return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
  1213. }
  1214. static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
  1215. {
  1216. ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
  1217. return ZSTD_estimateCStreamSize_usingCParams(cParams);
  1218. }
  1219. size_t ZSTD_estimateCStreamSize(int compressionLevel)
  1220. {
  1221. int level;
  1222. size_t memBudget = 0;
  1223. for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
  1224. size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
  1225. if (newMB > memBudget) memBudget = newMB;
  1226. }
  1227. return memBudget;
  1228. }
  1229. /* ZSTD_getFrameProgression():
  1230. * tells how much data has been consumed (input) and produced (output) for current frame.
  1231. * able to count progression inside worker threads (non-blocking mode).
  1232. */
  1233. ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
  1234. {
  1235. { ZSTD_frameProgression fp;
  1236. size_t const buffered = (cctx->inBuff == NULL) ? 0 :
  1237. cctx->inBuffPos - cctx->inToCompress;
  1238. if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
  1239. assert(buffered <= ZSTD_BLOCKSIZE_MAX);
  1240. fp.ingested = cctx->consumedSrcSize + buffered;
  1241. fp.consumed = cctx->consumedSrcSize;
  1242. fp.produced = cctx->producedCSize;
  1243. fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */
  1244. fp.currentJobID = 0;
  1245. fp.nbActiveWorkers = 0;
  1246. return fp;
  1247. } }
  1248. /*! ZSTD_toFlushNow()
  1249. * Only useful for multithreading scenarios currently (nbWorkers >= 1).
  1250. */
  1251. size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
  1252. {
  1253. (void)cctx;
  1254. return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
  1255. }
/* ZSTD_assertEqualCParams() :
 * Debug-only check that two sets of compression parameters match field by
 * field. The (void) casts keep builds with asserts compiled out free of
 * unused-parameter warnings. */
static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
ZSTD_compressionParameters cParams2)
{
(void)cParams1;
(void)cParams2;
assert(cParams1.windowLog == cParams2.windowLog);
assert(cParams1.chainLog == cParams2.chainLog);
assert(cParams1.hashLog == cParams2.hashLog);
assert(cParams1.searchLog == cParams2.searchLog);
assert(cParams1.minMatch == cParams2.minMatch);
assert(cParams1.targetLength == cParams2.targetLength);
assert(cParams1.strategy == cParams2.strategy);
}
  1269. void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
  1270. {
  1271. int i;
  1272. for (i = 0; i < ZSTD_REP_NUM; ++i)
  1273. bs->rep[i] = repStartValue[i];
  1274. bs->entropy.huf.repeatMode = HUF_repeat_none;
  1275. bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
  1276. bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
  1277. bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
  1278. }
/*! ZSTD_invalidateMatchState()
 * Invalidate all the matches in the match finder tables.
 * Requires nextSrc and base to be set (can be NULL).
 */
static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
{
ZSTD_window_clear(&ms->window);
/* resume indexing from the just-cleared window's dictLimit */
ms->nextToUpdate = ms->window.dictLimit;
ms->loadedDictEnd = 0;
ms->opt.litLengthSum = 0; /* force reset of btopt stats */
ms->dictMatchState = NULL;
}
/*
 * Controls, for this matchState reset, whether the tables need to be cleared /
 * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
 * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
 * subsequent operation will overwrite the table space anyways (e.g., copying
 * the matchState contents in from a CDict).
 */
typedef enum {
ZSTDcrp_makeClean,
ZSTDcrp_leaveDirty
} ZSTD_compResetPolicy_e;
/*
 * Controls, for this matchState reset, whether indexing can continue where it
 * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
 * (ZSTDirp_reset).
 */
typedef enum {
ZSTDirp_continue,
ZSTDirp_reset
} ZSTD_indexResetPolicy_e;
/*
 * Distinguishes whether a matchState reset serves a CDict or a CCtx :
 * a CCtx target additionally gets hashLog3 and optimal-parser allocations
 * (see ZSTD_reset_matchState()).
 */
typedef enum {
ZSTD_resetTarget_CDict,
ZSTD_resetTarget_CCtx
} ZSTD_resetTarget_e;
/* ZSTD_reset_matchState() :
 * Reset `ms` and (re)reserve its tables from workspace `ws`.
 * @crp : whether tables must be zeroed (makeClean) or may stay dirty
 * @forceResetIndex : whether indexing restarts from zero
 * @forWho : CCtx targets also get hashLog3 + optimal parser space
 * @return : 0 on success, or a memory_allocation error code if `ws`
 *           could not satisfy a reservation */
static size_t
ZSTD_reset_matchState(ZSTD_matchState_t* ms,
ZSTD_cwksp* ws,
const ZSTD_compressionParameters* cParams,
const ZSTD_compResetPolicy_e crp,
const ZSTD_indexResetPolicy_e forceResetIndex,
const ZSTD_resetTarget_e forWho)
{
size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
size_t const hSize = ((size_t)1) << cParams->hashLog;
/* hashTable3 exists only for CCtx targets using minMatch==3 */
U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
if (forceResetIndex == ZSTDirp_reset) {
ZSTD_window_init(&ms->window);
ZSTD_cwksp_mark_tables_dirty(ws);
}
ms->hashLog3 = hashLog3;
ZSTD_invalidateMatchState(ms);
assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
ZSTD_cwksp_clear_tables(ws);
DEBUGLOG(5, "reserving table space");
/* table Space */
ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
"failed a workspace allocation in ZSTD_reset_matchState");
DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
if (crp!=ZSTDcrp_leaveDirty) {
/* reset tables only */
ZSTD_cwksp_clean_tables(ws);
}
/* opt parser space */
if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
DEBUGLOG(4, "reserving optimal parser space");
ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
}
ms->cParams = *cParams;
/* single failure check covers all the opt-space reservations above */
RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
"failed a workspace allocation in ZSTD_reset_matchState");
return 0;
}
  1363. /* ZSTD_indexTooCloseToMax() :
  1364. * minor optimization : prefer memset() rather than reduceIndex()
  1365. * which is measurably slow in some circumstances (reported for Visual Studio).
  1366. * Works when re-using a context for a lot of smallish inputs :
  1367. * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
  1368. * memset() will be triggered before reduceIndex().
  1369. */
  1370. #define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
  1371. static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
  1372. {
  1373. return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
  1374. }
  1375. /*! ZSTD_resetCCtx_internal() :
  1376. note : `params` are assumed fully validated at this stage */
  1377. static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
  1378. ZSTD_CCtx_params params,
  1379. U64 const pledgedSrcSize,
  1380. ZSTD_compResetPolicy_e const crp,
  1381. ZSTD_buffered_policy_e const zbuff)
  1382. {
  1383. ZSTD_cwksp* const ws = &zc->workspace;
  1384. DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
  1385. (U32)pledgedSrcSize, params.cParams.windowLog);
  1386. assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
  1387. zc->isFirstBlock = 1;
  1388. if (params.ldmParams.enableLdm) {
  1389. /* Adjust long distance matching parameters */
  1390. ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
  1391. assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
  1392. assert(params.ldmParams.hashRateLog < 32);
  1393. }
  1394. { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
  1395. size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
  1396. U32 const divider = (params.cParams.minMatch==3) ? 3 : 4;
  1397. size_t const maxNbSeq = blockSize / divider;
  1398. size_t const buffOutSize = (zbuff == ZSTDb_buffered && params.outBufferMode == ZSTD_bm_buffered)
  1399. ? ZSTD_compressBound(blockSize) + 1
  1400. : 0;
  1401. size_t const buffInSize = (zbuff == ZSTDb_buffered && params.inBufferMode == ZSTD_bm_buffered)
  1402. ? windowSize + blockSize
  1403. : 0;
  1404. size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
  1405. int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
  1406. ZSTD_indexResetPolicy_e needsIndexReset =
  1407. (!indexTooClose && zc->initialized) ? ZSTDirp_continue : ZSTDirp_reset;
  1408. size_t const neededSpace =
  1409. ZSTD_estimateCCtxSize_usingCCtxParams_internal(
  1410. &params.cParams, &params.ldmParams, zc->staticSize != 0,
  1411. buffInSize, buffOutSize, pledgedSrcSize);
  1412. FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
  1413. if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
  1414. /* Check if workspace is large enough, alloc a new one if needed */
  1415. {
  1416. int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
  1417. int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
  1418. DEBUGLOG(4, "Need %zu B workspace", neededSpace);
  1419. DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
  1420. if (workspaceTooSmall || workspaceWasteful) {
  1421. DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
  1422. ZSTD_cwksp_sizeof(ws) >> 10,
  1423. neededSpace >> 10);
  1424. RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
  1425. needsIndexReset = ZSTDirp_reset;
  1426. ZSTD_cwksp_free(ws, zc->customMem);
  1427. FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
  1428. DEBUGLOG(5, "reserving object space");
  1429. /* Statically sized space.
  1430. * entropyWorkspace never moves,
  1431. * though prev/next block swap places */
  1432. assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
  1433. zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
  1434. RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
  1435. zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
  1436. RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
  1437. zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
  1438. RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
  1439. } }
  1440. ZSTD_cwksp_clear(ws);
  1441. /* init params */
  1442. zc->appliedParams = params;
  1443. zc->blockState.matchState.cParams = params.cParams;
  1444. zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
  1445. zc->consumedSrcSize = 0;
  1446. zc->producedCSize = 0;
  1447. if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
  1448. zc->appliedParams.fParams.contentSizeFlag = 0;
  1449. DEBUGLOG(4, "pledged content size : %u ; flag : %u",
  1450. (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
  1451. zc->blockSize = blockSize;
  1452. xxh64_reset(&zc->xxhState, 0);
  1453. zc->stage = ZSTDcs_init;
  1454. zc->dictID = 0;
  1455. zc->dictContentSize = 0;
  1456. ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
  1457. /* ZSTD_wildcopy() is used to copy into the literals buffer,
  1458. * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
  1459. */
  1460. zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
  1461. zc->seqStore.maxNbLit = blockSize;
  1462. /* buffers */
  1463. zc->bufferedPolicy = zbuff;
  1464. zc->inBuffSize = buffInSize;
  1465. zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
  1466. zc->outBuffSize = buffOutSize;
  1467. zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
  1468. /* ldm bucketOffsets table */
  1469. if (params.ldmParams.enableLdm) {
  1470. /* TODO: avoid memset? */
  1471. size_t const numBuckets =
  1472. ((size_t)1) << (params.ldmParams.hashLog -
  1473. params.ldmParams.bucketSizeLog);
  1474. zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
  1475. ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
  1476. }
  1477. /* sequences storage */
  1478. ZSTD_referenceExternalSequences(zc, NULL, 0);
  1479. zc->seqStore.maxNbSeq = maxNbSeq;
  1480. zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
  1481. zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
  1482. zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
  1483. zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
  1484. FORWARD_IF_ERROR(ZSTD_reset_matchState(
  1485. &zc->blockState.matchState,
  1486. ws,
  1487. &params.cParams,
  1488. crp,
  1489. needsIndexReset,
  1490. ZSTD_resetTarget_CCtx), "");
  1491. /* ldm hash table */
  1492. if (params.ldmParams.enableLdm) {
  1493. /* TODO: avoid memset? */
  1494. size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
  1495. zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
  1496. ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
  1497. zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
  1498. zc->maxNbLdmSequences = maxNbLdmSeq;
  1499. ZSTD_window_init(&zc->ldmState.window);
  1500. ZSTD_window_clear(&zc->ldmState.window);
  1501. zc->ldmState.loadedDictEnd = 0;
  1502. }
  1503. /* Due to alignment, when reusing a workspace, we can actually consume
  1504. * up to 3 extra bytes for alignment. See the comments in zstd_cwksp.h
  1505. */
  1506. assert(ZSTD_cwksp_used(ws) >= neededSpace &&
  1507. ZSTD_cwksp_used(ws) <= neededSpace + 3);
  1508. DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
  1509. zc->initialized = 1;
  1510. return 0;
  1511. }
  1512. }
  1513. /* ZSTD_invalidateRepCodes() :
  1514. * ensures next compression will not use repcodes from previous block.
  1515. * Note : only works with regular variant;
  1516. * do not use with extDict variant ! */
  1517. void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
  1518. int i;
  1519. for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
  1520. assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
  1521. }
/* These are the approximate sizes for each strategy past which copying the
 * dictionary tables into the working context is faster than using them
 * in-place.
 * Indexed by ZSTD_strategy value; slot 0 is unused since strategies start at 1.
 */
static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
8 KB, /* unused */
8 KB, /* ZSTD_fast */
16 KB, /* ZSTD_dfast */
32 KB, /* ZSTD_greedy */
32 KB, /* ZSTD_lazy */
32 KB, /* ZSTD_lazy2 */
32 KB, /* ZSTD_btlazy2 */
32 KB, /* ZSTD_btopt */
8 KB, /* ZSTD_btultra */
8 KB /* ZSTD_btultra2 */
};
  1538. static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
  1539. const ZSTD_CCtx_params* params,
  1540. U64 pledgedSrcSize)
  1541. {
  1542. size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
  1543. int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
  1544. return dedicatedDictSearch
  1545. || ( ( pledgedSrcSize <= cutoff
  1546. || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
  1547. || params->attachDictPref == ZSTD_dictForceAttach )
  1548. && params->attachDictPref != ZSTD_dictForceCopy
  1549. && !params->forceWindow ); /* dictMatchState isn't correctly
  1550. * handled in _enforceMaxDist */
  1551. }
/* ZSTD_resetCCtx_byAttachingCDict() :
 * Reset cctx for a new frame while referencing the cdict's matchState
 * in-place (no table copy). The working window is pre-advanced so dict
 * matches never map to negative indices in the working context.
 * @return : 0 on success, or an error code */
static size_t
ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
{
ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
unsigned const windowLog = params.cParams.windowLog;
assert(windowLog != 0);
/* Resize working context table params for input only, since the dict
 * has its own tables. */
/* pledgedSrcSize == 0 means 0! */
if (cdict->matchState.dedicatedDictSearch) {
/* undo the DDS-specific cParams tweaks before adjusting for input */
ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
}
params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
cdict->dictContentSize, ZSTD_cpm_attachDict);
params.cParams.windowLog = windowLog;
FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
ZSTDcrp_makeClean, zbuff), "");
assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
}
{ const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
- cdict->matchState.window.base);
const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
if (cdictLen == 0) {
/* don't even attach dictionaries with no contents */
DEBUGLOG(4, "skipping attaching empty dictionary");
} else {
DEBUGLOG(4, "attaching dictionary into context");
cctx->blockState.matchState.dictMatchState = &cdict->matchState;
/* prep working match state so dict matches never have negative indices
 * when they are translated to the working context's index space. */
if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
cctx->blockState.matchState.window.nextSrc =
cctx->blockState.matchState.window.base + cdictEnd;
ZSTD_window_clear(&cctx->blockState.matchState.window);
}
/* loadedDictEnd is expressed within the referential of the active context */
cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
} }
cctx->dictID = cdict->dictID;
cctx->dictContentSize = cdict->dictContentSize;
/* copy block state */
ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
return 0;
}
/* ZSTD_resetCCtx_byCopyingCDict() :
 * Reset cctx with the cdict's cParams (windowLog excepted), then copy the
 * cdict's hash/chain tables, window offsets and block state into it.
 * Chosen when copying beats in-place referencing (see attachDictSizeCutoffs
 * and ZSTD_shouldAttachDict()).
 * @return : 0 on success, or an error code */
static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
/* dedicated-dict-search CDicts are always attached, never copied */
assert(!cdict->matchState.dedicatedDictSearch);
DEBUGLOG(4, "copying dictionary into context");
{ unsigned const windowLog = params.cParams.windowLog;
assert(windowLog != 0);
/* Copy only compression parameters related to tables. */
params.cParams = *cdict_cParams;
params.cParams.windowLog = windowLog;
/* leaveDirty : tables are overwritten by the copies below anyway */
FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
ZSTDcrp_leaveDirty, zbuff), "");
assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
}
ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
/* copy tables */
{ size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
ZSTD_memcpy(cctx->blockState.matchState.hashTable,
cdict->matchState.hashTable,
hSize * sizeof(U32));
ZSTD_memcpy(cctx->blockState.matchState.chainTable,
cdict->matchState.chainTable,
chainSize * sizeof(U32));
}
/* Zero the hashTable3, since the cdict never fills it */
{ int const h3log = cctx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
assert(cdict->matchState.hashLog3 == 0);
ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
}
ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
/* copy dictionary offsets */
{ ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
}
cctx->dictID = cdict->dictID;
cctx->dictContentSize = cdict->dictContentSize;
/* copy block state */
ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
return 0;
}
  1652. /* We have a choice between copying the dictionary context into the working
  1653. * context, or referencing the dictionary context from the working context
  1654. * in-place. We decide here which strategy to use. */
  1655. static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
  1656. const ZSTD_CDict* cdict,
  1657. const ZSTD_CCtx_params* params,
  1658. U64 pledgedSrcSize,
  1659. ZSTD_buffered_policy_e zbuff)
  1660. {
  1661. DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
  1662. (unsigned)pledgedSrcSize);
  1663. if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
  1664. return ZSTD_resetCCtx_byAttachingCDict(
  1665. cctx, cdict, *params, pledgedSrcSize, zbuff);
  1666. } else {
  1667. return ZSTD_resetCCtx_byCopyingCDict(
  1668. cctx, cdict, *params, pledgedSrcSize, zbuff);
  1669. }
  1670. }
/*! ZSTD_copyCCtx_internal() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  The "context", in this case, refers to the hash and chain tables,
 *  entropy tables, and dictionary references.
 * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
 * @return : 0, or an error code */
static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
                            const ZSTD_CCtx* srcCCtx,
                            ZSTD_frameParameters fParams,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
                    "Can't copy a ctx that's not in init stage.");
    ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
        /* Copy only compression parameters related to tables. */
        params.cParams = srcCCtx->appliedParams.cParams;
        params.fParams = fParams;
        /* leaveDirty: table contents are overwritten by the copies below,
         * so there is no need to zero them during the reset. */
        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
                                ZSTDcrp_leaveDirty, zbuff);
        /* After the reset, dst's table geometry must match src's, otherwise
         * the raw memcpy of the tables below would be invalid. */
        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
    }

    ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);

    /* copy tables */
    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
        int const h3log = srcCCtx->blockState.matchState.hashLog3;
        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;

        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
               srcCCtx->blockState.matchState.hashTable,
               hSize * sizeof(U32));
        ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
               srcCCtx->blockState.matchState.chainTable,
               chainSize * sizeof(U32));
        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
               srcCCtx->blockState.matchState.hashTable3,
               h3Size * sizeof(U32));
    }

    ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);

    /* copy dictionary offsets */
    {
        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
        dstMatchState->window       = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }
    dstCCtx->dictID = srcCCtx->dictID;
    dstCCtx->dictContentSize = srcCCtx->dictContentSize;

    /* copy block state */
    ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));

    return 0;
}
  1731. /*! ZSTD_copyCCtx() :
  1732. * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
  1733. * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
  1734. * pledgedSrcSize==0 means "unknown".
  1735. * @return : 0, or an error code */
  1736. size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
  1737. {
  1738. ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
  1739. ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
  1740. ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
  1741. if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
  1742. fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
  1743. return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
  1744. fParams, pledgedSrcSize,
  1745. zbuff);
  1746. }
  1747. #define ZSTD_ROWSIZE 16
  1748. /*! ZSTD_reduceTable() :
  1749. * reduce table indexes by `reducerValue`, or squash to zero.
  1750. * PreserveMark preserves "unsorted mark" for btlazy2 strategy.
  1751. * It must be set to a clear 0/1 value, to remove branch during inlining.
  1752. * Presume table size is a multiple of ZSTD_ROWSIZE
  1753. * to help auto-vectorization */
  1754. FORCE_INLINE_TEMPLATE void
  1755. ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
  1756. {
  1757. int const nbRows = (int)size / ZSTD_ROWSIZE;
  1758. int cellNb = 0;
  1759. int rowNb;
  1760. assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
  1761. assert(size < (1U<<31)); /* can be casted to int */
  1762. for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
  1763. int column;
  1764. for (column=0; column<ZSTD_ROWSIZE; column++) {
  1765. if (preserveMark) {
  1766. U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
  1767. table[cellNb] += adder;
  1768. }
  1769. if (table[cellNb] < reducerValue) table[cellNb] = 0;
  1770. else table[cellNb] -= reducerValue;
  1771. cellNb++;
  1772. } }
  1773. }
/* Standard variant: does not preserve the btlazy2 "unsorted mark". */
static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
}
/* btlazy2 variant: preserves ZSTD_DUBT_UNSORTED_MARK entries while reducing. */
static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
}
  1782. /*! ZSTD_reduceIndex() :
  1783. * rescale all indexes to avoid future overflow (indexes are U32) */
  1784. static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
  1785. {
  1786. { U32 const hSize = (U32)1 << params->cParams.hashLog;
  1787. ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
  1788. }
  1789. if (params->cParams.strategy != ZSTD_fast) {
  1790. U32 const chainSize = (U32)1 << params->cParams.chainLog;
  1791. if (params->cParams.strategy == ZSTD_btlazy2)
  1792. ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
  1793. else
  1794. ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
  1795. }
  1796. if (ms->hashLog3) {
  1797. U32 const h3Size = (U32)1 << ms->hashLog3;
  1798. ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
  1799. }
  1800. }
  1801. /*-*******************************************************
  1802. * Block entropic compression
  1803. *********************************************************/
  1804. /* See doc/zstd_compression_format.md for detailed format description */
  1805. void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
  1806. {
  1807. const seqDef* const sequences = seqStorePtr->sequencesStart;
  1808. BYTE* const llCodeTable = seqStorePtr->llCode;
  1809. BYTE* const ofCodeTable = seqStorePtr->ofCode;
  1810. BYTE* const mlCodeTable = seqStorePtr->mlCode;
  1811. U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
  1812. U32 u;
  1813. assert(nbSeq <= seqStorePtr->maxNbSeq);
  1814. for (u=0; u<nbSeq; u++) {
  1815. U32 const llv = sequences[u].litLength;
  1816. U32 const mlv = sequences[u].matchLength;
  1817. llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
  1818. ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
  1819. mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
  1820. }
  1821. if (seqStorePtr->longLengthID==1)
  1822. llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
  1823. if (seqStorePtr->longLengthID==2)
  1824. mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
  1825. }
  1826. /* ZSTD_useTargetCBlockSize():
  1827. * Returns if target compressed block size param is being used.
  1828. * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize.
  1829. * Returns 1 if true, 0 otherwise. */
  1830. static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
  1831. {
  1832. DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
  1833. return (cctxParams->targetCBlockSize != 0);
  1834. }
/* ZSTD_entropyCompressSequences_internal():
 * actually compresses both literals and sequences.
 * Layout written to dst: [compressed literals][nbSeq header][seqHead byte]
 * [LL NCount][OF NCount][ML NCount][interleaved FSE bitstream].
 * @return : size written to dst, 0 when the caller should fall back to an
 *           uncompressed block (decoder-compat workaround), or an error code. */
MEM_STATIC size_t
ZSTD_entropyCompressSequences_internal(seqStore_t* seqStorePtr,
                          const ZSTD_entropyCTables_t* prevEntropy,
                                ZSTD_entropyCTables_t* nextEntropy,
                          const ZSTD_CCtx_params* cctxParams,
                                void* dst, size_t dstCapacity,
                                void* entropyWorkspace, size_t entropyWkspSize,
                          const int bmi2)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
    /* histogram buffer carved from the head of the workspace */
    unsigned* count = (unsigned*)entropyWorkspace;
    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    BYTE* seqHead;
    /* position of the most recently written set_compressed NCount, if any;
     * used for the <4-byte decoder-compat check at the end */
    BYTE* lastNCount = NULL;

    /* reserve (MaxSeq+1) counters at the head of the workspace */
    entropyWorkspace = count + (MaxSeq + 1);
    entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);

    DEBUGLOG(4, "ZSTD_entropyCompressSequences_internal (nbSeq=%zu)", nbSeq);
    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);

    /* Compress literals */
    {   const BYTE* const literals = seqStorePtr->litStart;
        size_t const litSize = (size_t)(seqStorePtr->lit - literals);
        size_t const cSize = ZSTD_compressLiterals(
                                    &prevEntropy->huf, &nextEntropy->huf,
                                    cctxParams->cParams.strategy,
                                    ZSTD_disableLiteralsCompression(cctxParams),
                                    op, dstCapacity,
                                    literals, litSize,
                                    entropyWorkspace, entropyWkspSize,
                                    bmi2);
        FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
        assert(cSize <= dstCapacity);
        op += cSize;
    }

    /* Sequences Header : 1-3 bytes of nbSeq, plus 1 byte reserved for seqHead */
    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
                    dstSize_tooSmall, "Can't fit seq hdr in output buf!");
    if (nbSeq < 128) {
        *op++ = (BYTE)nbSeq;
    } else if (nbSeq < LONGNBSEQ) {
        op[0] = (BYTE)((nbSeq>>8) + 0x80);
        op[1] = (BYTE)nbSeq;
        op+=2;
    } else {
        op[0]=0xFF;
        MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
        op+=3;
    }
    assert(op <= oend);
    if (nbSeq==0) {
        /* Copy the old tables over as if we repeated them */
        ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
        return (size_t)(op - ostart);
    }

    /* seqHead : flags for FSE encoding type, filled in after the three types are chosen */
    seqHead = op++;
    assert(op <= oend);

    /* convert length/distances into codes */
    ZSTD_seqToCodes(seqStorePtr);
    /* build CTable for Literal Lengths */
    {   unsigned max = MaxLL;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */
        DEBUGLOG(5, "Building LL table");
        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
                                        count, max, mostFrequent, nbSeq,
                                        LLFSELog, prevEntropy->fse.litlengthCTable,
                                        LL_defaultNorm, LL_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(set_basic < set_compressed && set_rle < set_compressed);
        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
                count, max, llCodeTable, nbSeq,
                LL_defaultNorm, LL_defaultNormLog, MaxLL,
                prevEntropy->fse.litlengthCTable,
                sizeof(prevEntropy->fse.litlengthCTable),
                entropyWorkspace, entropyWkspSize);
            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
            if (LLtype == set_compressed)
                lastNCount = op;
            op += countSize;
            assert(op <= oend);
    }   }
    /* build CTable for Offsets */
    {   unsigned max = MaxOff;
        size_t const mostFrequent = HIST_countFast_wksp(
            count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */
        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
        DEBUGLOG(5, "Building OF table");
        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
                                        count, max, mostFrequent, nbSeq,
                                        OffFSELog, prevEntropy->fse.offcodeCTable,
                                        OF_defaultNorm, OF_defaultNormLog,
                                        defaultPolicy, strategy);
        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
                count, max, ofCodeTable, nbSeq,
                OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                prevEntropy->fse.offcodeCTable,
                sizeof(prevEntropy->fse.offcodeCTable),
                entropyWorkspace, entropyWkspSize);
            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
            if (Offtype == set_compressed)
                lastNCount = op;
            op += countSize;
            assert(op <= oend);
    }   }
    /* build CTable for MatchLengths */
    {   unsigned max = MaxML;
        size_t const mostFrequent = HIST_countFast_wksp(
            count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */
        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
                                        count, max, mostFrequent, nbSeq,
                                        MLFSELog, prevEntropy->fse.matchlengthCTable,
                                        ML_defaultNorm, ML_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(
                op, (size_t)(oend - op),
                CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
                count, max, mlCodeTable, nbSeq,
                ML_defaultNorm, ML_defaultNormLog, MaxML,
                prevEntropy->fse.matchlengthCTable,
                sizeof(prevEntropy->fse.matchlengthCTable),
                entropyWorkspace, entropyWkspSize);
            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
            if (MLtype == set_compressed)
                lastNCount = op;
            op += countSize;
            assert(op <= oend);
    }   }

    /* 2 bits per field, low 2 bits reserved */
    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, (size_t)(oend - op),
                                        CTable_MatchLength, mlCodeTable,
                                        CTable_OffsetBits, ofCodeTable,
                                        CTable_LitLength, llCodeTable,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
        op += bitstreamSize;
        assert(op <= oend);
        /* zstd versions <= 1.3.4 mistakenly report corruption when
         * FSE_readNCount() receives a buffer < 4 bytes.
         * Fixed by https://github.com/facebook/zstd/pull/1146.
         * This can happen when the last set_compressed table present is 2
         * bytes and the bitstream is only one byte.
         * In this exceedingly rare case, we will simply emit an uncompressed
         * block, since it isn't worth optimizing.
         */
        if (lastNCount && (op - lastNCount) < 4) {
            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
            assert(op - lastNCount == 3);
            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
                        "emitting an uncompressed block.");
            return 0;
        }
    }

    DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
    return (size_t)(op - ostart);
}
/* Wrapper around ZSTD_entropyCompressSequences_internal() that converts
 * "ran out of space" and "not compressible enough" outcomes into a 0 return,
 * signalling the caller to emit a raw uncompressed block instead.
 * @return : compressed size, 0 (use raw block), or an error code */
MEM_STATIC size_t
ZSTD_entropyCompressSequences(seqStore_t* seqStorePtr,
                       const ZSTD_entropyCTables_t* prevEntropy,
                             ZSTD_entropyCTables_t* nextEntropy,
                       const ZSTD_CCtx_params* cctxParams,
                             void* dst, size_t dstCapacity,
                             size_t srcSize,
                             void* entropyWorkspace, size_t entropyWkspSize,
                             int bmi2)
{
    size_t const cSize = ZSTD_entropyCompressSequences_internal(
                             seqStorePtr, prevEntropy, nextEntropy, cctxParams,
                             dst, dstCapacity,
                             entropyWorkspace, entropyWkspSize, bmi2);
    if (cSize == 0) return 0;
    /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
     * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block.
     */
    if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
        return 0;  /* block not compressed */
    FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSequences_internal failed");

    /* Check compressibility : reject blocks whose savings don't beat the minimum gain */
    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
        if (cSize >= maxCSize) return 0;  /* block not compressed */
    }
    DEBUGLOG(4, "ZSTD_entropyCompressSequences() cSize: %zu\n", cSize);
    return cSize;
}
/* ZSTD_selectBlockCompressor() :
 * Not static, but internal use only (used by long distance matcher)
 * assumption : strat is a valid strategy.
 * Rows are indexed by ZSTD_dictMode_e (noDict, extDict, dictMatchState,
 * dedicatedDictSearch); columns by ZSTD_strategy (0 shares the ZSTD_fast entry).
 * NOTE(review): the dedicatedDictSearch row only provides the three lazy
 * strategies; the assert below enforces that other strategies never reach it. */
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
{
    static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
        { ZSTD_compressBlock_fast  /* default for 0 */,
          ZSTD_compressBlock_fast,
          ZSTD_compressBlock_doubleFast,
          ZSTD_compressBlock_greedy,
          ZSTD_compressBlock_lazy,
          ZSTD_compressBlock_lazy2,
          ZSTD_compressBlock_btlazy2,
          ZSTD_compressBlock_btopt,
          ZSTD_compressBlock_btultra,
          ZSTD_compressBlock_btultra2 },
        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
          ZSTD_compressBlock_fast_extDict,
          ZSTD_compressBlock_doubleFast_extDict,
          ZSTD_compressBlock_greedy_extDict,
          ZSTD_compressBlock_lazy_extDict,
          ZSTD_compressBlock_lazy2_extDict,
          ZSTD_compressBlock_btlazy2_extDict,
          ZSTD_compressBlock_btopt_extDict,
          ZSTD_compressBlock_btultra_extDict,
          ZSTD_compressBlock_btultra_extDict },
        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
          ZSTD_compressBlock_fast_dictMatchState,
          ZSTD_compressBlock_doubleFast_dictMatchState,
          ZSTD_compressBlock_greedy_dictMatchState,
          ZSTD_compressBlock_lazy_dictMatchState,
          ZSTD_compressBlock_lazy2_dictMatchState,
          ZSTD_compressBlock_btlazy2_dictMatchState,
          ZSTD_compressBlock_btopt_dictMatchState,
          ZSTD_compressBlock_btultra_dictMatchState,
          ZSTD_compressBlock_btultra_dictMatchState },
        { NULL  /* default for 0 */,
          NULL,
          NULL,
          ZSTD_compressBlock_greedy_dedicatedDictSearch,
          ZSTD_compressBlock_lazy_dedicatedDictSearch,
          ZSTD_compressBlock_lazy2_dedicatedDictSearch,
          NULL,
          NULL,
          NULL,
          NULL }
    };
    ZSTD_blockCompressor selectedCompressor;
    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);

    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
    selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
    assert(selectedCompressor != NULL);
    return selectedCompressor;
}
  2100. static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
  2101. const BYTE* anchor, size_t lastLLSize)
  2102. {
  2103. ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
  2104. seqStorePtr->lit += lastLLSize;
  2105. }
  2106. void ZSTD_resetSeqStore(seqStore_t* ssPtr)
  2107. {
  2108. ssPtr->lit = ssPtr->litStart;
  2109. ssPtr->sequences = ssPtr->sequencesStart;
  2110. ssPtr->longLengthID = 0;
  2111. }
typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;

/* Run the match finder (or long-distance matcher) over `src` and fill
 * zc->seqStore with the block's sequences and literals.
 * @return : ZSTDbss_compress, ZSTDbss_noCompress, or an error code */
static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{
    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
    DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
    /* Assert that we have correctly flushed the ctx params into the ms's copy */
    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
        /* Block too small to compress; still advance the external LDM
         * sequence store past this range so it stays in sync. */
        if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
            ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
        } else {
            ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
        }
        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
    }
    ZSTD_resetSeqStore(&(zc->seqStore));
    /* required for optimal parser to read stats from dictionary */
    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
    /* tell the optimal parser how we expect to compress literals */
    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
    /* a gap between an attached dict and the current window is not safe,
     * they must remain adjacent,
     * and when that stops being the case, the dict must be unset */
    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);

    /* limited update after a very long match */
    {   const BYTE* const base = ms->window.base;
        const BYTE* const istart = (const BYTE*)src;
        const U32 curr = (U32)(istart-base);
        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
        /* cap how far back the match finder has to catch up */
        if (curr > ms->nextToUpdate + 384)
            ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
    }

    /* select and store sequences */
    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
        size_t lastLLSize;
        /* seed next block's repcodes from the previous block's */
        {   int i;
            for (i = 0; i < ZSTD_REP_NUM; ++i)
                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
        }
        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
            /* externally-provided LDM sequences take precedence */
            assert(!zc->appliedParams.ldmParams.enableLdm);
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&zc->externSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       src, srcSize);
            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
        } else if (zc->appliedParams.ldmParams.enableLdm) {
            /* internal long-distance matching: generate, then compress */
            rawSeqStore_t ldmSeqStore = kNullRawSeqStore;

            ldmSeqStore.seq = zc->ldmSequences;
            ldmSeqStore.capacity = zc->maxNbLdmSequences;
            /* Updates ldmSeqStore.size */
            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
                                               &zc->appliedParams.ldmParams,
                                               src, srcSize), "");
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&ldmSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       src, srcSize);
            assert(ldmSeqStore.pos == ldmSeqStore.size);
        } else {   /* not long range mode */
            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
            ms->ldmSeqStore = NULL;
            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
        }
        /* stash the trailing literals after the last match */
        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
    }   }
    return ZSTDbss_compress;
}
/* Convert the current block's internal seqStore into public ZSTD_Sequence
 * entries appended at zc->seqCollector.seqIndex, resolving repcodes into
 * raw offsets and ending with a last-literals marker sequence
 * (of: 0, ml: 0, ll: remaining literals). */
static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
{
    const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
    const seqDef* seqStoreSeqs = seqStore->sequencesStart;
    size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
    size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
    size_t literalsRead = 0;
    size_t lastLLSize;

    ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
    size_t i;
    repcodes_t updatedRepcodes;

    assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
    /* Ensure we have enough space for last literals "sequence" */
    assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
    ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
    for (i = 0; i < seqStoreSeqSize; ++i) {
        /* internal offsets store offCode+1 = rawOffset + ZSTD_REP_NUM for non-repcodes */
        U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM;
        outSeqs[i].litLength = seqStoreSeqs[i].litLength;
        outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH;
        outSeqs[i].rep = 0;

        /* restore the high bits elided for lengths >= 64K */
        if (i == seqStore->longLengthPos) {
            if (seqStore->longLengthID == 1) {
                outSeqs[i].litLength += 0x10000;
            } else if (seqStore->longLengthID == 2) {
                outSeqs[i].matchLength += 0x10000;
            }
        }

        if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) {
            /* Derive the correct offset corresponding to a repcode */
            outSeqs[i].rep = seqStoreSeqs[i].offset;
            if (outSeqs[i].litLength != 0) {
                rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
            } else {
                /* litLength == 0 shifts repcode meaning: rep 3 means rep[0]-1 */
                if (outSeqs[i].rep == 3) {
                    rawOffset = updatedRepcodes.rep[0] - 1;
                } else {
                    rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
                }
            }
        }
        outSeqs[i].offset = rawOffset;
        /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
           so we provide seqStoreSeqs[i].offset - 1 */
        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep,
                                         seqStoreSeqs[i].offset - 1,
                                         seqStoreSeqs[i].litLength == 0);
        literalsRead += outSeqs[i].litLength;
    }
    /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
     * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
     * for the block boundary, according to the API.
     */
    assert(seqStoreLiteralsSize >= literalsRead);
    lastLLSize = seqStoreLiteralsSize - literalsRead;
    outSeqs[i].litLength = (U32)lastLLSize;
    outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
    seqStoreSeqSize++;
    zc->seqCollector.seqIndex += seqStoreSeqSize;
}
  2245. size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
  2246. size_t outSeqsSize, const void* src, size_t srcSize)
  2247. {
  2248. const size_t dstCapacity = ZSTD_compressBound(srcSize);
  2249. void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
  2250. SeqCollector seqCollector;
  2251. RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
  2252. seqCollector.collectSequences = 1;
  2253. seqCollector.seqStart = outSeqs;
  2254. seqCollector.seqIndex = 0;
  2255. seqCollector.maxSequences = outSeqsSize;
  2256. zc->seqCollector = seqCollector;
  2257. ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
  2258. ZSTD_customFree(dst, ZSTD_defaultCMem);
  2259. return zc->seqCollector.seqIndex;
  2260. }
  2261. size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
  2262. size_t in = 0;
  2263. size_t out = 0;
  2264. for (; in < seqsSize; ++in) {
  2265. if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
  2266. if (in != seqsSize - 1) {
  2267. sequences[in+1].litLength += sequences[in].litLength;
  2268. }
  2269. } else {
  2270. sequences[out] = sequences[in];
  2271. ++out;
  2272. }
  2273. }
  2274. return out;
  2275. }
  2276. /* Unrolled loop to read four size_ts of input at a time. Returns 1 if is RLE, 0 if not. */
  2277. static int ZSTD_isRLE(const BYTE* src, size_t length) {
  2278. const BYTE* ip = src;
  2279. const BYTE value = ip[0];
  2280. const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
  2281. const size_t unrollSize = sizeof(size_t) * 4;
  2282. const size_t unrollMask = unrollSize - 1;
  2283. const size_t prefixLength = length & unrollMask;
  2284. size_t i;
  2285. size_t u;
  2286. if (length == 1) return 1;
  2287. /* Check if prefix is RLE first before using unrolled loop */
  2288. if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
  2289. return 0;
  2290. }
  2291. for (i = prefixLength; i != length; i += unrollSize) {
  2292. for (u = 0; u < unrollSize; u += sizeof(size_t)) {
  2293. if (MEM_readST(ip + i + u) != valueST) {
  2294. return 0;
  2295. }
  2296. }
  2297. }
  2298. return 1;
  2299. }
  2300. /* Returns true if the given block may be RLE.
  2301. * This is just a heuristic based on the compressibility.
  2302. * It may return both false positives and false negatives.
  2303. */
  2304. static int ZSTD_maybeRLE(seqStore_t const* seqStore)
  2305. {
  2306. size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
  2307. size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
  2308. return nbSeqs < 4 && nbLits < 10;
  2309. }
  2310. static void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc)
  2311. {
  2312. ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
  2313. zc->blockState.prevCBlock = zc->blockState.nextCBlock;
  2314. zc->blockState.nextCBlock = tmp;
  2315. }
  2316. static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
  2317. void* dst, size_t dstCapacity,
  2318. const void* src, size_t srcSize, U32 frame)
  2319. {
  2320. /* This the upper bound for the length of an rle block.
  2321. * This isn't the actual upper bound. Finding the real threshold
  2322. * needs further investigation.
  2323. */
  2324. const U32 rleMaxLength = 25;
  2325. size_t cSize;
  2326. const BYTE* ip = (const BYTE*)src;
  2327. BYTE* op = (BYTE*)dst;
  2328. DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
  2329. (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
  2330. (unsigned)zc->blockState.matchState.nextToUpdate);
  2331. { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
  2332. FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
  2333. if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
  2334. }
  2335. if (zc->seqCollector.collectSequences) {
  2336. ZSTD_copyBlockSequences(zc);
  2337. ZSTD_confirmRepcodesAndEntropyTables(zc);
  2338. return 0;
  2339. }
  2340. /* encode sequences and literals */
  2341. cSize = ZSTD_entropyCompressSequences(&zc->seqStore,
  2342. &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
  2343. &zc->appliedParams,
  2344. dst, dstCapacity,
  2345. srcSize,
  2346. zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
  2347. zc->bmi2);
  2348. if (zc->seqCollector.collectSequences) {
  2349. ZSTD_copyBlockSequences(zc);
  2350. return 0;
  2351. }
  2352. if (frame &&
  2353. /* We don't want to emit our first block as a RLE even if it qualifies because
  2354. * doing so will cause the decoder (cli only) to throw a "should consume all input error."
  2355. * This is only an issue for zstd <= v1.4.3
  2356. */
  2357. !zc->isFirstBlock &&
  2358. cSize < rleMaxLength &&
  2359. ZSTD_isRLE(ip, srcSize))
  2360. {
  2361. cSize = 1;
  2362. op[0] = ip[0];
  2363. }
  2364. out:
  2365. if (!ZSTD_isError(cSize) && cSize > 1) {
  2366. ZSTD_confirmRepcodesAndEntropyTables(zc);
  2367. }
  2368. /* We check that dictionaries have offset codes available for the first
  2369. * block. After the first block, the offcode table might not have large
  2370. * enough codes to represent the offsets in the data.
  2371. */
  2372. if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
  2373. zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
  2374. return cSize;
  2375. }
/* ZSTD_compressBlock_targetCBlockSize_body() :
 * Compress one block in "superblock" mode: split it into sub-blocks whose
 * compressed sizes approach the configured target block size.
 * @bss must be the result of ZSTD_buildSeqStore() on the same (src, srcSize)
 * — NOTE(review): not re-checked here, confirm at call sites.
 * @return : total compressed size (block header(s) included), or an error code. */
static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
                                                       void* dst, size_t dstCapacity,
                                                       const void* src, size_t srcSize,
                                                       const size_t bss, U32 lastBlock)
{
    DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
    if (bss == ZSTDbss_compress) {
        if (/* We don't want to emit our first block as a RLE even if it qualifies because
             * doing so will cause the decoder (cli only) to throw a "should consume all input error."
             * This is only an issue for zstd <= v1.4.3
             */
            !zc->isFirstBlock &&
            ZSTD_maybeRLE(&zc->seqStore) &&
            ZSTD_isRLE((BYTE const*)src, srcSize))
        {
            return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
        }
        /* Attempt superblock compression.
         *
         * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
         * standard ZSTD_compressBound(). This is a problem, because even if we have
         * space now, taking an extra byte now could cause us to run out of space later
         * and violate ZSTD_compressBound().
         *
         * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
         *
         * In order to respect ZSTD_compressBound() we must attempt to emit a raw
         * uncompressed block in these cases:
         *   * cSize == 0: Return code for an uncompressed block.
         *   * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
         *     ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
         *     output space.
         *   * cSize >= blockBound(srcSize): We have expanded the block too much so
         *     emit an uncompressed block.
         */
        {   size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
            if (cSize != ERROR(dstSize_tooSmall)) {
                /* minimum acceptable gain for the compressed form to be worth keeping */
                size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
                if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
                    /* compression succeeded and gained enough : commit the new
                     * repcodes / entropy tables for subsequent blocks */
                    ZSTD_confirmRepcodesAndEntropyTables(zc);
                    return cSize;
                }
            }
        }
    }
    DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
    /* Superblock compression failed, attempt to emit a single no compress block.
     * The decoder will be able to stream this block since it is uncompressed.
     */
    return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
}
  2429. static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
  2430. void* dst, size_t dstCapacity,
  2431. const void* src, size_t srcSize,
  2432. U32 lastBlock)
  2433. {
  2434. size_t cSize = 0;
  2435. const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
  2436. DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
  2437. (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
  2438. FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
  2439. cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
  2440. FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");
  2441. if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
  2442. zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
  2443. return cSize;
  2444. }
  2445. static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
  2446. ZSTD_cwksp* ws,
  2447. ZSTD_CCtx_params const* params,
  2448. void const* ip,
  2449. void const* iend)
  2450. {
  2451. if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
  2452. U32 const maxDist = (U32)1 << params->cParams.windowLog;
  2453. U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
  2454. U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
  2455. ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
  2456. ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
  2457. ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
  2458. ZSTD_cwksp_mark_tables_dirty(ws);
  2459. ZSTD_reduceIndex(ms, params, correction);
  2460. ZSTD_cwksp_mark_tables_clean(ws);
  2461. if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
  2462. else ms->nextToUpdate -= correction;
  2463. /* invalidate dictionaries on overflow correction */
  2464. ms->loadedDictEnd = 0;
  2465. ms->dictMatchState = NULL;
  2466. }
  2467. }
/*! ZSTD_compress_frameChunk() :
 *  Compress a chunk of data into one or multiple blocks.
 *  All blocks will be terminated, all input will be consumed.
 *  Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
 *  Frame is supposed already started (header already produced)
 * @return : compressed size, or an error code
 */
static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
                                        void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize,
                                        U32 lastFrameChunk)
{
    size_t blockSize = cctx->blockSize;
    size_t remaining = srcSize;
    const BYTE* ip = (const BYTE*)src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;

    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);

    DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
    /* frame checksum is computed over the raw (uncompressed) input */
    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
        xxh64_update(&cctx->xxhState, src, srcSize);

    while (remaining) {
        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
        /* this is the frame's last block only if it consumes all remaining input */
        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);

        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
                        dstSize_tooSmall,
                        "not enough space to store compressed block");
        if (remaining < blockSize) blockSize = remaining;

        /* keep window indices and dictionary validity up to date before compressing */
        ZSTD_overflowCorrectIfNeeded(
            ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);

        /* Ensure hash/chain table insertion resumes no sooner than lowlimit */
        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;

        {   size_t cSize;
            if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
                /* superblock path emits its own block header(s) */
                cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
                assert(cSize > 0);
                assert(cSize <= blockSize + ZSTD_blockHeaderSize);
            } else {
                /* reserve 3 bytes up front for the block header */
                cSize = ZSTD_compressBlock_internal(cctx,
                                op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
                                ip, blockSize, 1 /* frame */);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");

                if (cSize == 0) {  /* block is not compressible */
                    cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
                    FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
                } else {
                    /* cSize == 1 signals an RLE block (single repeated byte) */
                    U32 const cBlockHeader = cSize == 1 ?
                        lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
                        lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
                    MEM_writeLE24(op, cBlockHeader);
                    cSize += ZSTD_blockHeaderSize;
                }
            }

            ip += blockSize;
            assert(remaining >= blockSize);
            remaining -= blockSize;
            op += cSize;
            assert(dstCapacity >= cSize);
            dstCapacity -= cSize;
            cctx->isFirstBlock = 0;
            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
                     (unsigned)cSize);
    }   }

    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
    return (size_t)(op-ostart);
}
/* ZSTD_writeFrameHeader() :
 * Write the zstd frame header into `dst` :
 * [magic (zstd1 format only)] [frame header descriptor] [window descriptor?]
 * [dictID: 0/1/2/4 bytes] [frame content size: 0/1/2/4/8 bytes].
 * @return : number of bytes written, or an error code. */
static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
{   BYTE* const op = (BYTE*)dst;
    U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
    U32   const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
    U32   const checksumFlag = params->fParams.checksumFlag>0;
    U32   const windowSize = (U32)1 << params->cParams.windowLog;
    /* single-segment mode : no window descriptor, whole content fits in window */
    U32   const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
    BYTE  const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
    U32   const fcsCode = params->fParams.contentSizeFlag ?
                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
    /* descriptor byte packs : dictID size code (bits 0-1), checksum (bit 2),
     * single-segment (bit 5), frame content size code (bits 6-7) */
    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
    size_t pos=0;

    assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
                    "dst buf is too small to fit worst-case frame header size.");
    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
             !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);

    if (params->format == ZSTD_f_zstd1) {
        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
        pos = 4;
    }
    op[pos++] = frameHeaderDescriptionByte;
    if (!singleSegment) op[pos++] = windowLogByte;
    switch(dictIDSizeCode)
    {
        default:
            assert(0); /* impossible */
            ZSTD_FALLTHROUGH;
        case 0 : break;
        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
    }
    switch(fcsCode)
    {
        default:
            assert(0); /* impossible */
            ZSTD_FALLTHROUGH;
        /* fcsCode 0 : content size field only present in single-segment mode */
        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
        /* 2-byte field stores size minus 256 (values 0-255 use fcsCode 0) */
        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
    }
    return pos;
}
  2583. /* ZSTD_writeSkippableFrame_advanced() :
  2584. * Writes out a skippable frame with the specified magic number variant (16 are supported),
  2585. * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
  2586. *
  2587. * Returns the total number of bytes written, or a ZSTD error code.
  2588. */
  2589. size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
  2590. const void* src, size_t srcSize, unsigned magicVariant) {
  2591. BYTE* op = (BYTE*)dst;
  2592. RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
  2593. dstSize_tooSmall, "Not enough room for skippable frame");
  2594. RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
  2595. RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");
  2596. MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
  2597. MEM_writeLE32(op+4, (U32)srcSize);
  2598. ZSTD_memcpy(op+8, src, srcSize);
  2599. return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
  2600. }
  2601. /* ZSTD_writeLastEmptyBlock() :
  2602. * output an empty Block with end-of-frame mark to complete a frame
  2603. * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
  2604. * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
  2605. */
  2606. size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
  2607. {
  2608. RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
  2609. "dst buf is too small to write frame trailer empty block.");
  2610. { U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
  2611. MEM_writeLE24(dst, cBlockHeader24);
  2612. return ZSTD_blockHeaderSize;
  2613. }
  2614. }
  2615. size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
  2616. {
  2617. RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
  2618. "wrong cctx stage");
  2619. RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
  2620. parameter_unsupported,
  2621. "incompatible with ldm");
  2622. cctx->externSeqStore.seq = seq;
  2623. cctx->externSeqStore.size = nbSeq;
  2624. cctx->externSeqStore.capacity = nbSeq;
  2625. cctx->externSeqStore.pos = 0;
  2626. cctx->externSeqStore.posInSequence = 0;
  2627. return 0;
  2628. }
/* ZSTD_compressContinue_internal() :
 * Common worker for frame mode (frame==1) and single-block mode (frame==0).
 * In frame mode, writes the frame header on the first call, then compresses
 * `src` as a chunk of blocks; in block mode, compresses `src` as one block.
 * @return : nb of bytes written into `dst` (header included), or an error code */
static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
                                              void* dst, size_t dstCapacity,
                                              const void* src, size_t srcSize,
                                              U32 frame, U32 lastFrameChunk)
{
    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
    size_t fhSize = 0;

    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
             cctx->stage, (unsigned)srcSize);
    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
                    "missing init (ZSTD_compressBegin)");

    if (frame && (cctx->stage==ZSTDcs_init)) {
        /* first call of the frame : emit the frame header */
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
        assert(fhSize <= dstCapacity);
        dstCapacity -= fhSize;
        dst = (char*)dst + fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */

    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
        /* presumably : input not contiguous with previous segment, so resume
         * table insertion at dictLimit — TODO(review): confirm against
         * ZSTD_window_update()'s return contract */
        ms->nextToUpdate = ms->window.dictLimit;
    }
    if (cctx->appliedParams.ldmParams.enableLdm) {
        /* long-distance matcher tracks its own window */
        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
    }

    if (!frame) {
        /* overflow check and correction for block mode
         * (frame mode performs this per-block inside ZSTD_compress_frameChunk) */
        ZSTD_overflowCorrectIfNeeded(
            ms, &cctx->workspace, &cctx->appliedParams,
            src, (BYTE const*)src + srcSize);
    }

    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
    {   size_t const cSize = frame ?
                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
        FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
        cctx->consumedSrcSize += srcSize;
        cctx->producedCSize += (cSize + fhSize);
        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
            RETURN_ERROR_IF(
                cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
                srcSize_wrong,
                "error : pledgedSrcSize = %u, while realSrcSize >= %u",
                (unsigned)cctx->pledgedSrcSizePlusOne-1,
                (unsigned)cctx->consumedSrcSize);
        }
        return cSize + fhSize;
    }
}
  2682. size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
  2683. void* dst, size_t dstCapacity,
  2684. const void* src, size_t srcSize)
  2685. {
  2686. DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
  2687. return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
  2688. }
  2689. size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
  2690. {
  2691. ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
  2692. assert(!ZSTD_checkCParams(cParams));
  2693. return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
  2694. }
  2695. size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
  2696. {
  2697. DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
  2698. { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
  2699. RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
  2700. return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
  2701. }
  2702. /*! ZSTD_loadDictionaryContent() :
  2703. * @return : 0, or an error code
  2704. */
  2705. static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
  2706. ldmState_t* ls,
  2707. ZSTD_cwksp* ws,
  2708. ZSTD_CCtx_params const* params,
  2709. const void* src, size_t srcSize,
  2710. ZSTD_dictTableLoadMethod_e dtlm)
  2711. {
  2712. const BYTE* ip = (const BYTE*) src;
  2713. const BYTE* const iend = ip + srcSize;
  2714. ZSTD_window_update(&ms->window, src, srcSize);
  2715. ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
  2716. if (params->ldmParams.enableLdm && ls != NULL) {
  2717. ZSTD_window_update(&ls->window, src, srcSize);
  2718. ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
  2719. }
  2720. /* Assert that we the ms params match the params we're being given */
  2721. ZSTD_assertEqualCParams(params->cParams, ms->cParams);
  2722. if (srcSize <= HASH_READ_SIZE) return 0;
  2723. while (iend - ip > HASH_READ_SIZE) {
  2724. size_t const remaining = (size_t)(iend - ip);
  2725. size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
  2726. const BYTE* const ichunk = ip + chunk;
  2727. ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);
  2728. if (params->ldmParams.enableLdm && ls != NULL)
  2729. ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, &params->ldmParams);
  2730. switch(params->cParams.strategy)
  2731. {
  2732. case ZSTD_fast:
  2733. ZSTD_fillHashTable(ms, ichunk, dtlm);
  2734. break;
  2735. case ZSTD_dfast:
  2736. ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
  2737. break;
  2738. case ZSTD_greedy:
  2739. case ZSTD_lazy:
  2740. case ZSTD_lazy2:
  2741. if (chunk >= HASH_READ_SIZE && ms->dedicatedDictSearch) {
  2742. assert(chunk == remaining); /* must load everything in one go */
  2743. ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, ichunk-HASH_READ_SIZE);
  2744. } else if (chunk >= HASH_READ_SIZE) {
  2745. ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
  2746. }
  2747. break;
  2748. case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
  2749. case ZSTD_btopt:
  2750. case ZSTD_btultra:
  2751. case ZSTD_btultra2:
  2752. if (chunk >= HASH_READ_SIZE)
  2753. ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
  2754. break;
  2755. default:
  2756. assert(0); /* not possible : not a valid strategy id */
  2757. }
  2758. ip = ichunk;
  2759. }
  2760. ms->nextToUpdate = (U32)(iend - ms->window.base);
  2761. return 0;
  2762. }
  2763. /* Dictionaries that assign zero probability to symbols that show up causes problems
  2764. * when FSE encoding. Mark dictionaries with zero probability symbols as FSE_repeat_check
  2765. * and only dictionaries with 100% valid symbols can be assumed valid.
  2766. */
  2767. static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
  2768. {
  2769. U32 s;
  2770. if (dictMaxSymbolValue < maxSymbolValue) {
  2771. return FSE_repeat_check;
  2772. }
  2773. for (s = 0; s <= maxSymbolValue; ++s) {
  2774. if (normalizedCounter[s] == 0) {
  2775. return FSE_repeat_check;
  2776. }
  2777. }
  2778. return FSE_repeat_valid;
  2779. }
/* ZSTD_loadCEntropy() :
 * Parse the entropy section of a zstd dictionary : Huffman literal table,
 * then the offcode / matchlength / litlength FSE tables, then 3 repcodes.
 * Loaded tables are stored into `bs`; their repeat modes are set to "valid"
 * only when fully populated (see ZSTD_dictNCountRepeat).
 * @return : nb of bytes consumed from `dict`, or an error code */
size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
                         const void* const dict, size_t dictSize)
{
    short offcodeNCount[MaxOff+1];
    unsigned offcodeMaxValue = MaxOff;
    const BYTE* dictPtr = (const BYTE*)dict;    /* skip magic num and dict ID */
    const BYTE* const dictEnd = dictPtr + dictSize;
    dictPtr += 8;
    bs->entropy.huf.repeatMode = HUF_repeat_check;

    {   unsigned maxSymbolValue = 255;
        unsigned hasZeroWeights = 1;
        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
                                                    dictEnd-dictPtr, &hasZeroWeights);

        /* We only set the loaded table as valid if it contains all non-zero
         * weights. Otherwise, we set it to check */
        if (!hasZeroWeights)
            bs->entropy.huf.repeatMode = HUF_repeat_valid;

        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
        dictPtr += hufHeaderSize;
    }

    {   unsigned offcodeLog;
        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
        /* fill all offset symbols to avoid garbage at end of table */
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.offcodeCTable,
                offcodeNCount, MaxOff, offcodeLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
        dictPtr += offcodeHeaderSize;
    }

    {   short matchlengthNCount[MaxML+1];
        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.matchlengthCTable,
                matchlengthNCount, matchlengthMaxValue, matchlengthLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
        dictPtr += matchlengthHeaderSize;
    }

    {   short litlengthNCount[MaxLL+1];
        unsigned litlengthMaxValue = MaxLL, litlengthLog;
        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.litlengthCTable,
                litlengthNCount, litlengthMaxValue, litlengthLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted, "");
        bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
        dictPtr += litlengthHeaderSize;
    }

    /* 3 x 4-byte little-endian repcodes follow the entropy tables */
    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
    bs->rep[0] = MEM_readLE32(dictPtr+0);
    bs->rep[1] = MEM_readLE32(dictPtr+4);
    bs->rep[2] = MEM_readLE32(dictPtr+8);
    dictPtr += 12;

    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        U32 offcodeMax = MaxOff;
        if (dictContentSize <= ((U32)-1) - 128 KB) {
            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
        }
        /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
        bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));

        /* All repCodes must be <= dictContentSize and != 0 */
        {   U32 u;
            for (u=0; u<3; u++) {
                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
    }   }   }

    return dictPtr - (const BYTE*)dict;
}
  2861. /* Dictionary format :
  2862. * See :
  2863. * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
  2864. */
  2865. /*! ZSTD_loadZstdDictionary() :
  2866. * @return : dictID, or an error code
  2867. * assumptions : magic number supposed already checked
  2868. * dictSize supposed >= 8
  2869. */
  2870. static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
  2871. ZSTD_matchState_t* ms,
  2872. ZSTD_cwksp* ws,
  2873. ZSTD_CCtx_params const* params,
  2874. const void* dict, size_t dictSize,
  2875. ZSTD_dictTableLoadMethod_e dtlm,
  2876. void* workspace)
  2877. {
  2878. const BYTE* dictPtr = (const BYTE*)dict;
  2879. const BYTE* const dictEnd = dictPtr + dictSize;
  2880. size_t dictID;
  2881. size_t eSize;
  2882. ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
  2883. assert(dictSize >= 8);
  2884. assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
  2885. dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ );
  2886. eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
  2887. FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
  2888. dictPtr += eSize;
  2889. {
  2890. size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
  2891. FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
  2892. ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
  2893. }
  2894. return dictID;
  2895. }
  2896. /* ZSTD_compress_insertDictionary() :
  2897. * @return : dictID, or an error code */
  2898. static size_t
  2899. ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
  2900. ZSTD_matchState_t* ms,
  2901. ldmState_t* ls,
  2902. ZSTD_cwksp* ws,
  2903. const ZSTD_CCtx_params* params,
  2904. const void* dict, size_t dictSize,
  2905. ZSTD_dictContentType_e dictContentType,
  2906. ZSTD_dictTableLoadMethod_e dtlm,
  2907. void* workspace)
  2908. {
  2909. DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
  2910. if ((dict==NULL) || (dictSize<8)) {
  2911. RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
  2912. return 0;
  2913. }
  2914. ZSTD_reset_compressedBlockState(bs);
  2915. /* dict restricted modes */
  2916. if (dictContentType == ZSTD_dct_rawContent)
  2917. return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
  2918. if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
  2919. if (dictContentType == ZSTD_dct_auto) {
  2920. DEBUGLOG(4, "raw content dictionary detected");
  2921. return ZSTD_loadDictionaryContent(
  2922. ms, ls, ws, params, dict, dictSize, dtlm);
  2923. }
  2924. RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
  2925. assert(0); /* impossible */
  2926. }
  2927. /* dict as full zstd dictionary */
  2928. return ZSTD_loadZstdDictionary(
  2929. bs, ms, ws, params, dict, dictSize, dtlm, workspace);
  2930. }
/* Heuristic thresholds for reusing a CDict's parameters instead of the
 * requested ones : small sources (or sources small relative to the dict)
 * attach/copy the CDict's pre-built state rather than reloading the dict. */
#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)

/*! ZSTD_compressBegin_internal() :
 * Initialize `cctx` for a new compression job, loading either `dict` or
 * `cdict` (never both). May reuse the cdict's context state entirely when
 * the heuristic above applies.
 * @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
                                          const void* dict, size_t dictSize,
                                          ZSTD_dictContentType_e dictContentType,
                                          ZSTD_dictTableLoadMethod_e dtlm,
                                          const ZSTD_CDict* cdict,
                                          const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
                                          ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
    if ( (cdict)
      && (cdict->dictContentSize > 0)
      && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
        || cdict->compressionLevel == 0)
      && (params->attachDictPref != ZSTD_dictForceLoad) ) {
        /* fast path : reuse the cdict's pre-digested state directly */
        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
    }

    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,
                                              ZSTDcrp_makeClean, zbuff) , "");
    {   /* slow path : (re)load the dictionary content into the fresh context */
        size_t const dictID = cdict ?
                ZSTD_compress_insertDictionary(
                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
                        cdict->dictContentSize, cdict->dictContentType, dtlm,
                        cctx->entropyWorkspace)
              : ZSTD_compress_insertDictionary(
                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
                        dictContentType, dtlm, cctx->entropyWorkspace);
        FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
        assert(dictID <= UINT_MAX);
        cctx->dictID = (U32)dictID;
        cctx->dictContentSize = cdict ? cdict->dictContentSize : dictSize;
    }
    return 0;
}
  2975. size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
  2976. const void* dict, size_t dictSize,
  2977. ZSTD_dictContentType_e dictContentType,
  2978. ZSTD_dictTableLoadMethod_e dtlm,
  2979. const ZSTD_CDict* cdict,
  2980. const ZSTD_CCtx_params* params,
  2981. unsigned long long pledgedSrcSize)
  2982. {
  2983. DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
  2984. /* compression parameters verification and optimization */
  2985. FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
  2986. return ZSTD_compressBegin_internal(cctx,
  2987. dict, dictSize, dictContentType, dtlm,
  2988. cdict,
  2989. params, pledgedSrcSize,
  2990. ZSTDb_not_buffered);
  2991. }
  2992. /*! ZSTD_compressBegin_advanced() :
  2993. * @return : 0, or an error code */
  2994. size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
  2995. const void* dict, size_t dictSize,
  2996. ZSTD_parameters params, unsigned long long pledgedSrcSize)
  2997. {
  2998. ZSTD_CCtx_params cctxParams;
  2999. ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
  3000. return ZSTD_compressBegin_advanced_internal(cctx,
  3001. dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
  3002. NULL /*cdict*/,
  3003. &cctxParams, pledgedSrcSize);
  3004. }
  3005. size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
  3006. {
  3007. ZSTD_CCtx_params cctxParams;
  3008. {
  3009. ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
  3010. ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
  3011. }
  3012. DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
  3013. return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
  3014. &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
  3015. }
  3016. size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
  3017. {
  3018. return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
  3019. }
/*! ZSTD_writeEpilogue() :
 * Ends a frame : writes the frame header if nothing was written yet (empty
 * frame), a final empty raw block if one has not been emitted, and the
 * optional 32-bit xxh64 checksum. Resets the context stage afterwards.
 * @return : nb of bytes written into dst (or an error code) */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    size_t fhSize = 0;
    DEBUGLOG(4, "ZSTD_writeEpilogue");
    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
    /* special case : empty frame : no block has been emitted yet,
     * so the frame header itself still needs to be written here */
    if (cctx->stage == ZSTDcs_init) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
        dstCapacity -= fhSize;
        op += fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }
    if (cctx->stage != ZSTDcs_ending) {
        /* write one last empty block, make it the "last" block */
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
        /* need 4 bytes : MEM_writeLE32 stores 4, though only 3 count as header */
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
        MEM_writeLE32(op, cBlockHeader24);
        /* advance by the 3-byte block header size, not the 4 bytes written */
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
    }
    if (cctx->appliedParams.fParams.checksumFlag) {
        /* low 32 bits of the running xxh64 of the uncompressed content */
        U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
        MEM_writeLE32(op, checksum);
        op += 4;
    }
    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
    return op-ostart;
}
  3056. void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
  3057. {
  3058. (void)cctx;
  3059. (void)extraCSize;
  3060. }
/* ZSTD_compressEnd() :
 * Compresses the final chunk of a frame, then appends the epilogue
 * (last block marker + optional checksum). If a source size was pledged,
 * verifies that exactly that many bytes were consumed.
 * @return : total nb of bytes written into dst (or an error code) */
size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
                         void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize)
{
    size_t endResult;
    size_t const cSize = ZSTD_compressContinue_internal(cctx,
                                dst, dstCapacity, src, srcSize,
                                1 /* frame mode */, 1 /* last chunk */);
    FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
    /* epilogue is written immediately after the compressed payload */
    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
    FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
        /* pledgedSrcSizePlusOne stores pledged+1; 0 means "unknown" */
        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
        DEBUGLOG(4, "end of frame : controlling src size");
        RETURN_ERROR_IF(
            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
            srcSize_wrong,
            "error : pledgedSrcSize = %u, while realSrcSize = %u",
            (unsigned)cctx->pledgedSrcSizePlusOne-1,
            (unsigned)cctx->consumedSrcSize);
    }
    ZSTD_CCtx_trace(cctx, endResult);
    return cSize + endResult;
}
  3086. size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
  3087. void* dst, size_t dstCapacity,
  3088. const void* src, size_t srcSize,
  3089. const void* dict,size_t dictSize,
  3090. ZSTD_parameters params)
  3091. {
  3092. ZSTD_CCtx_params cctxParams;
  3093. DEBUGLOG(4, "ZSTD_compress_advanced");
  3094. FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
  3095. ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
  3096. return ZSTD_compress_advanced_internal(cctx,
  3097. dst, dstCapacity,
  3098. src, srcSize,
  3099. dict, dictSize,
  3100. &cctxParams);
  3101. }
  3102. /* Internal */
  3103. size_t ZSTD_compress_advanced_internal(
  3104. ZSTD_CCtx* cctx,
  3105. void* dst, size_t dstCapacity,
  3106. const void* src, size_t srcSize,
  3107. const void* dict,size_t dictSize,
  3108. const ZSTD_CCtx_params* params)
  3109. {
  3110. DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
  3111. FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
  3112. dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
  3113. params, srcSize, ZSTDb_not_buffered) , "");
  3114. return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
  3115. }
  3116. size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
  3117. void* dst, size_t dstCapacity,
  3118. const void* src, size_t srcSize,
  3119. const void* dict, size_t dictSize,
  3120. int compressionLevel)
  3121. {
  3122. ZSTD_CCtx_params cctxParams;
  3123. {
  3124. ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
  3125. assert(params.fParams.contentSizeFlag == 1);
  3126. ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
  3127. }
  3128. DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
  3129. return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);
  3130. }
  3131. size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
  3132. void* dst, size_t dstCapacity,
  3133. const void* src, size_t srcSize,
  3134. int compressionLevel)
  3135. {
  3136. DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
  3137. assert(cctx != NULL);
  3138. return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
  3139. }
  3140. size_t ZSTD_compress(void* dst, size_t dstCapacity,
  3141. const void* src, size_t srcSize,
  3142. int compressionLevel)
  3143. {
  3144. size_t result;
  3145. ZSTD_CCtx* cctx = ZSTD_createCCtx();
  3146. RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
  3147. result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
  3148. ZSTD_freeCCtx(cctx);
  3149. return result;
  3150. }
  3151. /* ===== Dictionary API ===== */
  3152. /*! ZSTD_estimateCDictSize_advanced() :
  3153. * Estimate amount of memory that will be needed to create a dictionary with following arguments */
  3154. size_t ZSTD_estimateCDictSize_advanced(
  3155. size_t dictSize, ZSTD_compressionParameters cParams,
  3156. ZSTD_dictLoadMethod_e dictLoadMethod)
  3157. {
  3158. DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
  3159. return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
  3160. + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
  3161. + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
  3162. + (dictLoadMethod == ZSTD_dlm_byRef ? 0
  3163. : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
  3164. }
  3165. size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
  3166. {
  3167. ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
  3168. return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
  3169. }
  3170. size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
  3171. {
  3172. if (cdict==NULL) return 0; /* support sizeof on NULL */
  3173. DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
  3174. /* cdict may be in the workspace */
  3175. return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
  3176. + ZSTD_cwksp_sizeof(&cdict->workspace);
  3177. }
/* ZSTD_initCDict_internal() :
 * Fills an already-allocated CDict : stores (or copies) the dictionary
 * content, reserves the entropy workspace, resets the match state,
 * and loads the dictionary tables.
 * Note : reservation order in cdict->workspace matters — the dict content
 * (when copied) and entropy workspace are reserved as objects before
 * ZSTD_reset_matchState carves up the remaining space.
 * @return : 0 on success, or an error code */
static size_t ZSTD_initCDict_internal(
        ZSTD_CDict* cdict,
        const void* dictBuffer, size_t dictSize,
        ZSTD_dictLoadMethod_e dictLoadMethod,
        ZSTD_dictContentType_e dictContentType,
        ZSTD_CCtx_params params)
{
    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
    assert(!ZSTD_checkCParams(params.cParams));
    cdict->matchState.cParams = params.cParams;
    cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
    /* DDS is only worthwhile for dictionaries up to ZSTD_CHUNKSIZE_MAX */
    if (cdict->matchState.dedicatedDictSearch && dictSize > ZSTD_CHUNKSIZE_MAX) {
        cdict->matchState.dedicatedDictSearch = 0;
    }
    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
        /* reference caller's buffer directly (caller must keep it alive) */
        cdict->dictContent = dictBuffer;
    } else {
        /* byCopy : duplicate the dictionary into the CDict's own workspace */
        void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
        RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
        cdict->dictContent = internalBuffer;
        ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
    }
    cdict->dictContentSize = dictSize;
    cdict->dictContentType = dictContentType;
    cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
    /* Reset the state to no dictionary */
    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
    FORWARD_IF_ERROR(ZSTD_reset_matchState(
        &cdict->matchState,
        &cdict->workspace,
        &params.cParams,
        ZSTDcrp_makeClean,
        ZSTDirp_reset,
        ZSTD_resetTarget_CDict), "");
    /* (Maybe) load the dictionary
     * Skips loading the dictionary if it is < 8 bytes.
     */
    {   /* params is a by-value copy, so these edits are local to this call */
        params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
        params.fParams.contentSizeFlag = 1;
        {   size_t const dictID = ZSTD_compress_insertDictionary(
                    &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
                    &params, cdict->dictContent, cdict->dictContentSize,
                    dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
            FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
            assert(dictID <= (size_t)(U32)-1);
            cdict->dictID = (U32)dictID;
        }
    }
    return 0;
}
  3228. static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
  3229. ZSTD_dictLoadMethod_e dictLoadMethod,
  3230. ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
  3231. {
  3232. if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
  3233. { size_t const workspaceSize =
  3234. ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
  3235. ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
  3236. ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +
  3237. (dictLoadMethod == ZSTD_dlm_byRef ? 0
  3238. : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
  3239. void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
  3240. ZSTD_cwksp ws;
  3241. ZSTD_CDict* cdict;
  3242. if (!workspace) {
  3243. ZSTD_customFree(workspace, customMem);
  3244. return NULL;
  3245. }
  3246. ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
  3247. cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
  3248. assert(cdict != NULL);
  3249. ZSTD_cwksp_move(&cdict->workspace, &ws);
  3250. cdict->customMem = customMem;
  3251. cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
  3252. return cdict;
  3253. }
  3254. }
  3255. ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
  3256. ZSTD_dictLoadMethod_e dictLoadMethod,
  3257. ZSTD_dictContentType_e dictContentType,
  3258. ZSTD_compressionParameters cParams,
  3259. ZSTD_customMem customMem)
  3260. {
  3261. ZSTD_CCtx_params cctxParams;
  3262. ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
  3263. ZSTD_CCtxParams_init(&cctxParams, 0);
  3264. cctxParams.cParams = cParams;
  3265. cctxParams.customMem = customMem;
  3266. return ZSTD_createCDict_advanced2(
  3267. dictBuffer, dictSize,
  3268. dictLoadMethod, dictContentType,
  3269. &cctxParams, customMem);
  3270. }
  3271. ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
  3272. const void* dict, size_t dictSize,
  3273. ZSTD_dictLoadMethod_e dictLoadMethod,
  3274. ZSTD_dictContentType_e dictContentType,
  3275. const ZSTD_CCtx_params* originalCctxParams,
  3276. ZSTD_customMem customMem)
  3277. {
  3278. ZSTD_CCtx_params cctxParams = *originalCctxParams;
  3279. ZSTD_compressionParameters cParams;
  3280. ZSTD_CDict* cdict;
  3281. DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
  3282. if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
  3283. if (cctxParams.enableDedicatedDictSearch) {
  3284. cParams = ZSTD_dedicatedDictSearch_getCParams(
  3285. cctxParams.compressionLevel, dictSize);
  3286. ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
  3287. } else {
  3288. cParams = ZSTD_getCParamsFromCCtxParams(
  3289. &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
  3290. }
  3291. if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
  3292. /* Fall back to non-DDSS params */
  3293. cctxParams.enableDedicatedDictSearch = 0;
  3294. cParams = ZSTD_getCParamsFromCCtxParams(
  3295. &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
  3296. }
  3297. cctxParams.cParams = cParams;
  3298. cdict = ZSTD_createCDict_advanced_internal(dictSize,
  3299. dictLoadMethod, cctxParams.cParams,
  3300. customMem);
  3301. if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
  3302. dict, dictSize,
  3303. dictLoadMethod, dictContentType,
  3304. cctxParams) )) {
  3305. ZSTD_freeCDict(cdict);
  3306. return NULL;
  3307. }
  3308. return cdict;
  3309. }
  3310. ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
  3311. {
  3312. ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
  3313. ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
  3314. ZSTD_dlm_byCopy, ZSTD_dct_auto,
  3315. cParams, ZSTD_defaultCMem);
  3316. if (cdict)
  3317. cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
  3318. return cdict;
  3319. }
  3320. ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
  3321. {
  3322. ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
  3323. ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
  3324. ZSTD_dlm_byRef, ZSTD_dct_auto,
  3325. cParams, ZSTD_defaultCMem);
  3326. if (cdict)
  3327. cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
  3328. return cdict;
  3329. }
  3330. size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
  3331. {
  3332. if (cdict==NULL) return 0; /* support free on NULL */
  3333. { ZSTD_customMem const cMem = cdict->customMem;
  3334. int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
  3335. ZSTD_cwksp_free(&cdict->workspace, cMem);
  3336. if (!cdictInWorkspace) {
  3337. ZSTD_customFree(cdict, cMem);
  3338. }
  3339. return 0;
  3340. }
  3341. }
/*! ZSTD_initStaticCDict() :
 * Generate a digested dictionary in provided memory area.
 * workspace: The memory area to emplace the dictionary into.
 *            Provided pointer must 8-bytes aligned.
 *            It must outlive dictionary usage.
 * workspaceSize: Use ZSTD_estimateCDictSize()
 *                to determine how large workspace must be.
 * cParams : use ZSTD_getCParams() to transform a compression level
 *           into its relevants cParams.
 * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
 *  Note : there is no corresponding "free" function.
 *         Since workspace was allocated externally, it must be freed externally.
 */
const ZSTD_CDict* ZSTD_initStaticCDict(
        void* workspace, size_t workspaceSize,
        const void* dict, size_t dictSize,
        ZSTD_dictLoadMethod_e dictLoadMethod,
        ZSTD_dictContentType_e dictContentType,
        ZSTD_compressionParameters cParams)
{
    /* total bytes needed : struct + (optional) dict copy + entropy ws + match state */
    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
    size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
        + (dictLoadMethod == ZSTD_dlm_byRef ? 0
           : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
        + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
        + matchStateSize;
    ZSTD_CDict* cdict;
    ZSTD_CCtx_params params;
    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
    {
        /* carve the CDict struct out of the caller's workspace;
         * the remaining space is then owned by cdict->workspace */
        ZSTD_cwksp ws;
        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
        if (cdict == NULL) return NULL;
        ZSTD_cwksp_move(&cdict->workspace, &ws);
    }
    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
    if (workspaceSize < neededSize) return NULL;
    ZSTD_CCtxParams_init(&params, 0);
    params.cParams = cParams;
    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                    dict, dictSize,
                                    dictLoadMethod, dictContentType,
                                    params) ))
        return NULL;
    return cdict;
}
  3390. ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
  3391. {
  3392. assert(cdict != NULL);
  3393. return cdict->matchState.cParams;
  3394. }
  3395. /*! ZSTD_getDictID_fromCDict() :
  3396. * Provides the dictID of the dictionary loaded into `cdict`.
  3397. * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
  3398. * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
  3399. unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
  3400. {
  3401. if (cdict==NULL) return 0;
  3402. return cdict->dictID;
  3403. }
/* ZSTD_compressBegin_usingCDict_advanced() :
 * Begins a frame using a digested dictionary, with explicit frame params.
 * Chooses between the CDict's stored cParams and fresh level-derived
 * cParams depending on the pledged source size, then may widen the
 * window so dictionary + source fit together.
 * cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict_advanced(
        ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
        ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
    ZSTD_CCtx_params cctxParams;
    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
    /* Initialize the cctxParams from the cdict */
    {
        ZSTD_parameters params;
        params.fParams = fParams;
        /* Reuse the CDict's cParams when the source is small relative to the
         * dictionary, unknown, or the CDict was built via the advanced API
         * (compressionLevel == 0); otherwise re-derive cParams from the
         * CDict's level and the now-known source size. */
        params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
                        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
                        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
                        || cdict->compressionLevel == 0 ) ?
                ZSTD_getCParamsFromCDict(cdict)
              : ZSTD_getCParams(cdict->compressionLevel,
                                pledgedSrcSize,
                                cdict->dictContentSize);
        ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
    }
    /* Increase window log to fit the entire dictionary and source if the
     * source size is known. Limit the increase to 19, which is the
     * window log for compression level 1 with the largest source size.
     */
    if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
        U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
        /* smallest power-of-two window that holds limitedSrcSize bytes */
        U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
        cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
    }
    return ZSTD_compressBegin_internal(cctx,
                                       NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                       cdict,
                                       &cctxParams, pledgedSrcSize,
                                       ZSTDb_not_buffered);
}
  3442. /* ZSTD_compressBegin_usingCDict() :
  3443. * pledgedSrcSize=0 means "unknown"
  3444. * if pledgedSrcSize>0, it will enable contentSizeFlag */
  3445. size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
  3446. {
  3447. ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
  3448. DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
  3449. return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
  3450. }
  3451. size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
  3452. void* dst, size_t dstCapacity,
  3453. const void* src, size_t srcSize,
  3454. const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
  3455. {
  3456. FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
  3457. return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
  3458. }
  3459. /*! ZSTD_compress_usingCDict() :
  3460. * Compression using a digested Dictionary.
  3461. * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
  3462. * Note that compression parameters are decided at CDict creation time
  3463. * while frame parameters are hardcoded */
  3464. size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
  3465. void* dst, size_t dstCapacity,
  3466. const void* src, size_t srcSize,
  3467. const ZSTD_CDict* cdict)
  3468. {
  3469. ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
  3470. return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
  3471. }
  3472. /* ******************************************************************
  3473. * Streaming
  3474. ********************************************************************/
  3475. ZSTD_CStream* ZSTD_createCStream(void)
  3476. {
  3477. DEBUGLOG(3, "ZSTD_createCStream");
  3478. return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
  3479. }
  3480. ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
  3481. {
  3482. return ZSTD_initStaticCCtx(workspace, workspaceSize);
  3483. }
  3484. ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
  3485. { /* CStream and CCtx are now same object */
  3486. return ZSTD_createCCtx_advanced(customMem);
  3487. }
  3488. size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
  3489. {
  3490. return ZSTD_freeCCtx(zcs); /* same object */
  3491. }
  3492. /*====== Initialization ======*/
  3493. size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; }
  3494. size_t ZSTD_CStreamOutSize(void)
  3495. {
  3496. return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
  3497. }
  3498. static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
  3499. {
  3500. if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
  3501. return ZSTD_cpm_attachDict;
  3502. else
  3503. return ZSTD_cpm_noAttachDict;
  3504. }
  3505. /* ZSTD_resetCStream():
  3506. * pledgedSrcSize == 0 means "unknown" */
  3507. size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
  3508. {
  3509. /* temporary : 0 interpreted as "unknown" during transition period.
  3510. * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
  3511. * 0 will be interpreted as "empty" in the future.
  3512. */
  3513. U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
  3514. DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
  3515. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3516. FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
  3517. return 0;
  3518. }
  3519. /*! ZSTD_initCStream_internal() :
  3520. * Note : for lib/compress only. Used by zstdmt_compress.c.
  3521. * Assumption 1 : params are valid
  3522. * Assumption 2 : either dict, or cdict, is defined, not both */
  3523. size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
  3524. const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
  3525. const ZSTD_CCtx_params* params,
  3526. unsigned long long pledgedSrcSize)
  3527. {
  3528. DEBUGLOG(4, "ZSTD_initCStream_internal");
  3529. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3530. FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
  3531. assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
  3532. zcs->requestedParams = *params;
  3533. assert(!((dict) && (cdict))); /* either dict or cdict, not both */
  3534. if (dict) {
  3535. FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
  3536. } else {
  3537. /* Dictionary is cleared if !cdict */
  3538. FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
  3539. }
  3540. return 0;
  3541. }
  3542. /* ZSTD_initCStream_usingCDict_advanced() :
  3543. * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
  3544. size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
  3545. const ZSTD_CDict* cdict,
  3546. ZSTD_frameParameters fParams,
  3547. unsigned long long pledgedSrcSize)
  3548. {
  3549. DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
  3550. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3551. FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
  3552. zcs->requestedParams.fParams = fParams;
  3553. FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
  3554. return 0;
  3555. }
  3556. /* note : cdict must outlive compression session */
  3557. size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
  3558. {
  3559. DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
  3560. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3561. FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
  3562. return 0;
  3563. }
  3564. /* ZSTD_initCStream_advanced() :
  3565. * pledgedSrcSize must be exact.
  3566. * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
  3567. * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
  3568. size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
  3569. const void* dict, size_t dictSize,
  3570. ZSTD_parameters params, unsigned long long pss)
  3571. {
  3572. /* for compatibility with older programs relying on this behavior.
  3573. * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
  3574. * This line will be removed in the future.
  3575. */
  3576. U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
  3577. DEBUGLOG(4, "ZSTD_initCStream_advanced");
  3578. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3579. FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
  3580. FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
  3581. ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
  3582. FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
  3583. return 0;
  3584. }
  3585. size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
  3586. {
  3587. DEBUGLOG(4, "ZSTD_initCStream_usingDict");
  3588. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3589. FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
  3590. FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
  3591. return 0;
  3592. }
  3593. size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
  3594. {
  3595. /* temporary : 0 interpreted as "unknown" during transition period.
  3596. * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
  3597. * 0 will be interpreted as "empty" in the future.
  3598. */
  3599. U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
  3600. DEBUGLOG(4, "ZSTD_initCStream_srcSize");
  3601. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3602. FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
  3603. FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
  3604. FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
  3605. return 0;
  3606. }
  3607. size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
  3608. {
  3609. DEBUGLOG(4, "ZSTD_initCStream");
  3610. FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
  3611. FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
  3612. FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
  3613. return 0;
  3614. }
  3615. /*====== Compression ======*/
  3616. static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
  3617. {
  3618. size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
  3619. if (hintInSize==0) hintInSize = cctx->blockSize;
  3620. return hintInSize;
  3621. }
  3622. /* ZSTD_compressStream_generic():
  3623. * internal function for all *compressStream*() variants
  3624. * non-static, because can be called from zstdmt_compress.c
  3625. * @return : hint size for next input */
  3626. static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
  3627. ZSTD_outBuffer* output,
  3628. ZSTD_inBuffer* input,
  3629. ZSTD_EndDirective const flushMode)
  3630. {
  3631. const char* const istart = (const char*)input->src;
  3632. const char* const iend = input->size != 0 ? istart + input->size : istart;
  3633. const char* ip = input->pos != 0 ? istart + input->pos : istart;
  3634. char* const ostart = (char*)output->dst;
  3635. char* const oend = output->size != 0 ? ostart + output->size : ostart;
  3636. char* op = output->pos != 0 ? ostart + output->pos : ostart;
  3637. U32 someMoreWork = 1;
  3638. /* check expectations */
  3639. DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
  3640. if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
  3641. assert(zcs->inBuff != NULL);
  3642. assert(zcs->inBuffSize > 0);
  3643. }
  3644. if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
  3645. assert(zcs->outBuff != NULL);
  3646. assert(zcs->outBuffSize > 0);
  3647. }
  3648. assert(output->pos <= output->size);
  3649. assert(input->pos <= input->size);
  3650. assert((U32)flushMode <= (U32)ZSTD_e_end);
  3651. while (someMoreWork) {
  3652. switch(zcs->streamStage)
  3653. {
  3654. case zcss_init:
  3655. RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
  3656. case zcss_load:
  3657. if ( (flushMode == ZSTD_e_end)
  3658. && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */
  3659. || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */
  3660. && (zcs->inBuffPos == 0) ) {
  3661. /* shortcut to compression pass directly into output buffer */
  3662. size_t const cSize = ZSTD_compressEnd(zcs,
  3663. op, oend-op, ip, iend-ip);
  3664. DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
  3665. FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
  3666. ip = iend;
  3667. op += cSize;
  3668. zcs->frameEnded = 1;
  3669. ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
  3670. someMoreWork = 0; break;
  3671. }
  3672. /* complete loading into inBuffer in buffered mode */
  3673. if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
  3674. size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
  3675. size_t const loaded = ZSTD_limitCopy(
  3676. zcs->inBuff + zcs->inBuffPos, toLoad,
  3677. ip, iend-ip);
  3678. zcs->inBuffPos += loaded;
  3679. if (loaded != 0)
  3680. ip += loaded;
  3681. if ( (flushMode == ZSTD_e_continue)
  3682. && (zcs->inBuffPos < zcs->inBuffTarget) ) {
  3683. /* not enough input to fill full block : stop here */
  3684. someMoreWork = 0; break;
  3685. }
  3686. if ( (flushMode == ZSTD_e_flush)
  3687. && (zcs->inBuffPos == zcs->inToCompress) ) {
  3688. /* empty */
  3689. someMoreWork = 0; break;
  3690. }
  3691. }
  3692. /* compress current block (note : this stage cannot be stopped in the middle) */
  3693. DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
  3694. { int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
  3695. void* cDst;
  3696. size_t cSize;
  3697. size_t oSize = oend-op;
  3698. size_t const iSize = inputBuffered
  3699. ? zcs->inBuffPos - zcs->inToCompress
  3700. : MIN((size_t)(iend - ip), zcs->blockSize);
  3701. if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
  3702. cDst = op; /* compress into output buffer, to skip flush stage */
  3703. else
  3704. cDst = zcs->outBuff, oSize = zcs->outBuffSize;
  3705. if (inputBuffered) {
  3706. unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
  3707. cSize = lastBlock ?
  3708. ZSTD_compressEnd(zcs, cDst, oSize,
  3709. zcs->inBuff + zcs->inToCompress, iSize) :
  3710. ZSTD_compressContinue(zcs, cDst, oSize,
  3711. zcs->inBuff + zcs->inToCompress, iSize);
  3712. FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
  3713. zcs->frameEnded = lastBlock;
  3714. /* prepare next block */
  3715. zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
  3716. if (zcs->inBuffTarget > zcs->inBuffSize)
  3717. zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
  3718. DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
  3719. (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
  3720. if (!lastBlock)
  3721. assert(zcs->inBuffTarget <= zcs->inBuffSize);
  3722. zcs->inToCompress = zcs->inBuffPos;
  3723. } else {
  3724. unsigned const lastBlock = (ip + iSize == iend);
  3725. assert(flushMode == ZSTD_e_end /* Already validated */);
  3726. cSize = lastBlock ?
  3727. ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
  3728. ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
  3729. /* Consume the input prior to error checking to mirror buffered mode. */
  3730. if (iSize > 0)
  3731. ip += iSize;
  3732. FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
  3733. zcs->frameEnded = lastBlock;
  3734. if (lastBlock)
  3735. assert(ip == iend);
  3736. }
  3737. if (cDst == op) { /* no need to flush */
  3738. op += cSize;
  3739. if (zcs->frameEnded) {
  3740. DEBUGLOG(5, "Frame completed directly in outBuffer");
  3741. someMoreWork = 0;
  3742. ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
  3743. }
  3744. break;
  3745. }
  3746. zcs->outBuffContentSize = cSize;
  3747. zcs->outBuffFlushedSize = 0;
  3748. zcs->streamStage = zcss_flush; /* pass-through to flush stage */
  3749. }
  3750. ZSTD_FALLTHROUGH;
  3751. case zcss_flush:
  3752. DEBUGLOG(5, "flush stage");
  3753. assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
  3754. { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
  3755. size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
  3756. zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
  3757. DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
  3758. (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
  3759. if (flushed)
  3760. op += flushed;
  3761. zcs->outBuffFlushedSize += flushed;
  3762. if (toFlush!=flushed) {
  3763. /* flush not fully completed, presumably because dst is too small */
  3764. assert(op==oend);
  3765. someMoreWork = 0;
  3766. break;
  3767. }
  3768. zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
  3769. if (zcs->frameEnded) {
  3770. DEBUGLOG(5, "Frame completed on flush");
  3771. someMoreWork = 0;
  3772. ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
  3773. break;
  3774. }
  3775. zcs->streamStage = zcss_load;
  3776. break;
  3777. }
  3778. default: /* impossible */
  3779. assert(0);
  3780. }
  3781. }
  3782. input->pos = ip - istart;
  3783. output->pos = op - ostart;
  3784. if (zcs->frameEnded) return 0;
  3785. return ZSTD_nextInputSizeHint(zcs);
  3786. }
/* ZSTD_nextInputSizeHint_MTorST() :
 * Forwards to ZSTD_nextInputSizeHint().
 * NOTE(review): the MTorST name suggests builds with multithread support
 * dispatch differently here — confirm against the full source tree. */
static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
{
    return ZSTD_nextInputSizeHint(cctx);
}
  3791. size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
  3792. {
  3793. FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
  3794. return ZSTD_nextInputSizeHint_MTorST(zcs);
  3795. }
  3796. /* After a compression call set the expected input/output buffer.
  3797. * This is validated at the start of the next compression call.
  3798. */
  3799. static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
  3800. {
  3801. if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
  3802. cctx->expectedInBuffer = *input;
  3803. }
  3804. if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
  3805. cctx->expectedOutBufferSize = output->size - output->pos;
  3806. }
  3807. }
  3808. /* Validate that the input/output buffers match the expectations set by
  3809. * ZSTD_setBufferExpectations.
  3810. */
  3811. static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
  3812. ZSTD_outBuffer const* output,
  3813. ZSTD_inBuffer const* input,
  3814. ZSTD_EndDirective endOp)
  3815. {
  3816. if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
  3817. ZSTD_inBuffer const expect = cctx->expectedInBuffer;
  3818. if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
  3819. RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
  3820. if (endOp != ZSTD_e_end)
  3821. RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
  3822. }
  3823. if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
  3824. size_t const outBufferSize = output->size - output->pos;
  3825. if (cctx->expectedOutBufferSize != outBufferSize)
  3826. RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
  3827. }
  3828. return 0;
  3829. }
/* ZSTD_CCtx_init_compressStream2() :
 * Transparent (lazy) initialization performed on the first streaming call of a
 * new frame : resolves the effective parameters, consumes any one-shot prefix
 * dictionary, starts the frame via ZSTD_compressBegin_internal(), and resets
 * the streaming state machine to zcss_load.
 * @param endOp  directive of the triggering call; ZSTD_e_end lets inSize pin pledgedSrcSize.
 * @param inSize size of the first input; used for the pledgedSrcSize auto-fix.
 * @return 0 on success, or a forwarded error code. */
static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
                                             ZSTD_EndDirective endOp,
                                             size_t inSize) {
    ZSTD_CCtx_params params = cctx->requestedParams;
    ZSTD_prefixDict const prefixDict = cctx->prefixDict;
    FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */
    assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */
    if (cctx->cdict)
        params.compressionLevel = cctx->cdict->compressionLevel; /* let cdict take priority in terms of compression level */
    DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
    /* single-pass ZSTD_e_end call : the whole input is known, so pledge it */
    if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1;  /* auto-fix pledgedSrcSize */
    {   /* resolve compression parameters from level + srcSize + dictSize hints */
        size_t const dictSize = prefixDict.dict
                ? prefixDict.dictSize
                : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
        ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
        params.cParams = ZSTD_getCParamsFromCCtxParams(
                &params, cctx->pledgedSrcSizePlusOne-1,
                dictSize, mode);
    }
    if (ZSTD_CParams_shouldEnableLdm(&params.cParams)) {
        /* Enable LDM by default for optimal parser and window size >= 128MB */
        DEBUGLOG(4, "LDM enabled by default (window size >= 128MB, strategy >= btopt)");
        params.ldmParams.enableLdm = 1;
    }
    {   U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
        assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
        FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
                prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
                cctx->cdict,
                &params, pledgedSrcSize,
                ZSTDb_buffered) , "");
        assert(cctx->appliedParams.nbWorkers == 0);
        /* reset streaming positions for the new frame */
        cctx->inToCompress = 0;
        cctx->inBuffPos = 0;
        if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
            /* for small input: avoid automatic flush on reaching end of block, since
             * it would require to add a 3-bytes null block to end frame
             */
            cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
        } else {
            cctx->inBuffTarget = 0;
        }
        cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
        cctx->streamStage = zcss_load;
        cctx->frameEnded = 0;
    }
    return 0;
}
  3880. size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
  3881. ZSTD_outBuffer* output,
  3882. ZSTD_inBuffer* input,
  3883. ZSTD_EndDirective endOp)
  3884. {
  3885. DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
  3886. /* check conditions */
  3887. RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
  3888. RETURN_ERROR_IF(input->pos > input->size, srcSize_wrong, "invalid input buffer");
  3889. RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
  3890. assert(cctx != NULL);
  3891. /* transparent initialization stage */
  3892. if (cctx->streamStage == zcss_init) {
  3893. FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
  3894. ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
  3895. }
  3896. /* end of transparent initialization stage */
  3897. FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
  3898. /* compression stage */
  3899. FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
  3900. DEBUGLOG(5, "completed ZSTD_compressStream2");
  3901. ZSTD_setBufferExpectations(cctx, output, input);
  3902. return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
  3903. }
  3904. size_t ZSTD_compressStream2_simpleArgs (
  3905. ZSTD_CCtx* cctx,
  3906. void* dst, size_t dstCapacity, size_t* dstPos,
  3907. const void* src, size_t srcSize, size_t* srcPos,
  3908. ZSTD_EndDirective endOp)
  3909. {
  3910. ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
  3911. ZSTD_inBuffer input = { src, srcSize, *srcPos };
  3912. /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
  3913. size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
  3914. *dstPos = output.pos;
  3915. *srcPos = input.pos;
  3916. return cErr;
  3917. }
  3918. size_t ZSTD_compress2(ZSTD_CCtx* cctx,
  3919. void* dst, size_t dstCapacity,
  3920. const void* src, size_t srcSize)
  3921. {
  3922. ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
  3923. ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
  3924. DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
  3925. ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
  3926. /* Enable stable input/output buffers. */
  3927. cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
  3928. cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
  3929. { size_t oPos = 0;
  3930. size_t iPos = 0;
  3931. size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
  3932. dst, dstCapacity, &oPos,
  3933. src, srcSize, &iPos,
  3934. ZSTD_e_end);
  3935. /* Reset to the original values. */
  3936. cctx->requestedParams.inBufferMode = originalInBufferMode;
  3937. cctx->requestedParams.outBufferMode = originalOutBufferMode;
  3938. FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
  3939. if (result != 0) { /* compression not completed, due to lack of output space */
  3940. assert(oPos == dstCapacity);
  3941. RETURN_ERROR(dstSize_tooSmall, "");
  3942. }
  3943. assert(iPos == srcSize); /* all input is expected consumed */
  3944. return oPos;
  3945. }
  3946. }
/* Tracks progress through a caller-provided ZSTD_Sequence array as it is
 * consumed block by block by the sequence copiers. */
typedef struct {
    U32 idx;             /* Index in array of ZSTD_Sequence */
    U32 posInSequence;   /* Position within sequence at idx */
    size_t posInSrc;     /* Number of bytes given by sequences provided so far */
} ZSTD_sequencePosition;
  3952. /* Returns a ZSTD error code if sequence is not valid */
  3953. static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
  3954. size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch) {
  3955. size_t offsetBound;
  3956. U32 windowSize = 1 << windowLog;
  3957. /* posInSrc represents the amount of data the the decoder would decode up to this point.
  3958. * As long as the amount of data decoded is less than or equal to window size, offsets may be
  3959. * larger than the total length of output decoded in order to reference the dict, even larger than
  3960. * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
  3961. */
  3962. offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
  3963. RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!");
  3964. RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small");
  3965. return 0;
  3966. }
  3967. /* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
  3968. static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) {
  3969. U32 offCode = rawOffset + ZSTD_REP_MOVE;
  3970. U32 repCode = 0;
  3971. if (!ll0 && rawOffset == rep[0]) {
  3972. repCode = 1;
  3973. } else if (rawOffset == rep[1]) {
  3974. repCode = 2 - ll0;
  3975. } else if (rawOffset == rep[2]) {
  3976. repCode = 3 - ll0;
  3977. } else if (ll0 && rawOffset == rep[0] - 1) {
  3978. repCode = 3;
  3979. }
  3980. if (repCode) {
  3981. /* ZSTD_storeSeq expects a number in the range [0, 2] to represent a repcode */
  3982. offCode = repCode - 1;
  3983. }
  3984. return offCode;
  3985. }
  3986. /* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
  3987. * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
  3988. */
  3989. static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
  3990. const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
  3991. const void* src, size_t blockSize) {
  3992. U32 idx = seqPos->idx;
  3993. BYTE const* ip = (BYTE const*)(src);
  3994. const BYTE* const iend = ip + blockSize;
  3995. repcodes_t updatedRepcodes;
  3996. U32 dictSize;
  3997. U32 litLength;
  3998. U32 matchLength;
  3999. U32 ll0;
  4000. U32 offCode;
  4001. if (cctx->cdict) {
  4002. dictSize = (U32)cctx->cdict->dictContentSize;
  4003. } else if (cctx->prefixDict.dict) {
  4004. dictSize = (U32)cctx->prefixDict.dictSize;
  4005. } else {
  4006. dictSize = 0;
  4007. }
  4008. ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
  4009. for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) {
  4010. litLength = inSeqs[idx].litLength;
  4011. matchLength = inSeqs[idx].matchLength;
  4012. ll0 = litLength == 0;
  4013. offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
  4014. updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
  4015. DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
  4016. if (cctx->appliedParams.validateSequences) {
  4017. seqPos->posInSrc += litLength + matchLength;
  4018. FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
  4019. cctx->appliedParams.cParams.windowLog, dictSize,
  4020. cctx->appliedParams.cParams.minMatch),
  4021. "Sequence validation failed");
  4022. }
  4023. RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
  4024. "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
  4025. ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
  4026. ip += matchLength + litLength;
  4027. }
  4028. ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
  4029. if (inSeqs[idx].litLength) {
  4030. DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
  4031. ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
  4032. ip += inSeqs[idx].litLength;
  4033. seqPos->posInSrc += inSeqs[idx].litLength;
  4034. }
  4035. RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
  4036. seqPos->idx = idx+1;
  4037. return 0;
  4038. }
/* Returns the number of bytes to move the current read position back by. Only non-zero
 * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something
 * went wrong.
 *
 * This function will attempt to scan through blockSize bytes represented by the sequences
 * in inSeqs, storing any (partial) sequences.
 *
 * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
 * avoid splitting a match, or to avoid splitting a match such that it would produce a match
 * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
 */
static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
                                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
                                                       const void* src, size_t blockSize) {
    U32 idx = seqPos->idx;
    U32 startPosInSequence = seqPos->posInSequence;
    /* target : consume exactly blockSize bytes worth of (partial) sequences */
    U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
    size_t dictSize;
    BYTE const* ip = (BYTE const*)(src);
    BYTE const* iend = ip + blockSize;  /* May be adjusted if we decide to process fewer than blockSize bytes */
    repcodes_t updatedRepcodes;
    U32 bytesAdjustment = 0;   /* bytes handed back to the caller (returned value) */
    U32 finalMatchSplit = 0;   /* set when the last match of the block had to be split */
    U32 litLength;
    U32 matchLength;
    U32 rawOffset;
    U32 offCode;

    if (cctx->cdict) {
        dictSize = cctx->cdict->dictContentSize;
    } else if (cctx->prefixDict.dict) {
        dictSize = cctx->prefixDict.dictSize;
    } else {
        dictSize = 0;
    }
    DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
    DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
    while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
        const ZSTD_Sequence currSeq = inSeqs[idx];
        litLength = currSeq.litLength;
        matchLength = currSeq.matchLength;
        rawOffset = currSeq.offset;

        /* Modify the sequence depending on where endPosInSequence lies */
        if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
            /* the whole (remaining part of the) sequence fits in this block */
            if (startPosInSequence >= litLength) {
                /* resume point was inside the match : trim the match head */
                startPosInSequence -= litLength;
                litLength = 0;
                matchLength -= startPosInSequence;
            } else {
                /* resume point was inside the literals : trim the literal head */
                litLength -= startPosInSequence;
            }
            /* Move to the next sequence */
            endPosInSequence -= currSeq.litLength + currSeq.matchLength;
            startPosInSequence = 0;
            idx++;
        } else {
            /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
               does not reach the end of the match. So, we have to split the sequence */
            DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
                     currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
            if (endPosInSequence > litLength) {
                U32 firstHalfMatchLength;
                litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
                firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
                if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
                    /* Only ever split the match if it is larger than the block size */
                    U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
                    if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
                        /* Move the endPosInSequence backward so that it creates match of minMatch length */
                        endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
                        bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
                        firstHalfMatchLength -= bytesAdjustment;
                    }
                    matchLength = firstHalfMatchLength;
                    /* Flag that we split the last match - after storing the sequence, exit the loop,
                       but keep the value of endPosInSequence */
                    finalMatchSplit = 1;
                } else {
                    /* Move the position in sequence backwards so that we don't split match, and break to store
                     * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
                     * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so
                     * would cause the first half of the match to be too small
                     */
                    bytesAdjustment = endPosInSequence - currSeq.litLength;
                    endPosInSequence = currSeq.litLength;
                    break;
                }
            } else {
                /* This sequence ends inside the literals, break to store the last literals */
                break;
            }
        }
        /* Check if this offset can be represented with a repcode */
        {   U32 ll0 = (litLength == 0);
            offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
            updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
        }

        if (cctx->appliedParams.validateSequences) {
            seqPos->posInSrc += litLength + matchLength;
            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
                                                   cctx->appliedParams.cParams.windowLog, dictSize,
                                                   cctx->appliedParams.cParams.minMatch),
                             "Sequence validation failed");
        }
        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
        ip += matchLength + litLength;
    }
    /* NOTE(review): this log reads inSeqs[idx] even when idx == inSeqsSize, a
     * debug-build-only out-of-bounds read — confirm against upstream fix. */
    DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
    assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
    seqPos->idx = idx;
    seqPos->posInSequence = endPosInSequence;
    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));

    iend -= bytesAdjustment;
    if (ip != iend) {
        /* Store any last literals */
        U32 lastLLSize = (U32)(iend - ip);
        assert(ip <= iend);
        DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
        ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
        seqPos->posInSrc += lastLLSize;
    }
    return bytesAdjustment;
}
  4165. typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
  4166. const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
  4167. const void* src, size_t blockSize);
  4168. static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) {
  4169. ZSTD_sequenceCopier sequenceCopier = NULL;
  4170. assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
  4171. if (mode == ZSTD_sf_explicitBlockDelimiters) {
  4172. return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
  4173. } else if (mode == ZSTD_sf_noBlockDelimiters) {
  4174. return ZSTD_copySequencesToSeqStoreNoBlockDelim;
  4175. }
  4176. assert(sequenceCopier != NULL);
  4177. return sequenceCopier;
  4178. }
/* Compress, block-by-block, all of the sequences given.
 *
 * Returns the cumulative size of all compressed blocks (including their headers), otherwise a ZSTD error.
 */
static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
                                              void* dst, size_t dstCapacity,
                                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
                                              const void* src, size_t srcSize) {
    size_t cSize = 0;
    U32 lastBlock;
    size_t blockSize;
    size_t compressedSeqsSize;
    size_t remaining = srcSize;    /* bytes of src not yet emitted */
    ZSTD_sequencePosition seqPos = {0, 0, 0};

    BYTE const* ip = (BYTE const*)src;
    BYTE* op = (BYTE*)dst;
    /* dispatch on delimiter mode : explicit block delimiters vs. auto-split */
    ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);

    DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
    /* Special case: empty frame */
    if (remaining == 0) {
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
        MEM_writeLE32(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
        cSize += ZSTD_blockHeaderSize;
    }

    while (remaining) {
        size_t cBlockSize;
        size_t additionalByteAdjustment;
        lastBlock = remaining <= cctx->blockSize;
        blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
        ZSTD_resetSeqStore(&cctx->seqStore);
        DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);

        /* fill the seqStore from inSeqs; the copier may shrink the block to
         * avoid splitting a match (returned as a byte adjustment) */
        additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
        FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
        blockSize -= additionalByteAdjustment;

        /* If blocks are too small, emit as a nocompress block */
        if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
            DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
            cSize += cBlockSize;
            ip += blockSize;
            op += cBlockSize;
            remaining -= blockSize;
            dstCapacity -= cBlockSize;
            continue;
        }

        compressedSeqsSize = ZSTD_entropyCompressSequences(&cctx->seqStore,
                                &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
                                &cctx->appliedParams,
                                op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
                                blockSize,
                                cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
                                cctx->bmi2);
        FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
        DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);

        /* NOTE(review): the RLE check scans the WHOLE src/srcSize rather than
         * the current block (ip/blockSize) — confirm against upstream zstd. */
        if (!cctx->isFirstBlock &&
            ZSTD_maybeRLE(&cctx->seqStore) &&
            ZSTD_isRLE((BYTE const*)src, srcSize)) {
            /* We don't want to emit our first block as a RLE even if it qualifies because
             * doing so will cause the decoder (cli only) to throw a "should consume all input error."
             * This is only an issue for zstd <= v1.4.3
             */
            compressedSeqsSize = 1;
        }

        if (compressedSeqsSize == 0) {
            /* ZSTD_noCompressBlock writes the block header as well */
            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
            DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
        } else if (compressedSeqsSize == 1) {
            cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
            FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
            DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
        } else {
            U32 cBlockHeader;
            /* Error checking and repcodes update */
            ZSTD_confirmRepcodesAndEntropyTables(cctx);
            if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
                cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

            /* Write block header into beginning of block*/
            cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
            MEM_writeLE24(op, cBlockHeader);
            cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
            DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
        }

        cSize += cBlockSize;
        DEBUGLOG(4, "cSize running total: %zu", cSize);

        if (lastBlock) {
            break;
        } else {
            ip += blockSize;
            op += cBlockSize;
            remaining -= blockSize;
            dstCapacity -= cBlockSize;
            cctx->isFirstBlock = 0;
        }
    }

    return cSize;
}
  4281. size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
  4282. const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
  4283. const void* src, size_t srcSize) {
  4284. BYTE* op = (BYTE*)dst;
  4285. size_t cSize = 0;
  4286. size_t compressedBlocksSize = 0;
  4287. size_t frameHeaderSize = 0;
  4288. /* Transparent initialization stage, same as compressStream2() */
  4289. DEBUGLOG(3, "ZSTD_compressSequences()");
  4290. assert(cctx != NULL);
  4291. FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
  4292. /* Begin writing output, starting with frame header */
  4293. frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
  4294. op += frameHeaderSize;
  4295. dstCapacity -= frameHeaderSize;
  4296. cSize += frameHeaderSize;
  4297. if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
  4298. xxh64_update(&cctx->xxhState, src, srcSize);
  4299. }
  4300. /* cSize includes block header size and compressed sequences size */
  4301. compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
  4302. op, dstCapacity,
  4303. inSeqs, inSeqsSize,
  4304. src, srcSize);
  4305. FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
  4306. cSize += compressedBlocksSize;
  4307. dstCapacity -= compressedBlocksSize;
  4308. if (cctx->appliedParams.fParams.checksumFlag) {
  4309. U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
  4310. RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
  4311. DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
  4312. MEM_writeLE32((char*)dst + cSize, checksum);
  4313. cSize += 4;
  4314. }
  4315. DEBUGLOG(3, "Final compressed size: %zu", cSize);
  4316. return cSize;
  4317. }
  4318. /*====== Finalize ======*/
  4319. /*! ZSTD_flushStream() :
  4320. * @return : amount of data remaining to flush */
  4321. size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
  4322. {
  4323. ZSTD_inBuffer input = { NULL, 0, 0 };
  4324. return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
  4325. }
  4326. size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
  4327. {
  4328. ZSTD_inBuffer input = { NULL, 0, 0 };
  4329. size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
  4330. FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
  4331. if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
  4332. /* single thread mode : attempt to calculate remaining to flush more precisely */
  4333. { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
  4334. size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
  4335. size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
  4336. DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
  4337. return toFlush;
  4338. }
  4339. }
  4340. /*-===== Pre-defined compression levels =====-*/
  4341. #define ZSTD_MAX_CLEVEL 22
  4342. int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
  4343. int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
/* Pre-computed compression parameters, indexed as
 * ZSTD_defaultCParameters[srcSize class][compression level].
 * The 4 srcSize classes match the tableID selection in
 * ZSTD_getCParams_internal(): >256 KB, <=256 KB, <=128 KB, <=16 KB.
 * Row 0 is the baseline for negative ("fast") levels; rows 1..22 map
 * directly to compression levels.
 * Column legend: W=windowLog, C=chainLog, H=hashLog, S=searchLog,
 * L=search length (minMatch), TL/T=targetLength, strat=strategy.
 * (Column-to-field mapping follows ZSTD_compressionParameters order —
 * NOTE(review): confirm against zstd.h if the struct ever changes.) */
static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
{   /* "default" - for any srcSize > 256 KB */
    /* W,  C,  H,  S,  L, TL, strat */
    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
    { 21, 16, 17,  1,  5,  0, ZSTD_dfast   },  /* level  3 */
    { 21, 18, 18,  1,  5,  0, ZSTD_dfast   },  /* level  4 */
    { 21, 18, 19,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
    { 21, 19, 19,  3,  5,  4, ZSTD_greedy  },  /* level  6 */
    { 21, 19, 19,  3,  5,  8, ZSTD_lazy    },  /* level  7 */
    { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
    { 22, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 13 */
    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */
    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */
    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */
    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* level 19 */
    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */
    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */
    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */
},
{   /* for srcSize <= 256 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 18, 14, 14,  1,  5,  0, ZSTD_dfast   },  /* level  2 */
    { 18, 16, 16,  1,  4,  0, ZSTD_dfast   },  /* level  3 */
    { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
    { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/
    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/
    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */
    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 128 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
    { 17, 15, 16,  2,  5,  0, ZSTD_dfast   },  /* level  3 */
    { 17, 17, 17,  2,  4,  0, ZSTD_dfast   },  /* level  4 */
    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
    { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
    { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */
    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */
    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/
    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/
    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/
    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 16 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
    { 14, 14, 15,  2,  4,  0, ZSTD_dfast   },  /* level  3 */
    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/
    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/
    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/
    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/
    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/
    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/
},
};
  4450. static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
  4451. {
  4452. ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
  4453. switch (cParams.strategy) {
  4454. case ZSTD_fast:
  4455. case ZSTD_dfast:
  4456. break;
  4457. case ZSTD_greedy:
  4458. case ZSTD_lazy:
  4459. case ZSTD_lazy2:
  4460. cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
  4461. break;
  4462. case ZSTD_btlazy2:
  4463. case ZSTD_btopt:
  4464. case ZSTD_btultra:
  4465. case ZSTD_btultra2:
  4466. break;
  4467. }
  4468. return cParams;
  4469. }
  4470. static int ZSTD_dedicatedDictSearch_isSupported(
  4471. ZSTD_compressionParameters const* cParams)
  4472. {
  4473. return (cParams->strategy >= ZSTD_greedy)
  4474. && (cParams->strategy <= ZSTD_lazy2)
  4475. && (cParams->hashLog >= cParams->chainLog)
  4476. && (cParams->chainLog <= 24);
  4477. }
  4478. /*
  4479. * Reverses the adjustment applied to cparams when enabling dedicated dict
  4480. * search. This is used to recover the params set to be used in the working
  4481. * context. (Otherwise, those tables would also grow.)
  4482. */
  4483. static void ZSTD_dedicatedDictSearch_revertCParams(
  4484. ZSTD_compressionParameters* cParams) {
  4485. switch (cParams->strategy) {
  4486. case ZSTD_fast:
  4487. case ZSTD_dfast:
  4488. break;
  4489. case ZSTD_greedy:
  4490. case ZSTD_lazy:
  4491. case ZSTD_lazy2:
  4492. cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
  4493. break;
  4494. case ZSTD_btlazy2:
  4495. case ZSTD_btopt:
  4496. case ZSTD_btultra:
  4497. case ZSTD_btultra2:
  4498. break;
  4499. }
  4500. }
  4501. static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
  4502. {
  4503. switch (mode) {
  4504. case ZSTD_cpm_unknown:
  4505. case ZSTD_cpm_noAttachDict:
  4506. case ZSTD_cpm_createCDict:
  4507. break;
  4508. case ZSTD_cpm_attachDict:
  4509. dictSize = 0;
  4510. break;
  4511. default:
  4512. assert(0);
  4513. break;
  4514. }
  4515. { int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
  4516. size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
  4517. return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
  4518. }
  4519. }
  4520. /*! ZSTD_getCParams_internal() :
  4521. * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
  4522. * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
  4523. * Use dictSize == 0 for unknown or unused.
  4524. * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
  4525. static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
  4526. {
  4527. U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
  4528. U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
  4529. int row;
  4530. DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
  4531. /* row */
  4532. if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
  4533. else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */
  4534. else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
  4535. else row = compressionLevel;
  4536. { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
  4537. /* acceleration factor */
  4538. if (compressionLevel < 0) {
  4539. int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
  4540. cp.targetLength = (unsigned)(-clampedCompressionLevel);
  4541. }
  4542. /* refine parameters based on srcSize & dictSize */
  4543. return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
  4544. }
  4545. }
  4546. /*! ZSTD_getCParams() :
  4547. * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
  4548. * Size values are optional, provide 0 if not known or unused */
  4549. ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
  4550. {
  4551. if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
  4552. return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
  4553. }
  4554. /*! ZSTD_getParams() :
  4555. * same idea as ZSTD_getCParams()
  4556. * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
  4557. * Fields of `ZSTD_frameParameters` are set to default values */
  4558. static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
  4559. ZSTD_parameters params;
  4560. ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
  4561. DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
  4562. ZSTD_memset(&params, 0, sizeof(params));
  4563. params.cParams = cParams;
  4564. params.fParams.contentSizeFlag = 1;
  4565. return params;
  4566. }
  4567. /*! ZSTD_getParams() :
  4568. * same idea as ZSTD_getCParams()
  4569. * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
  4570. * Fields of `ZSTD_frameParameters` are set to default values */
  4571. ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
  4572. if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
  4573. return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
  4574. }