// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: try to use extents tree (instead of array)
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/log2.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/* runs_tree is a contiguous chunk of memory. Try to avoid big sizes. */
#define NTFS3_RUN_MAX_BYTES 0x10000

struct ntfs_run {
	CLST vcn; /* Virtual cluster number. */
	CLST len; /* Length in clusters. */
	CLST lcn; /* Logical cluster number. */
};
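
/*
 * Runs in the array are kept sorted by vcn and non-overlapping:
 * run_lookup() relies on that ordering for its binary search, and
 * run_consolidate() restores it after every insertion.
 */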

/*
 * run_lookup - Lookup the index of the MCB entry that is first <= vcn.
 *
 * On success, returns true and sets @index to the index of the entry found.
 * If the entry is missing from the list, @index is set to the insertion
 * position for the entry in question.
 */
static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
{
	size_t min_idx, max_idx, mid_idx;
	struct ntfs_run *r;

	if (!run->count) {
		*index = 0;
		return false;
	}

	min_idx = 0;
	max_idx = run->count - 1;

	/* Check boundary cases first, since they cover the most frequent requests. */
	r = run->runs;
	if (vcn < r->vcn) {
		*index = 0;
		return false;
	}

	if (vcn < r->vcn + r->len) {
		*index = 0;
		return true;
	}

	r += max_idx;
	if (vcn >= r->vcn + r->len) {
		*index = run->count;
		return false;
	}

	if (vcn >= r->vcn) {
		*index = max_idx;
		return true;
	}

	do {
		mid_idx = min_idx + ((max_idx - min_idx) >> 1);
		r = run->runs + mid_idx;

		if (vcn < r->vcn) {
			max_idx = mid_idx - 1;
			if (!mid_idx)
				break;
		} else if (vcn >= r->vcn + r->len) {
			min_idx = mid_idx + 1;
		} else {
			*index = mid_idx;
			return true;
		}
	} while (min_idx <= max_idx);

	*index = max_idx + 1;
	return false;
}

/*
 * run_consolidate - Consolidate runs starting from a given one.
 */
static void run_consolidate(struct runs_tree *run, size_t index)
{
	size_t i;
	struct ntfs_run *r = run->runs + index;

	while (index + 1 < run->count) {
		/*
		 * Merge the current run with the next one
		 * if the start of the next run lies inside the one being tested.
		 */
		struct ntfs_run *n = r + 1;
		CLST end = r->vcn + r->len;
		CLST dl;

		/* Stop if the runs are not adjacent to each other. */
		if (n->vcn > end)
			break;

		dl = end - n->vcn;

		/*
		 * If the range at @index overlaps with the next one,
		 * then either adjust the start of the next one
		 * or (if it is completely covered) just remove it from the list.
		 */
		if (dl > 0) {
			if (n->len <= dl)
				goto remove_next_range;

			n->len -= dl;
			n->vcn += dl;
			if (n->lcn != SPARSE_LCN)
				n->lcn += dl;
			dl = 0;
		}

		/*
		 * Do not merge if the sparse mode of the
		 * current and next runs does not match.
		 */
		if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
			index += 1;
			r = n;
			continue;
		}

		/*
		 * Stop if the first volume block of the next run
		 * does not directly follow the last volume block
		 * of the current run.
		 */
		if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
			break;

		/*
		 * Next and current runs are adjacent.
		 * Eat/join.
		 */
		r->len += n->len - dl;

remove_next_range:
		i = run->count - (index + 1);
		if (i > 1)
			memmove(n, n + 1, sizeof(*n) * (i - 1));

		run->count -= 1;
	}
}

/*
 * run_is_mapped_full
 *
 * Return: True if the range [svcn, evcn] is fully mapped.
 */
bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
{
	size_t i;
	const struct ntfs_run *r, *end;
	CLST next_vcn;

	if (!run_lookup(run, svcn, &i))
		return false;

	end = run->runs + run->count;
	r = run->runs + i;

	for (;;) {
		next_vcn = r->vcn + r->len;
		if (next_vcn > evcn)
			return true;

		if (++r >= end)
			return false;

		if (r->vcn != next_vcn)
			return false;
	}
}
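
/*
 * run_lookup_entry - Find the (lcn, len) pair that maps @vcn.
 *
 * Return: True if @vcn is mapped. *lcn/*len then describe the mapped
 * tail of the run starting at @vcn, and *index is that run's position.
 */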
bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
		      CLST *len, size_t *index)
{
	size_t idx;
	CLST gap;
	struct ntfs_run *r;

	/* Fail immediately if the run was not touched yet. */
	if (!run->runs)
		return false;

	if (!run_lookup(run, vcn, &idx))
		return false;

	r = run->runs + idx;

	if (vcn >= r->vcn + r->len)
		return false;

	gap = vcn - r->vcn;
	if (r->len <= gap)
		return false;

	*lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);

	if (len)
		*len = r->len - gap;
	if (index)
		*index = idx;

	return true;
}

/*
 * run_truncate_head - Decommit the range before vcn.
 */
void run_truncate_head(struct runs_tree *run, CLST vcn)
{
	size_t index;
	struct ntfs_run *r;

	if (run_lookup(run, vcn, &index)) {
		r = run->runs + index;

		if (vcn > r->vcn) {
			CLST dlen = vcn - r->vcn;

			r->vcn = vcn;
			r->len -= dlen;
			if (r->lcn != SPARSE_LCN)
				r->lcn += dlen;
		}

		if (!index)
			return;
	}
	r = run->runs;
	memmove(r, r + index, sizeof(*r) * (run->count - index));

	run->count -= index;

	if (!run->count) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/*
 * run_truncate - Decommit the range after vcn.
 */
void run_truncate(struct runs_tree *run, CLST vcn)
{
	size_t index;

	/*
	 * If @vcn hits an existing range, that range has to be truncated.
	 * If the truncated range becomes empty, it is removed entirely.
	 */
	if (run_lookup(run, vcn, &index)) {
		struct ntfs_run *r = run->runs + index;

		r->len = vcn - r->vcn;

		if (r->len > 0)
			index += 1;
	}

	/*
	 * At this point 'index' is the first position to be thrown away
	 * (including 'index' itself). Simply set the new limit.
	 */
	run->count = index;

	/* Do not reallocate the array 'runs'. Only free it if possible. */
	if (!index) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/*
 * run_truncate_around - Trim head and tail if necessary.
 */
void run_truncate_around(struct runs_tree *run, CLST vcn)
{
	run_truncate_head(run, vcn);
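
	/*
	 * Keep the in-memory table bounded: once it holds
	 * NTFS3_RUN_MAX_BYTES / 2 worth of entries, drop the
	 * upper half of the mapping.
	 */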
	if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
		run_truncate(run, (run->runs + (run->count >> 1))->vcn);
}

/*
 * run_add_entry
 *
 * Sets the location to a known state.
 * The run to be added may overlap with an existing location.
 *
 * Return: false if out of memory.
 */
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
		   bool is_mft)
{
	size_t used, index;
	struct ntfs_run *r;
	bool inrange;
	CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
	bool should_add_tail = false;

	/*
	 * Lookup the insertion point.
	 *
	 * Execute bsearch for the entry containing
	 * the start position in question.
	 */
	inrange = run_lookup(run, vcn, &index);

	/*
	 * Shortcut here would be the case where the range was not found,
	 * but the one being added continues the previous run.
	 * In this case the existing range can directly be used
	 * as the starting point.
	 */
	if (!inrange && index > 0) {
		struct ntfs_run *t = run->runs + index - 1;

		if (t->vcn + t->len == vcn &&
		    (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
		    (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
			inrange = true;
			index -= 1;
		}
	}

	/*
	 * At this point 'index' either points to the range
	 * containing the start position or to the insertion position
	 * for a new range.
	 * So first check whether the range being probed is already here.
	 */
	if (!inrange) {
requires_new_range:
		/*
		 * Range was not found.
		 * Insert at position 'index'.
		 */
		used = run->count * sizeof(struct ntfs_run);

		/*
		 * Check the allocated space.
		 * If it is not enough for one more entry,
		 * then it will be reallocated.
		 */
		if (run->allocated < used + sizeof(struct ntfs_run)) {
			size_t bytes;
			struct ntfs_run *new_ptr;

			/* Use a power of 2 for 'bytes'. */
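			/*
			 * Growth policy: start from 64 bytes, double while
			 * the table is small (up to 16 pages), then grow
			 * linearly by 16 pages at a time.
			 */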
			if (!used) {
				bytes = 64;
			} else if (used <= 16 * PAGE_SIZE) {
				if (is_power_of_2(run->allocated))
					bytes = run->allocated << 1;
				else
					bytes = (size_t)1
						<< (2 + blksize_bits(used));
			} else {
				bytes = run->allocated + (16 * PAGE_SIZE);
			}

			WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);

			new_ptr = kvmalloc(bytes, GFP_KERNEL);
			if (!new_ptr)
				return false;

			r = new_ptr + index;
			memcpy(new_ptr, run->runs,
			       index * sizeof(struct ntfs_run));
			memcpy(r + 1, run->runs + index,
			       sizeof(struct ntfs_run) * (run->count - index));

			kvfree(run->runs);
			run->runs = new_ptr;
			run->allocated = bytes;
		} else {
			size_t i = run->count - index;

			r = run->runs + index;

			/* memmove appears to be a bottleneck here... */
			if (i > 0)
				memmove(r + 1, r, sizeof(struct ntfs_run) * i);
		}

		r->vcn = vcn;
		r->lcn = lcn;
		r->len = len;
		run->count += 1;
	} else {
		r = run->runs + index;

		/*
		 * If one of the ranges was not allocated then we
		 * have to split the location we just matched and
		 * insert the current one.
		 * A common case is that this requires the tail to be
		 * reinserted by a recursive call.
		 */
		if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
		    (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
			CLST to_eat = vcn - r->vcn;
			CLST Tovcn = to_eat + len;

			should_add_tail = Tovcn < r->len;

			if (should_add_tail) {
				tail_lcn = r->lcn == SPARSE_LCN
						   ? SPARSE_LCN
						   : (r->lcn + Tovcn);
				tail_vcn = r->vcn + Tovcn;
				tail_len = r->len - Tovcn;
			}

			if (to_eat > 0) {
				r->len = to_eat;
				inrange = false;
				index += 1;
				goto requires_new_range;
			}

			/* lcn should match the one we are going to add. */
			r->lcn = lcn;
		}

		/*
		 * If the existing range fits, then we are done.
		 * Otherwise extend the found one and fall through
		 * to the consolidation below.
		 */
		if (r->vcn + r->len < vcn + len)
			r->len += len - ((r->vcn + r->len) - vcn);
	}

	/*
	 * And normalize it starting from the insertion point.
	 * It's possible that no insertion was needed, in the case where
	 * the start point lies within the range of the entry
	 * that 'index' points to.
	 */
	if (inrange && index > 0)
		index -= 1;
	run_consolidate(run, index);
	run_consolidate(run, index + 1);

	/*
	 * A special case:
	 * we have to add an extra range as a tail.
	 */
	if (should_add_tail &&
	    !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
		return false;

	return true;
}

/* run_collapse_range
 *
 * Helper for attr_collapse_range(),
 * which is a helper for fallocate(collapse_range).
 */
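/*
 * Removes the clusters in [vcn, vcn + len) from the mapping and shifts
 * every later run down by @len virtual clusters.
 */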
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index, eat;
	struct ntfs_run *r, *e, *eat_start, *eat_end;
	CLST end;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return true; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;
	end = vcn + len;

	if (vcn > r->vcn) {
		if (r->vcn + r->len <= end) {
			/* Collapse the tail of this run. */
			r->len = vcn - r->vcn;
		} else if (r->lcn == SPARSE_LCN) {
			/* Collapse a middle part of a sparse run. */
			r->len -= len;
		} else {
			/* Collapse a middle part of a normal run, split. */
			if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
				return false;
			return run_collapse_range(run, vcn, len);
		}

		r += 1;
	}

	eat_start = r;
	eat_end = r;

	for (; r < e; r++) {
		CLST d;

		if (r->vcn >= end) {
			r->vcn -= len;
			continue;
		}

		if (r->vcn + r->len <= end) {
			/* Eat this run. */
			eat_end = r + 1;
			continue;
		}

		d = end - r->vcn;
		if (r->lcn != SPARSE_LCN)
			r->lcn += d;
		r->len -= d;
		r->vcn -= len - d;
	}

	eat = eat_end - eat_start;
	memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
	run->count -= eat;

	return true;
}

/* run_insert_range
 *
 * Helper for attr_insert_range(),
 * which is a helper for fallocate(insert_range).
 */
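/*
 * Makes room for @len clusters at @vcn: every run at or after @vcn is
 * shifted up by @len, and the new range is mapped as sparse.
 */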
bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index;
	struct ntfs_run *r, *e;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return false; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;

	if (vcn > r->vcn)
		r += 1;

	for (; r < e; r++)
		r->vcn += len;

	r = run->runs + index;

	if (vcn > r->vcn) {
		/* Split the fragment. */
		CLST len1 = vcn - r->vcn;
		CLST len2 = r->len - len1;
		CLST lcn2 = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len1);

		r->len = len1;

		if (!run_add_entry(run, vcn + len, lcn2, len2, false))
			return false;
	}

	if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
		return false;

	return true;
}

/*
 * run_get_entry - Return the index-th mapped region.
 */
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
		   CLST *lcn, CLST *len)
{
	const struct ntfs_run *r;

	if (index >= run->count)
		return false;

	r = run->runs + index;

	if (!r->len)
		return false;

	if (vcn)
		*vcn = r->vcn;
	if (lcn)
		*lcn = r->lcn;
	if (len)
		*len = r->len;
	return true;
}
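
/*
 * Mapping-pair integers below are stored little-endian on disk and are
 * trimmed to the minimum number of bytes that still preserves the sign:
 * e.g. the length 0x2000 packs into two bytes (00 20), and 0x80 also
 * needs two bytes (80 00) so the top bit is not mistaken for a sign bit.
 */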

/*
 * run_packed_size - Calculate the size of a packed 64-bit integer.
 */
#ifdef __BIG_ENDIAN
static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n + sizeof(n) - 1;

	if (n >= 0) {
		if (p[-7] || p[-6] || p[-5] || p[-4])
			p -= 4;
		if (p[-3] || p[-2])
			p -= 2;
		if (p[-1])
			p -= 1;
		if (p[0] & 0x80)
			p -= 1;
	} else {
		if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
		    p[-4] != 0xff)
			p -= 4;
		if (p[-3] != 0xff || p[-2] != 0xff)
			p -= 2;
		if (p[-1] != 0xff)
			p -= 1;
		if (!(p[0] & 0x80))
			p -= 1;
	}
	return (const u8 *)&n + sizeof(n) - p;
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		run_buf[7] = p[0];
		fallthrough;
	case 7:
		run_buf[6] = p[1];
		fallthrough;
	case 6:
		run_buf[5] = p[2];
		fallthrough;
	case 5:
		run_buf[4] = p[3];
		fallthrough;
	case 4:
		run_buf[3] = p[4];
		fallthrough;
	case 3:
		run_buf[2] = p[5];
		fallthrough;
	case 2:
		run_buf[1] = p[6];
		fallthrough;
	case 1:
		run_buf[0] = p[7];
	}
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		p[0] = run_buf[7];
		fallthrough;
	case 7:
		p[1] = run_buf[6];
		fallthrough;
	case 6:
		p[2] = run_buf[5];
		fallthrough;
	case 5:
		p[3] = run_buf[4];
		fallthrough;
	case 4:
		p[4] = run_buf[3];
		fallthrough;
	case 3:
		p[5] = run_buf[2];
		fallthrough;
	case 2:
		p[6] = run_buf[1];
		fallthrough;
	case 1:
		p[7] = run_buf[0];
	}
	return v;
}

#else

static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n;

	if (n >= 0) {
		if (p[7] || p[6] || p[5] || p[4])
			p += 4;
		if (p[3] || p[2])
			p += 2;
		if (p[1])
			p += 1;
		if (p[0] & 0x80)
			p += 1;
	} else {
		if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
		    p[4] != 0xff)
			p += 4;
		if (p[3] != 0xff || p[2] != 0xff)
			p += 2;
		if (p[1] != 0xff)
			p += 1;
		if (!(p[0] & 0x80))
			p += 1;
	}

	return 1 + p - (const u8 *)&n;
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	/* memcpy(run_buf, &v, size); Is it faster? */
	switch (size) {
	case 8:
		run_buf[7] = p[7];
		fallthrough;
	case 7:
		run_buf[6] = p[6];
		fallthrough;
	case 6:
		run_buf[5] = p[5];
		fallthrough;
	case 5:
		run_buf[4] = p[4];
		fallthrough;
	case 4:
		run_buf[3] = p[3];
		fallthrough;
	case 3:
		run_buf[2] = p[2];
		fallthrough;
	case 2:
		run_buf[1] = p[1];
		fallthrough;
	case 1:
		run_buf[0] = p[0];
	}
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	/* memcpy(&v, run_buf, size); Is it faster? */
	switch (size) {
	case 8:
		p[7] = run_buf[7];
		fallthrough;
	case 7:
		p[6] = run_buf[6];
		fallthrough;
	case 6:
		p[5] = run_buf[5];
		fallthrough;
	case 5:
		p[4] = run_buf[4];
		fallthrough;
	case 4:
		p[3] = run_buf[3];
		fallthrough;
	case 3:
		p[2] = run_buf[2];
		fallthrough;
	case 2:
		p[1] = run_buf[1];
		fallthrough;
	case 1:
		p[0] = run_buf[0];
	}

	return v;
}
#endif

/*
 * run_pack - Pack runs into buffer.
 *
 * packed_vcns - How many clusters we have packed.
 * packed_size - How many bytes of run_buf we have used.
 */
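/*
 * Each mapping pair starts with a header byte: the low nibble is the
 * byte count of the packed length, the high nibble is the byte count
 * of the packed lcn delta (0 for a sparse run). A header byte of 0
 * terminates the list.
 */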
int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
	     u32 run_buf_size, CLST *packed_vcns)
{
	CLST next_vcn, vcn, lcn;
	CLST prev_lcn = 0;
	CLST evcn1 = svcn + len;
	const struct ntfs_run *r, *r_end;
	int packed_size = 0;
	size_t i;
	s64 dlcn;
	int offset_size, size_size, tmp;

	*packed_vcns = 0;

	if (!len)
		goto out;

	/* Check that all required entries [svcn, evcn1) are available. */
	if (!run_lookup(run, svcn, &i))
		return -ENOENT;

	r_end = run->runs + run->count;
	r = run->runs + i;

	for (next_vcn = r->vcn + r->len; next_vcn < evcn1;
	     next_vcn = r->vcn + r->len) {
		if (++r >= r_end || r->vcn != next_vcn)
			return -ENOENT;
	}

	/* Repeat the cycle above and pack runs. Assume no errors. */
	r = run->runs + i;
	len = svcn - r->vcn;
	vcn = svcn;
	lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len);
	len = r->len - len;

	for (;;) {
		next_vcn = vcn + len;
		if (next_vcn > evcn1)
			len = evcn1 - vcn;

		/* How many bytes are required to pack len. */
		size_size = run_packed_size(len);

		/* offset_size - How many bytes are required to pack dlcn. */
		if (lcn == SPARSE_LCN) {
			offset_size = 0;
			dlcn = 0;
		} else {
			/* NOTE: lcn can be less than prev_lcn! */
			dlcn = (s64)lcn - prev_lcn;
			offset_size = run_packed_size(dlcn);
			prev_lcn = lcn;
		}

		tmp = run_buf_size - packed_size - 2 - offset_size;
		if (tmp <= 0)
			goto out;

		/* Can we store this entire run? */
		if (tmp < size_size)
			goto out;

		if (run_buf) {
			/* Pack run header. */
			run_buf[0] = ((u8)(size_size | (offset_size << 4)));
			run_buf += 1;

			/* Pack the length of run. */
			run_pack_s64(run_buf, size_size, len);
			run_buf += size_size;

			/* Pack the offset from previous LCN. */
			run_pack_s64(run_buf, offset_size, dlcn);
			run_buf += offset_size;
		}

		packed_size += 1 + offset_size + size_size;
		*packed_vcns += len;

		if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
			goto out;

		r += 1;
		vcn = r->vcn;
		lcn = r->lcn;
		len = r->len;
	}

out:
	/* Store the terminating zero. */
	if (run_buf)
		run_buf[0] = 0;

	return packed_size + 1;
}

/*
 * run_unpack - Unpack packed runs from @run_buf.
 *
 * Return: A negative value on error, or the number of bytes used.
 */
int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
	       CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
	       int run_buf_size)
{
	u64 prev_lcn, vcn64, lcn, next_vcn;
	const u8 *run_last, *run_0;
	bool is_mft = ino == MFT_REC_MFT;

	if (run_buf_size < 0)
		return -EINVAL;

	/* Check for empty. */
	if (evcn + 1 == svcn)
		return 0;

	if (evcn < svcn)
		return -EINVAL;

	run_0 = run_buf;
	run_last = run_buf + run_buf_size;
	prev_lcn = 0;
	vcn64 = svcn;

	/* Read all runs in the chain. */
	while (run_buf < run_last) {
		/* size_size - How many bytes the packed len takes. */
		u8 size_size = *run_buf & 0xF;
		/* offset_size - How many bytes the packed dlcn takes. */
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (!size_size)
			break;

		/*
		 * Unpack runs.
		 * NOTE: Runs are stored in little-endian order.
		 * "len" is an unsigned value, "dlcn" is signed.
		 * A large positive number requires 5 bytes to store,
		 * e.g.: 05 FF 7E FF FF 00 00 00
		 */
		if (size_size > 8)
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		/* Skip size_size. */
		run_buf += size_size;

		if (!len)
			return -EINVAL;

		if (!offset_size)
			lcn = SPARSE_LCN64;
		else if (offset_size <= 8) {
			s64 dlcn;

			/* Initial value of dlcn is -1 or 0. */
			dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
			dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
			/* Skip offset_size. */
			run_buf += offset_size;

			if (!dlcn)
				return -EINVAL;
			lcn = prev_lcn + dlcn;
			prev_lcn = lcn;
		} else
			return -EINVAL;

		next_vcn = vcn64 + len;
		/* Check boundary. */
		if (next_vcn > evcn + 1)
			return -EINVAL;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
			ntfs_err(
				sbi->sb,
				"This driver is compiled without CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
				"Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
				"Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
				vcn64, lcn, len);
			return -EOPNOTSUPP;
		}
#endif
		if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
			/* LCN range is out of volume. */
			return -EINVAL;
		}

		if (!run)
			; /* Called from check_attr(fslog.c) to check run. */
		else if (run == RUN_DEALLOCATE) {
			/*
			 * Called from ni_delete_all to free clusters
			 * without storing them in run.
			 */
			if (lcn != SPARSE_LCN64)
				mark_as_free_ex(sbi, lcn, len, true);
		} else if (vcn64 >= vcn) {
			if (!run_add_entry(run, vcn64, lcn, len, is_mft))
				return -ENOMEM;
		} else if (next_vcn > vcn) {
			u64 dlen = vcn - vcn64;

			if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
					   is_mft))
				return -ENOMEM;
		}

		vcn64 = next_vcn;
	}

	if (vcn64 != evcn + 1) {
		/* Not the expected length of unpacked runs. */
		return -EINVAL;
	}

	return run_buf - run_0;
}

#ifdef NTFS3_CHECK_FREE_CLST
/*
 * run_unpack_ex - Unpack packed runs from "run_buf".
 *
 * Checks the unpacked runs against the bitmap of used clusters.
 *
 * Return: A negative value on error, or the number of bytes used.
 */
int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
		  CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
		  int run_buf_size)
{
	int ret, err;
	CLST next_vcn, lcn, len;
	size_t index;
	bool ok;
	struct wnd_bitmap *wnd;

	ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
	if (ret <= 0)
		return ret;

	if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
		return ret;

	if (ino == MFT_REC_BADCLUST)
		return ret;

	next_vcn = vcn = svcn;
	wnd = &sbi->used.bitmap;

	for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
	     next_vcn <= evcn;
	     ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
		if (!ok || next_vcn != vcn)
			return -EINVAL;

		next_vcn = vcn + len;

		if (lcn == SPARSE_LCN)
			continue;

		if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
			continue;

		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
		/* Check that the clusters are marked as used in the bitmap. */
		ok = wnd_is_used(wnd, lcn, len);
		up_read(&wnd->rw_lock);
		if (ok)
			continue;

		/* Looks like the volume is corrupted. */
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);

		if (down_write_trylock(&wnd->rw_lock)) {
			/* Mark all zero bits as used in range [lcn, lcn+len). */
			CLST i, lcn_f = 0, len_f = 0;

			err = 0;

			for (i = 0; i < len; i++) {
				if (wnd_is_free(wnd, lcn + i, 1)) {
					if (!len_f)
						lcn_f = lcn + i;
					len_f += 1;
				} else if (len_f) {
					err = wnd_set_used(wnd, lcn_f, len_f);
					len_f = 0;
					if (err)
						break;
				}
			}

			if (len_f)
				err = wnd_set_used(wnd, lcn_f, len_f);

			up_write(&wnd->rw_lock);
			if (err)
				return err;
		}
	}

	return ret;
}
#endif

/*
 * run_get_highest_vcn
 *
 * Return the highest vcn from a mapping pairs array.
 * It is used while replaying the log file.
 */
int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
{
	u64 vcn64 = vcn;
	u8 size_size;

	while ((size_size = *run_buf & 0xF)) {
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (size_size > 8 || offset_size > 8)
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		if (!len)
			return -EINVAL;

		run_buf += size_size + offset_size;
		vcn64 += len;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (vcn64 > 0x100000000ull)
			return -EINVAL;
#endif
	}

	*highest_vcn = vcn64 - 1;
	return 0;
}

/*
 * run_clone
 *
 * Make a copy of the run.
 */
int run_clone(const struct runs_tree *run, struct runs_tree *new_run)
{
	size_t bytes = run->count * sizeof(struct ntfs_run);

	if (bytes > new_run->allocated) {
		struct ntfs_run *new_ptr = kvmalloc(bytes, GFP_KERNEL);

		if (!new_ptr)
			return -ENOMEM;

		kvfree(new_run->runs);
		new_run->runs = new_ptr;
		new_run->allocated = bytes;
	}

	memcpy(new_run->runs, run->runs, bytes);
	new_run->count = run->count;
	return 0;
}