lzo.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "compression.h"
#include "ctree.h"

#define LZO_LEN	4

/*
 * Btrfs LZO compression format
 *
 * Regular and inlined LZO compressed data extents consist of:
 *
 * 1.  Header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size (including the header) of compressed data.
 *
 * 2.  Segment(s)
 *     Variable size. Each segment includes one segment header, followed by
 *     data payload.
 *     One regular LZO compressed extent can have one or more segments.
 *     For inlined LZO compressed extent, only one segment is allowed.
 *     One segment represents at most one sector of uncompressed data.
 *
 * 2.1 Segment header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size of the segment (not including the header).
 *     Segment header never crosses sector boundary, thus it's possible to
 *     have at most 3 padding zeros at the end of the sector.
 *
 * 2.2 Data Payload
 *     Variable size. The size upper limit should be
 *     lzo1x_worst_compress(sectorsize), which is 4419 for a 4KiB sectorsize.
 *
 * Example with 4K sectorsize:
 * Page 1:
 *          0     0x2   0x4   0x6   0x8   0xa   0xc   0xe     0x10
 * 0x0000  |  Header   | SegHdr 01 | Data payload 01 ...     |
 * ...
 * 0x0ff0  | SegHdr  N | Data payload  N     ...          |00|
 *                                                          ^^ padding zeros
 * Page 2:
 * 0x1000  | SegHdr N+1| Data payload N+1 ...                |
 */
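/*
 * For reference, the 4419 upper bound above follows from the kernel's LZO
 * worst-case bound, lzo1x_worst_compress(x) = x + x/16 + 64 + 3 (defined in
 * <linux/lzo.h>), worked out for a 4 KiB sector:
 *
 *	4096 + 4096 / 16 + 64 + 3 = 4096 + 256 + 64 + 3 = 4419
 */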
#define WORKSPACE_BUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))
#define WORKSPACE_CBUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))

struct workspace {
	void *mem;
	void *buf;	/* where decompressed data goes */
	void *cbuf;	/* where compressed data goes */
	struct list_head list;
};

static struct workspace_manager wsm;

void lzo_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->buf);
	kvfree(workspace->cbuf);
	kvfree(workspace->mem);
	kfree(workspace);
}

struct list_head *lzo_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	workspace->buf = kvmalloc(WORKSPACE_BUF_LENGTH, GFP_KERNEL);
	workspace->cbuf = kvmalloc(WORKSPACE_CBUF_LENGTH, GFP_KERNEL);
	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	lzo_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

static inline void write_compress_length(char *buf, size_t len)
{
	__le32 dlen;

	dlen = cpu_to_le32(len);
	memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(const char *buf)
{
	__le32 dlen;

	memcpy(&dlen, buf, LZO_LEN);
	return le32_to_cpu(dlen);
}
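/*
 * Illustrative sketch (not part of the original file): how the on-disk format
 * described above can be walked with the two helpers defined here. It assumes
 * the whole compressed extent sits in one flat buffer, which the real code
 * below never does (it works on page arrays); the helper name and that
 * flat-buffer assumption are hypothetical.
 */
static inline u32 lzo_count_segments_sketch(const char *buf, u32 sectorsize)
{
	/* First LZO_LEN bytes: total compressed size, including this header */
	u32 len_in = read_compress_length(buf);
	u32 cur_in = LZO_LEN;
	u32 nr_segments = 0;

	while (cur_in < len_in) {
		/* Each segment header records only the payload length */
		u32 seg_len = read_compress_length(buf + cur_in);

		cur_in += LZO_LEN + seg_len;
		nr_segments++;

		/*
		 * If fewer than LZO_LEN bytes remain in the sector, they are
		 * padding zeros; the next segment header starts at the next
		 * sector boundary.
		 */
		if (sectorsize - (cur_in % sectorsize) < LZO_LEN)
			cur_in = round_up(cur_in, sectorsize);
	}
	return nr_segments;
}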
/*
 * Will do:
 *
 * - Write a segment header into the destination
 * - Copy the compressed buffer into the destination
 * - Make sure we have enough space in the last sector to fit a segment header
 *   If not, we will pad at most (LZO_LEN (4)) - 1 bytes of zeros.
 *
 * Will allocate new pages when needed.
 */
static int copy_compressed_data_to_page(char *compressed_data,
					size_t compressed_size,
					struct page **out_pages,
					unsigned long max_nr_page,
					u32 *cur_out,
					const u32 sectorsize)
{
	u32 sector_bytes_left;
	u32 orig_out;
	struct page *cur_page;
	char *kaddr;

	if ((*cur_out / PAGE_SIZE) >= max_nr_page)
		return -E2BIG;

	/*
	 * We never allow a segment header crossing sector boundary, previous
	 * run should ensure we have enough space left inside the sector.
	 */
	ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);

	cur_page = out_pages[*cur_out / PAGE_SIZE];
	/* Allocate a new page */
	if (!cur_page) {
		cur_page = alloc_page(GFP_NOFS);
		if (!cur_page)
			return -ENOMEM;
		out_pages[*cur_out / PAGE_SIZE] = cur_page;
	}

	kaddr = kmap_local_page(cur_page);
	write_compress_length(kaddr + offset_in_page(*cur_out),
			      compressed_size);
	*cur_out += LZO_LEN;

	orig_out = *cur_out;

	/* Copy compressed data */
	while (*cur_out - orig_out < compressed_size) {
		u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
				     orig_out + compressed_size - *cur_out);

		kunmap_local(kaddr);

		if ((*cur_out / PAGE_SIZE) >= max_nr_page)
			return -E2BIG;

		cur_page = out_pages[*cur_out / PAGE_SIZE];
		/* Allocate a new page */
		if (!cur_page) {
			cur_page = alloc_page(GFP_NOFS);
			if (!cur_page)
				return -ENOMEM;
			out_pages[*cur_out / PAGE_SIZE] = cur_page;
		}
		kaddr = kmap_local_page(cur_page);

		memcpy(kaddr + offset_in_page(*cur_out),
		       compressed_data + *cur_out - orig_out, copy_len);

		*cur_out += copy_len;
	}

	/*
	 * Check if we can fit the next segment header into the remaining space
	 * of the sector.
	 */
	sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
	if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
		goto out;

	/* The remaining size is not enough, pad it with zeros */
	memset(kaddr + offset_in_page(*cur_out), 0,
	       sector_bytes_left);
	*cur_out += sector_bytes_left;
out:
	kunmap_local(kaddr);
	return 0;
}
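/*
 * Concrete example of the padding rule above (assuming a 4 KiB sectorsize):
 * if a segment's payload ends at output offset 0x0ffe, only 2 bytes remain in
 * that sector, which cannot hold a 4-byte segment header. Those 2 bytes are
 * zeroed and *cur_out advances to 0x1000, so the next segment header starts
 * at the sector boundary.
 */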
int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
	struct page *page_in = NULL;
	char *sizes_ptr;
	const unsigned long max_nr_page = *out_pages;
	int ret = 0;
	/* Points to the file offset of input data */
	u64 cur_in = start;
	/* Points to the current output byte */
	u32 cur_out = 0;
	u32 len = *total_out;

	ASSERT(max_nr_page > 0);
	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/*
	 * Skip the header for now, we will later come back and write the total
	 * compressed size
	 */
	cur_out += LZO_LEN;
	while (cur_in < start + len) {
		char *data_in;
		const u32 sectorsize_mask = sectorsize - 1;
		u32 sector_off = (cur_in - start) & sectorsize_mask;
		u32 in_len;
		size_t out_len;

		/* Get the input page first */
		if (!page_in) {
			page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
			ASSERT(page_in);
		}

		/* Compress at most one sector of data each time */
		in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
		ASSERT(in_len);
		data_in = kmap_local_page(page_in);
		ret = lzo1x_1_compress(data_in +
				       offset_in_page(cur_in), in_len,
				       workspace->cbuf, &out_len,
				       workspace->mem);
		kunmap_local(data_in);
		if (ret < 0) {
			pr_debug("BTRFS: lzo in loop returned %d\n", ret);
			ret = -EIO;
			goto out;
		}

		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
						   pages, max_nr_page,
						   &cur_out, sectorsize);
		if (ret < 0)
			goto out;

		cur_in += in_len;

		/*
		 * Check if we're making it bigger after two sectors, and if
		 * so, give up.
		 */
		if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
			ret = -E2BIG;
			goto out;
		}

		/* Check if we have reached page boundary */
		if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
			put_page(page_in);
			page_in = NULL;
		}
	}

	/* Store the size of all chunks of compressed data */
	sizes_ptr = kmap_local_page(pages[0]);
	write_compress_length(sizes_ptr, cur_out);
	kunmap_local(sizes_ptr);

	ret = 0;
	*total_out = cur_out;
	*total_in = cur_in - start;
out:
	if (page_in)
		put_page(page_in);
	*out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
	return ret;
}
/*
 * Copy the compressed segment payload into @dest.
 *
 * For the payload there will be no padding, just need to do page switching.
 */
static void copy_compressed_segment(struct compressed_bio *cb,
				    char *dest, u32 len, u32 *cur_in)
{
	u32 orig_in = *cur_in;

	while (*cur_in < orig_in + len) {
		struct page *cur_page;
		u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
					  orig_in + len - *cur_in);

		ASSERT(copy_len);
		cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];

		memcpy_from_page(dest + *cur_in - orig_in, cur_page,
				 offset_in_page(*cur_in), copy_len);

		*cur_in += copy_len;
	}
}

int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	const u32 sectorsize = fs_info->sectorsize;
	char *kaddr;
	int ret;
	/* Compressed data length, can be unaligned */
	u32 len_in;
	/* Offset inside the compressed data */
	u32 cur_in = 0;
	/* Bytes decompressed so far */
	u32 cur_out = 0;

	kaddr = kmap_local_page(cb->compressed_pages[0]);
	len_in = read_compress_length(kaddr);
	kunmap_local(kaddr);
	cur_in += LZO_LEN;

	/*
	 * LZO header length check
	 *
	 * The total length should not exceed the maximum extent length,
	 * and all sectors should be used.
	 * If either condition is violated, the compressed extent is
	 * considered corrupted.
	 */
	if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
	    round_up(len_in, sectorsize) < cb->compressed_len) {
		btrfs_err(fs_info,
			"invalid lzo header, lzo len %u compressed len %u",
			len_in, cb->compressed_len);
		return -EUCLEAN;
	}
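	/*
	 * For example, with a 4 KiB sectorsize and cb->compressed_len of
	 * 8192, a valid header value lies in (4096, 8192]: anything larger
	 * than 8192 (or than BTRFS_MAX_COMPRESSED) fails the first check,
	 * and anything that rounds up to less than 8192 would leave a whole
	 * compressed sector unused and fails the second.
	 */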
	/* Go through each lzo segment */
	while (cur_in < len_in) {
		struct page *cur_page;
		/* Length of the compressed segment */
		u32 seg_len;
		u32 sector_bytes_left;
		size_t out_len = lzo1x_worst_compress(sectorsize);

		/*
		 * We should always have enough space for one segment header
		 * inside current sector.
		 */
		ASSERT(cur_in / sectorsize ==
		       (cur_in + LZO_LEN - 1) / sectorsize);
		cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
		ASSERT(cur_page);
		kaddr = kmap_local_page(cur_page);
		seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
		kunmap_local(kaddr);
		cur_in += LZO_LEN;

		if (seg_len > WORKSPACE_CBUF_LENGTH) {
			/*
			 * seg_len shouldn't be larger than we have allocated
			 * for workspace->cbuf
			 */
			btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
				  seg_len);
			ret = -EIO;
			goto out;
		}

		/* Copy the compressed segment payload into workspace */
		copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);

		/* Decompress the data */
		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
					    workspace->buf, &out_len);
		if (ret != LZO_E_OK) {
			btrfs_err(fs_info, "failed to decompress");
			ret = -EIO;
			goto out;
		}

		/* Copy the data into inode pages */
		ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
		cur_out += out_len;

		/* All data read, exit */
		if (ret == 0)
			goto out;
		ret = 0;

		/* Check if the sector has enough space for a segment header */
		sector_bytes_left = sectorsize - (cur_in % sectorsize);
		if (sector_bytes_left >= LZO_LEN)
			continue;

		/* Skip the padding zeros */
		cur_in += sector_bytes_left;
	}
out:
	if (!ret)
		zero_fill_bio(cb->orig_bio);
	return ret;
}
int lzo_decompress(struct list_head *ws, unsigned char *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	size_t in_len;
	size_t out_len;
	size_t max_segment_len = WORKSPACE_BUF_LENGTH;
	int ret = 0;
	char *kaddr;
	unsigned long bytes;

	if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
		return -EUCLEAN;

	/* The first LZO_LEN bytes record the total compressed size */
	in_len = read_compress_length(data_in);
	if (in_len != srclen)
		return -EUCLEAN;
	data_in += LZO_LEN;

	/* The second LZO_LEN bytes record the single segment's payload size */
	in_len = read_compress_length(data_in);
	if (in_len != srclen - LZO_LEN * 2) {
		ret = -EUCLEAN;
		goto out;
	}
	data_in += LZO_LEN;

	out_len = PAGE_SIZE;
	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
	if (ret != LZO_E_OK) {
		pr_warn("BTRFS: decompress failed!\n");
		ret = -EIO;
		goto out;
	}

	if (out_len < start_byte) {
		ret = -EIO;
		goto out;
	}

	/*
	 * The caller is already checking against PAGE_SIZE, but let's
	 * move this check closer to the memcpy/memset.
	 */
	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes = min_t(unsigned long, destlen, out_len - start_byte);

	kaddr = kmap_local_page(dest_page);
	memcpy(kaddr, workspace->buf + start_byte, bytes);

	/*
	 * btrfs_getblock is doing a zero on the tail of the page too,
	 * but this will cover anything missing from the decompressed
	 * data.
	 */
	if (bytes < destlen)
		memset(kaddr + bytes, 0, destlen - bytes);
	kunmap_local(kaddr);
out:
	return ret;
}
const struct btrfs_compress_op btrfs_lzo_compress = {
	.workspace_manager	= &wsm,
	.max_level		= 1,
	.default_level		= 1,
};