file_direct.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013
 * Phillip Lougher <[email protected]>
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "page_actor.h"
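
/*
 * On success all page cache pages covered by the block are filled, marked
 * uptodate and unlocked.  target_page itself is never released here; it is
 * left to the caller, as is its error handling on failure.
 */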
/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
	int expected)
{
	struct inode *inode = target_page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	int start_index = target_page->index & ~mask;
	int end_index = start_index | mask;
	int i, n, pages, bytes, res = -ENOMEM;
	struct page **page;
	struct squashfs_page_actor *actor;
	void *pageaddr;
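
	/*
	 * start_index/end_index span the page cache pages covered by the
	 * Squashfs block containing target_page; clamp to the last page
	 * of the file.
	 */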
	if (end_index > file_end)
		end_index = file_end;

	pages = end_index - start_index + 1;

	page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
	if (page == NULL)
		return res;

	/* Try to grab all the pages covered by the Squashfs block */
	for (i = 0, n = start_index; n <= end_index; n++) {
		page[i] = (n == target_page->index) ? target_page :
			grab_cache_page_nowait(target_page->mapping, n);
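
		/*
		 * grab_cache_page_nowait() returns the page locked, or NULL
		 * if it cannot be obtained without blocking; a page that
		 * cannot be grabbed is simply skipped here.
		 */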
		if (page[i] == NULL)
			continue;

		if (PageUptodate(page[i])) {
			unlock_page(page[i]);
			put_page(page[i]);
			continue;
		}

		i++;
	}
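
	/* Only the pages actually grabbed and still needing data are counted */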
	pages = i;

	/*
	 * Create a "page actor" which will kmap and kunmap the
	 * page cache pages appropriately within the decompressor
	 */
	actor = squashfs_page_actor_init_special(msblk, page, pages, expected);
	if (actor == NULL)
		goto out;

	/* Decompress directly into the page cache buffers */
	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);

	squashfs_page_actor_free(actor);
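
	/* res is the number of bytes decompressed, or a negative error code */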
	if (res < 0)
		goto mark_errored;

	if (res != expected) {
		res = -EIO;
		goto mark_errored;
	}

	/* Last page (if present) may have trailing bytes not filled */
	bytes = res % PAGE_SIZE;
	if (page[pages - 1]->index == end_index && bytes) {
		pageaddr = kmap_local_page(page[pages - 1]);
		memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap_local(pageaddr);
	}

	/* Mark pages as uptodate, unlock and release */
	for (i = 0; i < pages; i++) {
		flush_dcache_page(page[i]);
		SetPageUptodate(page[i]);
		unlock_page(page[i]);
		if (page[i] != target_page)
			put_page(page[i]);
	}

	kfree(page);

	return 0;

mark_errored:
	/* Decompression failed, mark pages as errored.  Target_page is
	 * dealt with by the caller
	 */
	for (i = 0; i < pages; i++) {
		if (page[i] == NULL || page[i] == target_page)
			continue;
		flush_dcache_page(page[i]);
		SetPageError(page[i]);
		unlock_page(page[i]);
		put_page(page[i]);
	}

out:
	kfree(page);
	return res;
}