vm_tlb.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hexagon Virtual Machine TLB functions
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * The Hexagon Virtual Machine conceals the real workings of
 * the TLB, but there are one or two functions that need to
 * be instantiated for it, differently from a native build.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/hexagon_vm.h>

/*
 * The initial VM implementation has only one map active at a time, with
 * TLB purges on changes.  So either we're nuking the current map,
 * or it's a no-op.  This operation is messy on true SMPs where other
 * processors must be induced to flush the copies in their local TLBs,
 * but Hexagon thread-based virtual processors share the same MMU.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

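	/*
	 * Only the currently active map can have entries resident in the
	 * single TLB (see the header comment above), so a flush against
	 * any other mm is a no-op.
	 */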
	if (mm->context.ptbase == current->active_mm->context.ptbase)
		__vmclrmap((void *)start, end - start);
}

/*
 * Flush a page from the kernel virtual map - used by highmem
 */
void flush_tlb_one(unsigned long vaddr)
{
	__vmclrmap((void *)vaddr, PAGE_SIZE);
}

/*
 * Flush all TLBs across all CPUs, virtual or real.
 * A single Hexagon core has 6 thread contexts but
 * only one TLB.
 */
void tlb_flush_all(void)
{
	/* should probably use that fixaddr end or whatever label */
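	/*
	 * 0..0xffff0000 covers all but the top 64 KB of the 32-bit address
	 * space, which presumably holds the fixmap entries alluded to above.
	 */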
	__vmclrmap(0, 0xffff0000);
}

/*
 * Flush TLB entries associated with a given mm_struct mapping.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	/* Current Virtual Machine has only one map active at a time */
	if (current->active_mm->context.ptbase == mm->context.ptbase)
		tlb_flush_all();
}

/*
 * Flush TLB state associated with a page of a vma.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr)
{
	struct mm_struct *mm = vma->vm_mm;

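	/* Same active-map check as flush_tlb_range() above. */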
	if (mm->context.ptbase == current->active_mm->context.ptbase)
		__vmclrmap((void *)vaddr, PAGE_SIZE);
}

/*
 * Flush TLB entries associated with a kernel address range.
 * Like flush range, but without the check on the vma->vm_mm.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__vmclrmap((void *)start, end - start);
}