Merge tag 'powerpc-4.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates from Michael Ellerman:

 - Kconfig: remove BE-only platforms from LE kernel build from Boqun
   Feng
 - Refresh ps3_defconfig from Geoff Levand
 - Emit GNU & SysV hashes for the vdso from Michael Ellerman
 - Define an enum for the bolted SLB indexes from Anshuman Khandual
 - Use a local to avoid multiple calls to get_slb_shadow() from Michael
   Ellerman
 - Add gettimeofday() benchmark from Michael Neuling
 - Avoid link stack corruption in __get_datapage() from Michael Neuling
 - Add virt_to_pfn and use this instead of opencoding from Aneesh Kumar
   K.V
 - Add ppc64le_defconfig from Michael Ellerman
 - pseries: extract of_helpers module from Andy Shevchenko
 - Correct string length in pseries_of_derive_parent() from Nathan
   Fontenot
 - Free the MSI bitmap if it was slab allocated from Denis Kirjanov
 - Shorten irq_chip name for the SIU from Christophe Leroy
 - Wait 1s for secondaries to enter OPAL during kexec from Samuel
   Mendoza-Jonas
 - Fix _ALIGN_* errors due to type difference, from Aneesh Kumar K.V
 - powerpc/pseries/hvcserver: don't memset pi_buff if it is null from
   Colin Ian King
 - Disable hugepd for 64K page size, from Aneesh Kumar K.V
 - Differentiate between hugetlb and THP during page walk from Aneesh
   Kumar K.V
 - Make PCI non-optional for pseries from Michael Ellerman
- Individual System V IPC system calls from Sam Bobroff (see the brief
  userspace sketch after this list)
 - Add selftest of unmuxed IPC calls from Michael Ellerman
 - discard .exit.data at runtime from Stephen Rothwell
 - Delete old orphaned PrPMC 280/2800 DTS and boot file, from Paul
   Gortmaker
 - Use of_get_next_parent to simplify code from Christophe Jaillet
- Paginate some xmon output from Sam Bobroff
 - Add some more elements to the xmon PACA dump from Michael Ellerman
 - Allow the tm-syscall selftest to build with old headers from Michael
   Ellerman
 - Run EBB selftests only on POWER8 from Denis Kirjanov
 - Drop CONFIG_TUNE_CELL in favour of CONFIG_CELL_CPU from Michael
   Ellerman
 - Avoid reference to potentially freed memory in prom.c from Christophe
   Jaillet
 - Quieten boot wrapper output with run_cmd from Geoff Levand
 - EEH fixes and cleanups from Gavin Shan
 - Fix recursive fenced PHB on Broadcom shiner adapter from Gavin Shan
 - Use of_get_next_parent() in of_get_ibm_chip_id() from Michael
   Ellerman
 - Fix section mismatch warning in msi_bitmap_alloc() from Denis
   Kirjanov
 - Fix ps3-lpm white space from Rudhresh Kumar J
 - Fix ps3-vuart null dereference from Colin King
 - nvram: Add missing kfree in error path from Christophe Jaillet
- nvram: Fix function name in some error messages, from Christophe
  Jaillet
 - drivers/macintosh: adb: fix misleading Kconfig help text from Aaro
   Koskinen
 - agp/uninorth: fix a memleak in create_gatt_table from Denis Kirjanov
 - cxl: Free virtual PHB when removing from Andrew Donnellan
 - scripts/kconfig/Makefile: Allow KBUILD_DEFCONFIG to be a target from
   Michael Ellerman
 - scripts/kconfig/Makefile: Fix KBUILD_DEFCONFIG check when building
   with O= from Michael Ellerman
 - Freescale updates from Scott: Highlights include 64-bit book3e
   kexec/kdump support, a rework of the qoriq clock driver, device tree
   changes including qoriq fman nodes, support for a new 85xx board, and
   some fixes.
 - MPC5xxx updates from Anatolij: Highlights include a driver for
   MPC512x LocalPlus Bus FIFO with its device tree binding
   documentation, mpc512x device tree updates and some minor fixes.

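As background on the unmuxed System V IPC entry points above: powerpc
previously funneled these operations through the multiplexed ipc(2)
system call, and this series wires up the individual syscalls instead.
A minimal userspace sketch (illustrative only, not part of the series)
of an operation that can now be issued as a direct syscall:

    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/sem.h>

    int main(void)
    {
        /* With unmuxed entry points, libc can issue this as a direct
         * semget syscall rather than via the ipc(2) multiplexer. */
        int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

        if (id < 0) {
            perror("semget");
            return 1;
        }
        printf("created SysV semaphore set %d\n", id);
        semctl(id, 0, IPC_RMID); /* clean up */
        return 0;
    }
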
* tag 'powerpc-4.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (106 commits)
  powerpc/msi: Fix section mismatch warning in msi_bitmap_alloc()
  powerpc/prom: Use of_get_next_parent() in of_get_ibm_chip_id()
  powerpc/pseries: Correct string length in pseries_of_derive_parent()
  powerpc/e6500: hw tablewalk: make sure we invalidate and write to the same tlb entry
  powerpc/mpc85xx: Add FSL QorIQ DPAA FMan support to the SoC device tree(s)
  powerpc/mpc85xx: Create dts components for the FSL QorIQ DPAA FMan
  powerpc/fsl: Add #clock-cells and clockgen label to clockgen nodes
  powerpc: handle error case in cpm_muram_alloc()
  powerpc: mpic: use IRQCHIP_SKIP_SET_WAKE instead of redundant mpic_irq_set_wake
  powerpc/book3e-64: Enable kexec
  powerpc/book3e-64/kexec: Set "r4 = 0" when entering spinloop
  powerpc/booke: Only use VIRT_PHYS_OFFSET on booke32
  powerpc/book3e-64/kexec: Enable SMP release
  powerpc/book3e-64/kexec: create an identity TLB mapping
  powerpc/book3e-64: Don't limit paca to 256 MiB
  powerpc/book3e/kdump: Enable crash_kexec_wait_realmode
  powerpc/book3e: support CONFIG_RELOCATABLE
  powerpc/booke64: Fix args to copy_and_flush
  powerpc/book3e-64: rename interrupt_end_book3e with __end_interrupts
  powerpc/e6500: kexec: Handle hardware threads
  ...
Committed by Linus Torvalds on 2015-11-05 23:38:43 -08:00

311 files changed, 6551 insertions(+), 1770 deletions(-)


@@ -141,8 +141,6 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
 	tlbcam_addrs[index].start = virt;
 	tlbcam_addrs[index].limit = virt + size - 1;
 	tlbcam_addrs[index].phys = phys;
-
-	loadcam_entry(index);
 }
 
 unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
@@ -171,7 +169,8 @@ unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
 }
 
 static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
-					unsigned long ram, int max_cam_idx)
+					unsigned long ram, int max_cam_idx,
+					bool dryrun)
 {
 	int i;
 	unsigned long amount_mapped = 0;
@@ -181,13 +180,20 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
 		unsigned long cam_sz;
 
 		cam_sz = calc_cam_sz(ram, virt, phys);
-		settlbcam(i, virt, phys, cam_sz, pgprot_val(PAGE_KERNEL_X), 0);
+		if (!dryrun)
+			settlbcam(i, virt, phys, cam_sz,
+				  pgprot_val(PAGE_KERNEL_X), 0);
 
 		ram -= cam_sz;
 		amount_mapped += cam_sz;
 		virt += cam_sz;
 		phys += cam_sz;
 	}
+
+	if (dryrun)
+		return amount_mapped;
+
+	loadcam_multi(0, i, max_cam_idx);
 	tlbcam_index = i;
 
 #ifdef CONFIG_PPC64
@@ -199,12 +205,12 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
 	return amount_mapped;
 }
 
-unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
+unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun)
 {
 	unsigned long virt = PAGE_OFFSET;
 	phys_addr_t phys = memstart_addr;
 
-	return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx);
+	return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun);
 }
 
 #ifdef CONFIG_PPC32
@@ -235,7 +241,7 @@ void __init adjust_total_lowmem(void)
 	ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
 
 	i = switch_to_as1();
-	__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);
+	__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false);
 	restore_to_as0(i, 0, 0, 1);
 
 	pr_info("Memory CAM mapping: ");
@@ -303,10 +309,12 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
 		n = switch_to_as1();
 		/* map a 64M area for the second relocation */
 		if (memstart_addr > start)
-			map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM);
+			map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM,
+					false);
 		else
 			map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
-					0x4000000, CONFIG_LOWMEM_CAM_NUM);
+					0x4000000, CONFIG_LOWMEM_CAM_NUM,
+					false);
 		restore_to_as0(n, offset, __va(dt_ptr), 1);
 		/* We should never reach here */
 		panic("Relocation error");


@@ -994,6 +994,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 		 unsigned long access, unsigned long trap,
 		 unsigned long flags)
 {
+	bool is_thp;
 	enum ctx_state prev_state = exception_enter();
 	pgd_t *pgdir;
 	unsigned long vsid;
@@ -1068,7 +1069,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 #endif /* CONFIG_PPC_64K_PAGES */
 
 	/* Get PTE and page size from page tables */
-	ptep = __find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
+	ptep = __find_linux_pte_or_hugepte(pgdir, ea, &is_thp, &hugeshift);
 	if (ptep == NULL || !pte_present(*ptep)) {
 		DBG_LOW(" no PTE !\n");
 		rc = 1;
@@ -1088,7 +1089,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 	}
 
 	if (hugeshift) {
-		if (pmd_trans_huge(*(pmd_t *)ptep))
+		if (is_thp)
 			rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
 					     trap, flags, ssize, psize);
 #ifdef CONFIG_HUGETLB_PAGE
@@ -1243,7 +1244,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	 * THP pages use update_mmu_cache_pmd. We don't do
 	 * hash preload there. Hence can ignore THP here
 	 */
-	ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift);
+	ptep = find_linux_pte_or_hugepte(pgdir, ea, NULL, &hugepage_shift);
 	if (!ptep)
 		goto out_exit;
 


@@ -89,6 +89,25 @@ int pgd_huge(pgd_t pgd)
 	 */
 	return ((pgd_val(pgd) & 0x3) != 0x0);
 }
+
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM)
+/*
+ * This enables us to catch the wrong page directory format
+ * Moved here so that we can use WARN() in the call.
+ */
+int hugepd_ok(hugepd_t hpd)
+{
+	bool is_hugepd;
+
+	/*
+	 * We should not find this format in page directory, warn otherwise.
+	 */
+	is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+	WARN(is_hugepd, "Found wrong page directory format\n");
+	return 0;
+}
+#endif
+
 #else
 int pmd_huge(pmd_t pmd)
 {
@@ -109,7 +128,7 @@ int pgd_huge(pgd_t pgd)
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	/* Only called for hugetlbfs pages, hence can ignore THP */
-	return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
+	return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL);
 }
 
 static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
@@ -684,13 +703,14 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
+	bool is_thp;
 	pte_t *ptep, pte;
 	unsigned shift;
 	unsigned long mask, flags;
 	struct page *page = ERR_PTR(-EINVAL);
 
 	local_irq_save(flags);
-	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
+	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &is_thp, &shift);
 	if (!ptep)
 		goto no_page;
 	pte = READ_ONCE(*ptep);
@@ -699,7 +719,7 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 	 * Transparent hugepages are handled by generic code. We can skip them
 	 * here.
 	 */
-	if (!shift || pmd_trans_huge(__pmd(pte_val(pte))))
+	if (!shift || is_thp)
 		goto no_page;
 
 	if (!pte_present(pte)) {
@@ -956,7 +976,7 @@ void flush_dcache_icache_hugepage(struct page *page)
  */
 pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
-				   unsigned *shift)
+				   bool *is_thp, unsigned *shift)
 {
 	pgd_t pgd, *pgdp;
 	pud_t pud, *pudp;
@@ -968,6 +988,9 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
 	if (shift)
 		*shift = 0;
+	if (is_thp)
+		*is_thp = false;
+
 	pgdp = pgdir + pgd_index(ea);
 	pgd = READ_ONCE(*pgdp);
 	/*
@@ -1015,7 +1038,14 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
 	if (pmd_none(pmd))
 		return NULL;
 
-	if (pmd_huge(pmd) || pmd_large(pmd)) {
+	if (pmd_trans_huge(pmd)) {
+		if (is_thp)
+			*is_thp = true;
+		ret_pte = (pte_t *) pmdp;
+		goto out;
+	}
+
+	if (pmd_huge(pmd)) {
 		ret_pte = (pte_t *) pmdp;
 		goto out;
 	} else if (is_hugepd(__hugepd(pmd_val(pmd))))


@@ -141,7 +141,8 @@ extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(unsigned long top);
 
 #elif defined(CONFIG_PPC_FSL_BOOK3E)
-extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx);
+extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx,
+				     bool dryrun);
 extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
 				 phys_addr_t phys);
 #ifdef CONFIG_PPC32
@@ -152,6 +153,7 @@ extern int switch_to_as1(void);
 extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu);
 #endif
 extern void loadcam_entry(unsigned int index);
+extern void loadcam_multi(int first_idx, int num, int tmp_idx);
 
 struct tlbcam {
 	u32	MAS0;


@@ -276,7 +276,6 @@ static int of_node_to_nid_single(struct device_node *device)
 /* Walk the device tree upwards, looking for an associativity id */
 int of_node_to_nid(struct device_node *device)
 {
-	struct device_node *tmp;
 	int nid = -1;
 
 	of_node_get(device);
@@ -285,9 +284,7 @@ int of_node_to_nid(struct device_node *device)
 		if (nid != -1)
 			break;
 
-		tmp = device;
-		device = of_get_parent(tmp);
-		of_node_put(tmp);
+		device = of_get_next_parent(device);
 	}
 	of_node_put(device);
 

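For reference, of_get_next_parent() (used in the rewritten loop above)
returns the node's parent while dropping the reference held on the node
itself, which is exactly what the deleted tmp/of_get_parent/of_node_put
dance did. An open-coded equivalent, for illustration only:

    #include <linux/of.h>

    /* Illustrative sketch of what of_get_next_parent(device) does:
     * take a reference on the parent, drop the one held on the child. */
    static struct device_node *walk_up_one(struct device_node *device)
    {
        struct device_node *parent = of_get_parent(device);

        of_node_put(device);
        return parent; /* NULL above the root, which ends the caller's loop */
    }
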

@@ -25,6 +25,11 @@
 #include <asm/udbg.h>
 #include <asm/code-patching.h>
 
+enum slb_index {
+	LINEAR_INDEX	= 0, /* Kernel linear map (0xc000000000000000) */
+	VMALLOC_INDEX	= 1, /* Kernel virtual map (0xd000000000000000) */
+	KSTACK_INDEX	= 2, /* Kernel stack map */
+};
 
 extern void slb_allocate_realmode(unsigned long ea);
 extern void slb_allocate_user(unsigned long ea);
@@ -41,9 +46,9 @@ static void slb_allocate(unsigned long ea)
 	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
 
 static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
-					 unsigned long entry)
+					 enum slb_index index)
 {
-	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | entry;
+	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
 }
 
 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
@@ -55,39 +60,39 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 static inline void slb_shadow_update(unsigned long ea, int ssize,
 				     unsigned long flags,
-				     unsigned long entry)
+				     enum slb_index index)
 {
+	struct slb_shadow *p = get_slb_shadow();
+
 	/*
 	 * Clear the ESID first so the entry is not valid while we are
 	 * updating it.  No write barriers are needed here, provided
 	 * we only update the current CPU's SLB shadow buffer.
 	 */
-	get_slb_shadow()->save_area[entry].esid = 0;
-	get_slb_shadow()->save_area[entry].vsid =
-		cpu_to_be64(mk_vsid_data(ea, ssize, flags));
-	get_slb_shadow()->save_area[entry].esid =
-		cpu_to_be64(mk_esid_data(ea, ssize, entry));
+	p->save_area[index].esid = 0;
+	p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags));
+	p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index));
 }
 
-static inline void slb_shadow_clear(unsigned long entry)
+static inline void slb_shadow_clear(enum slb_index index)
 {
-	get_slb_shadow()->save_area[entry].esid = 0;
+	get_slb_shadow()->save_area[index].esid = 0;
 }
 
 static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 					unsigned long flags,
-					unsigned long entry)
+					enum slb_index index)
 {
 	/*
 	 * Updating the shadow buffer before writing the SLB ensures
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(ea, ssize, flags, entry);
+	slb_shadow_update(ea, ssize, flags, index);
 
 	asm volatile("slbmte  %0,%1" :
 		     : "r" (mk_vsid_data(ea, ssize, flags)),
-		       "r" (mk_esid_data(ea, ssize, entry))
+		       "r" (mk_esid_data(ea, ssize, index))
 		     : "memory" );
 }
 
@@ -103,16 +108,16 @@ static void __slb_flush_and_rebolt(void)
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
+	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
 	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
 		ksp_esid_data &= ~SLB_ESID_V;
 		ksp_vsid_data = 0;
-		slb_shadow_clear(2);
+		slb_shadow_clear(KSTACK_INDEX);
 	} else {
 		/* Update stack entry; others don't change */
-		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
+		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX);
 		ksp_vsid_data =
-			be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
+			be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);
 	}
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -151,7 +156,7 @@ void slb_vmalloc_update(void)
 	unsigned long vflags;
 
 	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
-	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
+	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);
 	slb_flush_and_rebolt();
 }
 
@@ -326,19 +331,19 @@ void slb_initialize(void)
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
-	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
+	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);
+	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);
 
 	/* For the boot cpu, we're running on the stack in init_thread_union,
 	 * which is in the first segment of the linear mapping, and also
 	 * get_paca()->kstack hasn't been initialized yet.
 	 * For secondary cpus, we need to bolt the kernel stack entry now.
 	 */
-	slb_shadow_clear(2);
+	slb_shadow_clear(KSTACK_INDEX);
 	if (raw_smp_processor_id() != boot_cpuid &&
 	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
 		create_shadowed_slbe(get_paca()->kstack,
-				     mmu_kernel_ssize, lflags, 2);
+				     mmu_kernel_ssize, lflags, KSTACK_INDEX);
 
 	asm volatile("isync":::"memory");
 }


@@ -190,6 +190,7 @@ void tlb_flush(struct mmu_gather *tlb)
 void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 			      unsigned long end)
 {
+	bool is_thp;
 	int hugepage_shift;
 	unsigned long flags;
 
@@ -208,21 +209,21 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 	local_irq_save(flags);
 	arch_enter_lazy_mmu_mode();
 	for (; start < end; start += PAGE_SIZE) {
-		pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start,
+		pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, &is_thp,
 							&hugepage_shift);
 		unsigned long pte;
 
 		if (ptep == NULL)
 			continue;
 		pte = pte_val(*ptep);
-		if (hugepage_shift)
+		if (is_thp)
 			trace_hugepage_invalidate(start, pte);
 		if (!(pte & _PAGE_HASHPTE))
 			continue;
-		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
+		if (unlikely(is_thp))
 			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
 		else
-			hpte_need_flush(mm, start, ptep, pte, 0);
+			hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
 	}
 	arch_leave_lazy_mmu_mode();
 	local_irq_restore(flags);

@@ -68,11 +68,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	ld	r14,PACAPGD(r13)
 	std	r15,EX_TLB_R15(r12)
 	std	r10,EX_TLB_CR(r12)
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	std	r7,EX_TLB_R7(r12)
+#endif
 	TLB_MISS_PROLOG_STATS
 .endm
 
 .macro tlb_epilog_bolted
 	ld	r14,EX_TLB_CR(r12)
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	ld	r7,EX_TLB_R7(r12)
+#endif
 	ld	r10,EX_TLB_R10(r12)
 	ld	r11,EX_TLB_R11(r12)
 	ld	r13,EX_TLB_R13(r12)
@@ -297,6 +303,7 @@ itlb_miss_fault_bolted:
  *	r13 = PACA
  *	r11 = tlb_per_core ptr
  *	r10 = crap (free to use)
+ *	r7  = esel_next
  */
 tlb_miss_common_e6500:
 	crmove	cr2*4+2,cr0*4+2	/* cr2.eq != 0 if kernel address */
@@ -325,7 +332,11 @@ BEGIN_FTR_SECTION		/* CPU_FTR_SMT */
 	bne	10b
 	b	1b
 	.previous
+END_FTR_SECTION_IFSET(CPU_FTR_SMT)
+
+	lbz	r7,TCD_ESEL_NEXT(r11)
+BEGIN_FTR_SECTION		/* CPU_FTR_SMT */
 
 	/*
 	 * Erratum A-008139 says that we can't use tlbwe to change
 	 * an indirect entry in any way (including replacing or
@@ -334,8 +345,7 @@ BEGIN_FTR_SECTION		/* CPU_FTR_SMT */
 	 * with tlbilx before overwriting.
 	 */
-	lbz	r15,TCD_ESEL_NEXT(r11)
-	rlwinm	r10,r15,16,0xff0000
+	rlwinm	r10,r7,16,0xff0000
 	oris	r10,r10,MAS0_TLBSEL(1)@h
 	mtspr	SPRN_MAS0,r10
 	isync
@@ -429,15 +439,14 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_SMT)
 	mtspr	SPRN_MAS2,r15
 
 tlb_miss_huge_done_e6500:
-	lbz	r15,TCD_ESEL_NEXT(r11)
 	lbz	r16,TCD_ESEL_MAX(r11)
 	lbz	r14,TCD_ESEL_FIRST(r11)
-	rlwimi	r10,r15,16,0x00ff0000	/* insert esel_next into MAS0 */
-	addi	r15,r15,1		/* increment esel_next */
+	rlwimi	r10,r7,16,0x00ff0000	/* insert esel_next into MAS0 */
+	addi	r7,r7,1			/* increment esel_next */
 	mtspr	SPRN_MAS0,r10
-	cmpw	r15,r16
-	iseleq	r15,r14,r15		/* if next == last use first */
-	stb	r15,TCD_ESEL_NEXT(r11)
+	cmpw	r7,r16
+	iseleq	r7,r14,r7		/* if next == last use first */
+	stb	r7,TCD_ESEL_NEXT(r11)
 
 	tlbwe


@@ -42,6 +42,7 @@
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 #include <asm/code-patching.h>
+#include <asm/cputhreads.h>
 #include <asm/hugetlb.h>
 #include <asm/paca.h>
@@ -628,10 +629,26 @@ static void early_init_this_mmu(void)
 #ifdef CONFIG_PPC_FSL_BOOK3E
 	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
 		unsigned int num_cams;
+		int __maybe_unused cpu = smp_processor_id();
+		bool map = true;
 
 		/* use a quarter of the TLBCAM for bolted linear map */
 		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
-		linear_map_top = map_mem_in_cams(linear_map_top, num_cams);
+
+		/*
+		 * Only do the mapping once per core, or else the
+		 * transient mapping would cause problems.
+		 */
+#ifdef CONFIG_SMP
+		if (cpu != boot_cpuid &&
+		    (cpu != cpu_first_thread_sibling(cpu) ||
+		     cpu == cpu_first_thread_sibling(boot_cpuid)))
+			map = false;
+#endif
+
+		if (map)
+			linear_map_top = map_mem_in_cams(linear_map_top,
+							 num_cams, false);
 	}
 #endif
 
@@ -729,10 +746,14 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 	 * entries are supported though that may eventually
 	 * change.
 	 *
-	 * on FSL Embedded 64-bit, we adjust the RMA size to match the
-	 * first bolted TLB entry size.  We still limit max to 1G even if
-	 * the TLB could cover more.  This is due to what the early init
-	 * code is setup to do.
+	 * on FSL Embedded 64-bit, usually all RAM is bolted, but with
+	 * unusual memory sizes it's possible for some RAM to not be mapped
+	 * (such RAM is not used at all by Linux, since we don't support
+	 * highmem on 64-bit).  We limit ppc64_rma_size to what would be
+	 * mappable if this memblock is the only one.  Additional memblocks
+	 * can only increase, not decrease, the amount that ends up getting
+	 * mapped.  We still limit max to 1G even if we'll eventually map
+	 * more.  This is due to what the early init code is set up to do.
 	 *
 	 * We crop it to the size of the first MEMBLOCK to
 	 * avoid going over total available memory just in case...
@@ -740,8 +761,14 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 #ifdef CONFIG_PPC_FSL_BOOK3E
 	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
 		unsigned long linear_sz;
-		linear_sz = calc_cam_sz(first_memblock_size, PAGE_OFFSET,
-					first_memblock_base);
+		unsigned int num_cams;
+
+		/* use a quarter of the TLBCAM for bolted linear map */
+		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
+
+		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
+					    true);
+
 		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
 	} else
 #endif

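To make the new dry-run sizing above concrete (a sketch; the entry
count is an assumed example, since TLB1 geometry varies by part):
setup_initial_memory_limit() runs long before the final TLB setup, so
it only needs to know how much of the first memblock the bolted entries
could cover, without programming any of them:

    /* Hypothetical part with a 64-entry TLB1: a quarter is reserved
     * for the bolted linear map, so num_cams = 64 / 4 = 16. */
    num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

    /* dryrun=true walks calc_cam_sz() across the memblock and sums the
     * CAM sizes, but skips settlbcam()/loadcam_multi() entirely. */
    linear_sz = map_mem_in_cams(first_memblock_size, num_cams, true);

    /* The early-init RMA remains capped at 1 GiB regardless. */
    ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
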

@@ -400,6 +400,7 @@ _GLOBAL(set_context)
  * extern void loadcam_entry(unsigned int index)
  *
  * Load TLBCAM[index] entry in to the L2 CAM MMU
+ * Must preserve r7, r8, r9, and r10
  */
 _GLOBAL(loadcam_entry)
 	mflr	r5
@@ -423,4 +424,66 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
 	tlbwe
 	isync
 	blr
+
+/*
+ * Load multiple TLB entries at once, using an alternate-space
+ * trampoline so that we don't have to care about whether the same
+ * TLB entry maps us before and after.
+ *
+ * r3 = first entry to write
+ * r4 = number of entries to write
+ * r5 = temporary tlb entry
+ */
+_GLOBAL(loadcam_multi)
+	mflr	r8
+
+	/*
+	 * Set up temporary TLB entry that is the same as what we're
+	 * running from, but in AS=1.
+	 */
+	bl	1f
+1:	mflr	r6
+	tlbsx	0,r8
+	mfspr	r6,SPRN_MAS1
+	ori	r6,r6,MAS1_TS
+	mtspr	SPRN_MAS1,r6
+	mfspr	r6,SPRN_MAS0
+	rlwimi	r6,r5,MAS0_ESEL_SHIFT,MAS0_ESEL_MASK
+	mr	r7,r5
+	mtspr	SPRN_MAS0,r6
+	isync
+	tlbwe
+	isync
+
+	/* Switch to AS=1 */
+	mfmsr	r6
+	ori	r6,r6,MSR_IS|MSR_DS
+	mtmsr	r6
+	isync
+
+	mr	r9,r3
+	add	r10,r3,r4
+2:	bl	loadcam_entry
+	addi	r9,r9,1
+	cmpw	r9,r10
+	mr	r3,r9
+	blt	2b
+
+	/* Return to AS=0 and clear the temporary entry */
+	mfmsr	r6
+	rlwinm.	r6,r6,0,~(MSR_IS|MSR_DS)
+	mtmsr	r6
+	isync
+
+	li	r6,0
+	mtspr	SPRN_MAS1,r6
+	rlwinm	r6,r7,MAS0_ESEL_SHIFT,MAS0_ESEL_MASK
+	oris	r6,r6,MAS0_TLBSEL(1)@h
+	mtspr	SPRN_MAS0,r6
+	isync
+	tlbwe
+	isync
+
+	mtlr	r8
+	blr
 #endif
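In rough C-level terms, the loadcam_multi trampoline above does the
following (a sketch for orientation only; the real sequence must stay
in assembly because it rewrites TLB entries that may map the very code
doing the rewriting):

    /* Sketch of loadcam_multi(first_idx, num, tmp_idx): */
    void loadcam_multi_sketch(int first_idx, int num, int tmp_idx)
    {
        int i;

        /* 1. Clone the TLB entry currently mapping this code into slot
         *    tmp_idx, identical except that it is tagged AS=1. */
        /* 2. Set MSR[IS|DS] so execution switches to address space 1,
         *    where only the temporary entry applies; the real slots are
         *    now safe to rewrite. */

        /* 3. Write TLBCAM[first_idx .. first_idx + num - 1]. */
        for (i = first_idx; i < first_idx + num; i++)
            loadcam_entry(i);

        /* 4. Clear MSR[IS|DS] to return to AS=0, then invalidate the
         *    temporary AS=1 entry. */
    }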