On 01/26/2016 08:26, Huacai Chen wrote:
> If a TLB miss is triggered while EXL=1, the TLB refill exception is
> treated as a TLB invalid exception, so tlbp may fail. In this
> situation, the CP0_Index register doesn't contain a valid value. This
> may not be a problem for the VTLB since it is fully associative.
> However, the FTLB is set-associative, so not every TLB entry is valid
> for a specific address. Thus, we should use tlbwr instead of tlbwi
> when tlbp fails.
>
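
For reference: tlbp sets the P bit (bit 31) of CP0_Index when the probe
misses, so a signed test on Index is enough to tell a hit from a failed
probe. The run-time equivalent of the decision being emitted here looks
roughly like this (a sketch using the CP0 accessors from
asm/mipsregs.h and asm/hazards.h, not the code uasm actually
generates):

	tlb_probe();
	tlb_probe_hazard();
	if ((int)read_c0_index() < 0)
		tlb_write_random();	/* probe failed: Index holds no valid slot */
	else
		tlb_write_indexed();	/* probe hit: Index selects the entry */
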
> There is a similar case for huge pages, so build_huge_tlb_write_entry()
> is also modified. If wmode != tlb_random, the caller is the TLB
> invalid handler, and we should select tlbwr/tlbwi depending on the
> tlbp result.
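
If I read the new build_huge_tlb_write_entry() correctly, the emitted
control flow amounts to the following plain-C sketch (illustrative
only; set_huge_pagemask() and restore_pagemask() are hypothetical
stand-ins for the emitted ori/mtc0 sequence and
build_restore_pagemask()):

	set_huge_pagemask();		/* hypothetical: PM_HUGE_MASK -> C0_PAGEMASK */
	if (wmode == tlb_random) {
		/* refill handler: no tlbp was done, always write a random slot */
		tlb_write_random();
	} else if ((int)read_c0_index() < 0) {
		/* invalid handler, but the probe missed: fall back to tlbwr */
		tlb_write_random();
	} else {
		/* invalid handler and the probe hit: tlbwi is safe */
		tlb_write_indexed();
	}
	restore_pagemask();		/* hypothetical: undo the huge-page mask */
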
This patch triggered instruction bus errors on my Octane (R14000) as
soon as the kernel switched to userland, in a configuration with
PAGE_SIZE_64KB, MAX_ORDER=14, and THP. Without the patch, the same
configuration boots fine, so I suspect this change isn't appropriate
for all systems.
--J
> Signed-off-by: Huacai Chen <chenhc@xxxxxxxxxx>
> ---
>  arch/mips/mm/tlbex.c | 31 ++++++++++++++++++++++++++++++-
>  1 file changed, 30 insertions(+), 1 deletion(-)
>
> diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
> index d0975cd..da68ffb 100644
> --- a/arch/mips/mm/tlbex.c
> +++ b/arch/mips/mm/tlbex.c
> @@ -173,7 +173,10 @@ enum label_id {
>  	label_large_segbits_fault,
>  #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
>  	label_tlb_huge_update,
> +	label_tail_huge_miss,
> +	label_tail_huge_done,
>  #endif
> +	label_tail_miss,
>  };
>
>  UASM_L_LA(_second_part)
> @@ -192,7 +195,10 @@ UASM_L_LA(_r3000_write_probe_fail)
>  UASM_L_LA(_large_segbits_fault)
>  #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
>  UASM_L_LA(_tlb_huge_update)
> +UASM_L_LA(_tail_huge_miss)
> +UASM_L_LA(_tail_huge_done)
>  #endif
> +UASM_L_LA(_tail_miss)
>
>  static int hazard_instance;
>
> @@ -706,8 +712,24 @@ static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
>  	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
>  	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
>
> -	build_tlb_write_entry(p, l, r, wmode);
> +	if (wmode == tlb_random) { /* Caller is TLB Refill Handler */
> +		build_tlb_write_entry(p, l, r, wmode);
> +		build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
> +		return;
> +	}
> +
> +	/* Caller is TLB Load/Store/Modify Handler */
> +	uasm_i_mfc0(p, tmp, C0_INDEX);
> +	uasm_il_bltz(p, r, tmp, label_tail_huge_miss);
> +	uasm_i_nop(p);
> +	build_tlb_write_entry(p, l, r, tlb_indexed);
> +	uasm_il_b(p, r, label_tail_huge_done);
> +	uasm_i_nop(p);
> +
> +	uasm_l_tail_huge_miss(l, *p);
> +	build_tlb_write_entry(p, l, r, tlb_random);
>
> +	uasm_l_tail_huge_done(l, *p);
>  	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
>  }
>
> @@ -2026,7 +2048,14 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
>  	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
>  	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
>  	build_update_entries(p, tmp, ptr);
> +	uasm_i_mfc0(p, ptr, C0_INDEX);
> +	uasm_il_bltz(p, r, ptr, label_tail_miss);
> +	uasm_i_nop(p);
>  	build_tlb_write_entry(p, l, r, tlb_indexed);
> +	uasm_il_b(p, r, label_leave);
> +	uasm_i_nop(p);
> +	uasm_l_tail_miss(l, *p);
> +	build_tlb_write_entry(p, l, r, tlb_random);
>  	uasm_l_leave(l, *p);
>  	build_restore_work_registers(p);
>  	uasm_i_eret(p); /* return from trap */
>