From 24e49ee3d76b70853a96520e46b8837e5eae65b2 Mon Sep 17 00:00:00 2001
From: Nitin Gupta <nitin.m.gupta@oracle.com>
Date: Wed, 30 Mar 2016 11:17:13 -0700
Subject: sparc64: Reduce TLB flushes during hugetlb unmap

During hugepage map/unmap, TSB and TLB flushes are currently
issued at every PAGE_SIZE boundary, which is unnecessary.
We now issue the flush at REAL_HPAGE_SIZE boundaries only:
on sparc64 an HPAGE_SIZE (8MB) hugepage is backed by two
REAL_HPAGE_SIZE (4MB) hardware TTEs, so two flushes per
hugepage suffice instead of one per 8KB base page.

Without this patch, workloads which unmap a large hugepage-backed
VMA region get CPU lockups due to excessive TLB flush calls.
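
As an illustration of the batching rule the hunks below implement
(a minimal userspace sketch, not the kernel code: TLB_BATCH_NR, the
struct layout and flush_tlb_pending() here are simplified stand-ins),
only one size class may sit in the pending batch at a time, and a
base<->huge transition drains the batch first so the flush path can
flush the matching TSB:

    #include <stdbool.h>
    #include <stdio.h>

    #define TLB_BATCH_NR 8  /* illustrative; not the kernel's value */

    struct tlb_batch {
            bool huge;              /* size class of the queued entries */
            unsigned long tlb_nr;   /* number of queued vaddrs */
            unsigned long vaddrs[TLB_BATCH_NR];
    };

    static void flush_tlb_pending(struct tlb_batch *tb)
    {
            /* stand-in for the real demap of tb->vaddrs[0..tlb_nr) */
            printf("flush %lu %s entries\n", tb->tlb_nr,
                   tb->huge ? "huge" : "base");
            tb->tlb_nr = 0;
    }

    static void tlb_batch_add_one(struct tlb_batch *tb,
                                  unsigned long vaddr, bool huge)
    {
            if (tb->tlb_nr == 0)
                    tb->huge = huge;        /* first entry picks the class */

            if (tb->huge != huge) {         /* class change: drain first */
                    flush_tlb_pending(tb);
                    tb->huge = huge;
            }

            tb->vaddrs[tb->tlb_nr++] = vaddr;
            if (tb->tlb_nr >= TLB_BATCH_NR) /* capacity: drain */
                    flush_tlb_pending(tb);
    }

    int main(void)
    {
            struct tlb_batch tb = { 0 };

            tlb_batch_add_one(&tb, 0x10000UL, false); /* base page */
            tlb_batch_add_one(&tb, 0x12000UL, false); /* batched with above */
            tlb_batch_add_one(&tb, 0x800000UL, true); /* drains base batch */
            flush_tlb_pending(&tb);                   /* final drain */
            return 0;
    }

Run standalone, the third add prints a flush of the two queued base
entries before the huge vaddr is queued, mirroring how a mixed
base/huge stream now costs one flush per size-class run rather than
one per page.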

Orabug: 22365539, 22643230, 22995196

Signed-off-by: Nitin Gupta <nitin.m.gupta@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/mm/tlb.c | 25 ++++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)

diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 9df2190c097e..f81cd9736700 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void)
 }
 
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
-			      bool exec)
+			      bool exec, bool huge)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
@@ -84,13 +84,21 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 	}
 
 	if (!tb->active) {
-		flush_tsb_user_page(mm, vaddr);
+		flush_tsb_user_page(mm, vaddr, huge);
 		global_flush_tlb_page(mm, vaddr);
 		goto out;
 	}
 
-	if (nr == 0)
+	if (nr == 0) {
 		tb->mm = mm;
+		tb->huge = huge;
+	}
+
+	if (tb->huge != huge) {
+		flush_tlb_pending();
+		tb->huge = huge;
+		nr = 0;
+	}
 
 	tb->vaddrs[nr] = vaddr;
 	tb->tlb_nr = ++nr;
@@ -104,6 +112,8 @@ out:
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		   pte_t *ptep, pte_t orig, int fullmm)
 {
+	bool huge = is_hugetlb_pte(orig);
+
 	if (tlb_type != hypervisor &&
 	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);
@@ -129,7 +139,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 
 no_cache_flush:
 	if (!fullmm)
-		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -145,7 +155,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 		if (pte_val(*pte) & _PAGE_VALID) {
 			bool exec = pte_exec(*pte);
 
-			tlb_batch_add_one(mm, vaddr, exec);
+			tlb_batch_add_one(mm, vaddr, exec, false);
 		}
 		pte++;
 		vaddr += PAGE_SIZE;
@@ -185,8 +195,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			pte_t orig_pte = __pte(pmd_val(orig));
 			bool exec = pte_exec(orig_pte);
 
-			tlb_batch_add_one(mm, addr, exec);
-			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
+			tlb_batch_add_one(mm, addr, exec, true);
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+					true);
 		} else {
 			tlb_batch_pmd_scan(mm, addr, orig);
 		}
-- 
cgit v1.2.3