Diffstat (limited to 'drivers/iommu/intel/pasid.c')
 drivers/iommu/intel/pasid.c | 75 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 65 insertions(+), 10 deletions(-)
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index f26cb6195b2c..72646bafc52f 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -24,7 +24,6 @@
/*
* Intel IOMMU system wide PASID name space:
*/
-static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
@@ -231,7 +230,7 @@ struct pasid_table *intel_pasid_get_table(struct device *dev)
return info->pasid_table;
}
-int intel_pasid_get_dev_max_id(struct device *dev)
+static int intel_pasid_get_dev_max_id(struct device *dev)
{
struct device_domain_info *info;
@@ -242,7 +241,7 @@ int intel_pasid_get_dev_max_id(struct device *dev)
return info->pasid_table->max_pasid;
}
-struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
+static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
struct device_domain_info *info;
struct pasid_table *pasid_table;
@@ -259,19 +258,25 @@ struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
dir_index = pasid >> PASID_PDE_SHIFT;
index = pasid & PASID_PTE_MASK;
- spin_lock(&pasid_lock);
+retry:
entries = get_pasid_table_from_pde(&dir[dir_index]);
if (!entries) {
entries = alloc_pgtable_page(info->iommu->node);
- if (!entries) {
- spin_unlock(&pasid_lock);
+ if (!entries)
return NULL;
- }
- WRITE_ONCE(dir[dir_index].val,
- (u64)virt_to_phys(entries) | PASID_PTE_PRESENT);
+ /*
+ * The pasid directory entry won't be freed after allocation,
+ * so there is no race with a concurrent free or clear. But
+ * another thread might populate this entry while we prepare
+ * it; in that case, free ours and retry with theirs.
+ */
+ if (cmpxchg64(&dir[dir_index].val, 0ULL,
+ (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
+ free_pgtable_page(entries);
+ goto retry;
+ }
}
- spin_unlock(&pasid_lock);
return &entries[index];
}
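
The hunk above drops the global pasid_lock in favor of a lockless install: allocate a directory page, publish it with a single cmpxchg64(), and if another CPU won the race, free the local page and retry so the winner's table is used. A minimal sketch of the same pattern on a plain pointer slot; alloc_table() and free_table() are illustrative stand-ins, not functions from this file:

	struct table *install_once(struct table **slot)
	{
		struct table *new, *old;

		for (;;) {
			old = READ_ONCE(*slot);
			if (old)
				return old;	/* another CPU already installed one */

			new = alloc_table();	/* hypothetical allocator */
			if (!new)
				return NULL;

			/* Publish only if the slot is still empty. */
			if (cmpxchg(slot, NULL, new) == NULL)
				return new;	/* we won the race */

			free_table(new);	/* lost the race; next pass returns theirs */
		}
	}
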
@@ -394,6 +399,15 @@ static inline void pasid_set_sre(struct pasid_entry *pe)
}
/*
+ * Setup the WPE (Write Protect Enable) field (Bit 132) of a
+ * scalable mode PASID entry.
+ */
+static inline void pasid_set_wpe(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
+}
+
+/*
* Setup the P(Present) field (Bit 0) of a scalable mode PASID
* entry.
*/
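
pasid_set_wpe() above relies on pasid_set_bits(), which this diff does not touch. In context it is a plain, non-atomic masked read-modify-write of one 64-bit word of the PASID entry, along these lines:

	static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
	{
		u64 old;

		old = READ_ONCE(*ptr);
		WRITE_ONCE(*ptr, (old & ~mask) | bits);
	}
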
@@ -412,6 +426,16 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
}
/*
+ * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
+ * PASID entry.
+ */
+static inline void
+pasid_set_pgsnp(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
+}
+
+/*
* Setup the First Level Page table Pointer field (Bit 140~191)
* of a scalable mode PASID entry.
*/
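
The architectural bit numbers quoted in these comments translate to a qword index plus an in-qword position: a scalable mode PASID entry is 512 bits stored as u64 val[8], so bit N lives in val[N >> 6] at position N & 63. WPE at bit 132 is therefore val[2] bit 4, and PGSNP at bit 88 is val[1] bit 24, matching the masks above. As a sketch (these macros are illustrative, not part of the file):

	#define PASID_ENTRY_QWORD(b)	((b) >> 6)	/* 132 -> 2, 88 -> 1 */
	#define PASID_ENTRY_QBIT(b)	((b) & 63)	/* 132 -> 4, 88 -> 24 */
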
@@ -493,6 +517,9 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
if (WARN_ON(!pte))
return;
+ if (!(pte->val[0] & PASID_PTE_PRESENT))
+ return;
+
did = pasid_get_domain_id(pte);
intel_pasid_clear_entry(dev, pasid, fault_ignore);
@@ -522,6 +549,22 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
}
}
+static inline int pasid_enable_wpe(struct pasid_entry *pte)
+{
+#ifdef CONFIG_X86
+ unsigned long cr0 = read_cr0();
+
+ /* CR0.WP is normally set but just to be sure */
+ if (unlikely(!(cr0 & X86_CR0_WP))) {
+ pr_err_ratelimited("No CPU write protect!\n");
+ return -EINVAL;
+ }
+#endif
+ pasid_set_wpe(pte);
+
+ return 0;
+}
+
/*
* Set up the scalable mode pasid table entry for first only
* translation type.
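
The CR0.WP probe in pasid_enable_wpe() reflects that supervisor SVA shares the CPU's page tables with the device: WPE makes the IOMMU honor write protection for supervisor requests the way CR0.WP does for the CPU. If CR0.WP were clear, the two page-table walkers would disagree about read-only pages, so setup fails instead of programming mismatched semantics.
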
@@ -553,6 +596,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
return -EINVAL;
}
pasid_set_sre(pte);
+ if (pasid_enable_wpe(pte))
+ return -EINVAL;
+
}
if (flags & PASID_FLAG_FL5LP) {
@@ -565,6 +611,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
}
}
+ if (flags & PASID_FLAG_PAGE_SNOOP)
+ pasid_set_pgsnp(pte);
+
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, iommu->agaw);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
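
PASID_FLAG_PAGE_SNOOP is supplied by callers of intel_pasid_setup_first_level() to force DMA through this PASID to snoop processor caches. A hypothetical call site, sketched from the hunk headers above (argument order assumed):

	ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid,
					    did, PASID_FLAG_PAGE_SNOOP);
	if (ret)
		dev_err(dev, "failed to set up first-level PASID entry\n");
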
@@ -643,6 +692,9 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
pasid_set_fault_enable(pte);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
+ pasid_set_pgsnp(pte);
+
/*
* Since it is a second level only translation setup, we should
* set SRE bit as well (addresses are expected to be GPAs).
@@ -706,6 +758,9 @@ intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
return -EINVAL;
}
pasid_set_sre(pte);
+ /* Enable write protect (WPE) if the guest requested it */
+ if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_WPE)
+ pasid_set_wpe(pte);
}
if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {