Diffstat (limited to 'drivers/iommu/intel')
-rw-r--r--  drivers/iommu/intel/Makefile          |   2
-rw-r--r--  drivers/iommu/intel/cap_audit.c       | 205
-rw-r--r--  drivers/iommu/intel/cap_audit.h       | 130
-rw-r--r--  drivers/iommu/intel/dmar.c            |  11
-rw-r--r--  drivers/iommu/intel/iommu.c           | 279
-rw-r--r--  drivers/iommu/intel/irq_remapping.c   |   8
-rw-r--r--  drivers/iommu/intel/pasid.c           |  18
-rw-r--r--  drivers/iommu/intel/svm.c             |  73
8 files changed, 550 insertions(+), 176 deletions(-)
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index fb8e1e8c8029..ae236ec7d219 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o
-obj-$(CONFIG_INTEL_IOMMU) += trace.o
+obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
new file mode 100644
index 000000000000..b12e421a2f1a
--- /dev/null
+++ b/drivers/iommu/intel/cap_audit.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cap_audit.c - audit iommu capabilities for boot time and hot plug
+ *
+ * Copyright (C) 2021 Intel Corporation
+ *
+ * Author: Kyung Min Park <kyung.min.park@intel.com>
+ * Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#define pr_fmt(fmt) "DMAR: " fmt
+
+#include <linux/intel-iommu.h>
+#include "cap_audit.h"
+
+static u64 intel_iommu_cap_sanity;
+static u64 intel_iommu_ecap_sanity;
+
+static inline void check_irq_capabilities(struct intel_iommu *a,
+ struct intel_iommu *b)
+{
+ CHECK_FEATURE_MISMATCH(a, b, cap, pi_support, CAP_PI_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, eim_support, ECAP_EIM_MASK);
+}
+
+static inline void check_dmar_capabilities(struct intel_iommu *a,
+ struct intel_iommu *b)
+{
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_MAMV_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_NFR_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_SLLPS_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_FRO_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_MGAW_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_SAGAW_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_NDOMS_MASK);
+ MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_PSS_MASK);
+ MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_MHMV_MASK);
+ MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_IRO_MASK);
+
+ CHECK_FEATURE_MISMATCH(a, b, cap, 5lp_support, CAP_FL5LP_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, fl1gp_support, CAP_FL1GP_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, read_drain, CAP_RD_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, write_drain, CAP_WD_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, pgsel_inv, CAP_PSI_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, zlr, CAP_ZLR_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, caching_mode, CAP_CM_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, phmr, CAP_PHMR_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, plmr, CAP_PLMR_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, rwbf, CAP_RWBF_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, afl, CAP_AFL_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, rps, ECAP_RPS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, smpwc, ECAP_SMPWC_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, flts, ECAP_FLTS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, slts, ECAP_SLTS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, nwfs, ECAP_NWFS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, slads, ECAP_SLADS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, vcs, ECAP_VCS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, smts, ECAP_SMTS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, pds, ECAP_PDS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, dit, ECAP_DIT_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, pasid, ECAP_PASID_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, eafs, ECAP_EAFS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, srs, ECAP_SRS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, ers, ECAP_ERS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, prs, ECAP_PRS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, nest, ECAP_NEST_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, mts, ECAP_MTS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, sc_support, ECAP_SC_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, pass_through, ECAP_PT_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, dev_iotlb_support, ECAP_DT_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, qis, ECAP_QI_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, coherent, ECAP_C_MASK);
+}
+
+static int cap_audit_hotplug(struct intel_iommu *iommu, enum cap_audit_type type)
+{
+ bool mismatch = false;
+ u64 old_cap = intel_iommu_cap_sanity;
+ u64 old_ecap = intel_iommu_ecap_sanity;
+
+ if (type == CAP_AUDIT_HOTPLUG_IRQR) {
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pi_support, CAP_PI_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eim_support, ECAP_EIM_MASK);
+ goto out;
+ }
+
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, 5lp_support, CAP_FL5LP_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl1gp_support, CAP_FL1GP_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, read_drain, CAP_RD_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, write_drain, CAP_WD_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pgsel_inv, CAP_PSI_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, zlr, CAP_ZLR_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, caching_mode, CAP_CM_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, phmr, CAP_PHMR_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, plmr, CAP_PLMR_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, rwbf, CAP_RWBF_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, afl, CAP_AFL_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, rps, ECAP_RPS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smpwc, ECAP_SMPWC_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, flts, ECAP_FLTS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slts, ECAP_SLTS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nwfs, ECAP_NWFS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slads, ECAP_SLADS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, vcs, ECAP_VCS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smts, ECAP_SMTS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pds, ECAP_PDS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dit, ECAP_DIT_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pasid, ECAP_PASID_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eafs, ECAP_EAFS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, srs, ECAP_SRS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, ers, ECAP_ERS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, prs, ECAP_PRS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nest, ECAP_NEST_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, mts, ECAP_MTS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, sc_support, ECAP_SC_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pass_through, ECAP_PT_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dev_iotlb_support, ECAP_DT_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, qis, ECAP_QI_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, coherent, ECAP_C_MASK);
+
+ /* Abort hot plug if the hot plug iommu feature is smaller than global */
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, max_amask_val, CAP_MAMV_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, num_fault_regs, CAP_NFR_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, super_page_val, CAP_SLLPS_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, fault_reg_offset, CAP_FRO_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, mgaw, CAP_MGAW_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, sagaw, CAP_SAGAW_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, ndoms, CAP_NDOMS_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, ecap, pss, ECAP_PSS_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, ecap, max_handle_mask, ECAP_MHMV_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, ecap, iotlb_offset, ECAP_IRO_MASK, mismatch);
+
+out:
+ if (mismatch) {
+ intel_iommu_cap_sanity = old_cap;
+ intel_iommu_ecap_sanity = old_ecap;
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type)
+{
+ struct dmar_drhd_unit *d;
+ struct intel_iommu *i;
+
+ rcu_read_lock();
+ if (list_empty(&dmar_drhd_units))
+ goto out;
+
+ for_each_active_iommu(i, d) {
+ if (!iommu) {
+ intel_iommu_ecap_sanity = i->ecap;
+ intel_iommu_cap_sanity = i->cap;
+ iommu = i;
+ continue;
+ }
+
+ if (type == CAP_AUDIT_STATIC_DMAR)
+ check_dmar_capabilities(iommu, i);
+ else
+ check_irq_capabilities(iommu, i);
+ }
+
+out:
+ rcu_read_unlock();
+ return 0;
+}
+
+int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu)
+{
+ switch (type) {
+ case CAP_AUDIT_STATIC_DMAR:
+ case CAP_AUDIT_STATIC_IRQR:
+ return cap_audit_static(iommu, type);
+ case CAP_AUDIT_HOTPLUG_DMAR:
+ case CAP_AUDIT_HOTPLUG_IRQR:
+ return cap_audit_hotplug(iommu, type);
+ default:
+ break;
+ }
+
+ return -EFAULT;
+}
+
+bool intel_cap_smts_sanity(void)
+{
+ return ecap_smts(intel_iommu_ecap_sanity);
+}
+
+bool intel_cap_pasid_sanity(void)
+{
+ return ecap_pasid(intel_iommu_ecap_sanity);
+}
+
+bool intel_cap_nest_sanity(void)
+{
+ return ecap_nest(intel_iommu_ecap_sanity);
+}
+
+bool intel_cap_flts_sanity(void)
+{
+ return ecap_flts(intel_iommu_ecap_sanity);
+}
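
The four *_sanity() predicates above are how the rest of the driver consumes the audit result. A short illustrative example (an assumed scenario, not part of the patch): if one active IOMMU reports ecap.SMTS = 1 while another reports SMTS = 0, the boot-time audit clears ECAP_SMTS_MASK from intel_iommu_ecap_sanity, so a later check such as the sketch below sees scalable mode as unavailable for the platform as a whole:

	/* Illustrative consumer of the audited capability (assumed). */
	if (!intel_cap_smts_sanity())
		pr_info("Scalable mode not consistently supported; using legacy mode\n");
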
diff --git a/drivers/iommu/intel/cap_audit.h b/drivers/iommu/intel/cap_audit.h
new file mode 100644
index 000000000000..74cfccae0e81
--- /dev/null
+++ b/drivers/iommu/intel/cap_audit.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cap_audit.h - audit iommu capabilities header
+ *
+ * Copyright (C) 2021 Intel Corporation
+ *
+ * Author: Kyung Min Park <kyung.min.park@intel.com>
+ */
+
+/*
+ * Capability Register Mask
+ */
+#define CAP_FL5LP_MASK BIT_ULL(60)
+#define CAP_PI_MASK BIT_ULL(59)
+#define CAP_FL1GP_MASK BIT_ULL(56)
+#define CAP_RD_MASK BIT_ULL(55)
+#define CAP_WD_MASK BIT_ULL(54)
+#define CAP_MAMV_MASK GENMASK_ULL(53, 48)
+#define CAP_NFR_MASK GENMASK_ULL(47, 40)
+#define CAP_PSI_MASK BIT_ULL(39)
+#define CAP_SLLPS_MASK GENMASK_ULL(37, 34)
+#define CAP_FRO_MASK GENMASK_ULL(33, 24)
+#define CAP_ZLR_MASK BIT_ULL(22)
+#define CAP_MGAW_MASK GENMASK_ULL(21, 16)
+#define CAP_SAGAW_MASK GENMASK_ULL(12, 8)
+#define CAP_CM_MASK BIT_ULL(7)
+#define CAP_PHMR_MASK BIT_ULL(6)
+#define CAP_PLMR_MASK BIT_ULL(5)
+#define CAP_RWBF_MASK BIT_ULL(4)
+#define CAP_AFL_MASK BIT_ULL(3)
+#define CAP_NDOMS_MASK GENMASK_ULL(2, 0)
+
+/*
+ * Extended Capability Register Mask
+ */
+#define ECAP_RPS_MASK BIT_ULL(49)
+#define ECAP_SMPWC_MASK BIT_ULL(48)
+#define ECAP_FLTS_MASK BIT_ULL(47)
+#define ECAP_SLTS_MASK BIT_ULL(46)
+#define ECAP_SLADS_MASK BIT_ULL(45)
+#define ECAP_VCS_MASK BIT_ULL(44)
+#define ECAP_SMTS_MASK BIT_ULL(43)
+#define ECAP_PDS_MASK BIT_ULL(42)
+#define ECAP_DIT_MASK BIT_ULL(41)
+#define ECAP_PASID_MASK BIT_ULL(40)
+#define ECAP_PSS_MASK GENMASK_ULL(39, 35)
+#define ECAP_EAFS_MASK BIT_ULL(34)
+#define ECAP_NWFS_MASK BIT_ULL(33)
+#define ECAP_SRS_MASK BIT_ULL(31)
+#define ECAP_ERS_MASK BIT_ULL(30)
+#define ECAP_PRS_MASK BIT_ULL(29)
+#define ECAP_NEST_MASK BIT_ULL(26)
+#define ECAP_MTS_MASK BIT_ULL(25)
+#define ECAP_MHMV_MASK GENMASK_ULL(23, 20)
+#define ECAP_IRO_MASK GENMASK_ULL(17, 8)
+#define ECAP_SC_MASK BIT_ULL(7)
+#define ECAP_PT_MASK BIT_ULL(6)
+#define ECAP_EIM_MASK BIT_ULL(4)
+#define ECAP_DT_MASK BIT_ULL(2)
+#define ECAP_QI_MASK BIT_ULL(1)
+#define ECAP_C_MASK BIT_ULL(0)
+
+/*
+ * u64 intel_iommu_cap_sanity, intel_iommu_ecap_sanity will be adjusted as each
+ * IOMMU gets audited.
+ */
+#define DO_CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \
+do { \
+ if (cap##_##feature(a) != cap##_##feature(b)) { \
+ intel_iommu_##cap##_sanity &= ~(MASK); \
+ pr_info("IOMMU feature %s inconsistent", #feature); \
+ } \
+} while (0)
+
+#define CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \
+ DO_CHECK_FEATURE_MISMATCH((a)->cap, (b)->cap, cap, feature, MASK)
+
+#define CHECK_FEATURE_MISMATCH_HOTPLUG(b, cap, feature, MASK) \
+do { \
+ if (cap##_##feature(intel_iommu_##cap##_sanity)) \
+ DO_CHECK_FEATURE_MISMATCH(intel_iommu_##cap##_sanity, \
+ (b)->cap, cap, feature, MASK); \
+} while (0)
+
+#define MINIMAL_FEATURE_IOMMU(iommu, cap, MASK) \
+do { \
+ u64 min_feature = intel_iommu_##cap##_sanity & (MASK); \
+ min_feature = min_t(u64, min_feature, (iommu)->cap & (MASK)); \
+ intel_iommu_##cap##_sanity = (intel_iommu_##cap##_sanity & ~(MASK)) | \
+ min_feature; \
+} while (0)
+
+#define MINIMAL_FEATURE_HOTPLUG(iommu, cap, feature, MASK, mismatch) \
+do { \
+ if ((intel_iommu_##cap##_sanity & (MASK)) > \
+ (cap##_##feature((iommu)->cap))) \
+ mismatch = true; \
+ else \
+ (iommu)->cap = ((iommu)->cap & ~(MASK)) | \
+ (intel_iommu_##cap##_sanity & (MASK)); \
+} while (0)
+
+enum cap_audit_type {
+ CAP_AUDIT_STATIC_DMAR,
+ CAP_AUDIT_STATIC_IRQR,
+ CAP_AUDIT_HOTPLUG_DMAR,
+ CAP_AUDIT_HOTPLUG_IRQR,
+};
+
+bool intel_cap_smts_sanity(void);
+bool intel_cap_pasid_sanity(void);
+bool intel_cap_nest_sanity(void);
+bool intel_cap_flts_sanity(void);
+
+static inline bool scalable_mode_support(void)
+{
+ return (intel_iommu_sm && intel_cap_smts_sanity());
+}
+
+static inline bool pasid_mode_support(void)
+{
+ return scalable_mode_support() && intel_cap_pasid_sanity();
+}
+
+static inline bool nested_mode_support(void)
+{
+ return scalable_mode_support() && intel_cap_nest_sanity();
+}
+
+int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu);
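
Because the checks are built by token pasting, one concrete expansion may help when reading cap_audit.c. CHECK_FEATURE_MISMATCH(a, b, ecap, smts, ECAP_SMTS_MASK) should expand roughly to the following, using the ecap_smts() accessor from include/linux/intel-iommu.h:

	do {
		if (ecap_smts((a)->ecap) != ecap_smts((b)->ecap)) {
			intel_iommu_ecap_sanity &= ~(ECAP_SMTS_MASK);
			pr_info("IOMMU feature %s inconsistent", "smts");
		}
	} while (0);
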
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 02e7c10a4224..d5c51b5c20af 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -31,6 +31,7 @@
#include <linux/limits.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>
+#include <trace/events/intel_iommu.h>
#include "../irq_remapping.h"
@@ -525,6 +526,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
struct acpi_dmar_reserved_memory *rmrr;
struct acpi_dmar_atsr *atsr;
struct acpi_dmar_rhsa *rhsa;
+ struct acpi_dmar_satc *satc;
switch (header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
@@ -554,6 +556,10 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
/* We don't print this here because we need to sanity-check
it first. So print it in dmar_parse_one_andd() instead. */
break;
+ case ACPI_DMAR_TYPE_SATC:
+ satc = container_of(header, struct acpi_dmar_satc, header);
+ pr_info("SATC flags: 0x%x\n", satc->flags);
+ break;
}
}
@@ -641,6 +647,7 @@ parse_dmar_table(void)
.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
+ .cb[ACPI_DMAR_TYPE_SATC] = &dmar_parse_one_satc,
};
/*
@@ -1307,6 +1314,8 @@ restart:
offset = ((index + i) % QI_LENGTH) << shift;
memcpy(qi->desc + offset, &desc[i], 1 << shift);
qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
+ trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1,
+ desc[i].qw2, desc[i].qw3);
}
qi->desc_status[wait_index] = QI_IN_USE;
@@ -2074,6 +2083,7 @@ static guid_t dmar_hp_guid =
#define DMAR_DSM_FUNC_DRHD 1
#define DMAR_DSM_FUNC_ATSR 2
#define DMAR_DSM_FUNC_RHSA 3
+#define DMAR_DSM_FUNC_SATC 4
static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
@@ -2091,6 +2101,7 @@ static int dmar_walk_dsm_resource(acpi_handle handle, int func,
[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
+ [DMAR_DSM_FUNC_SATC] = ACPI_DMAR_TYPE_SATC,
};
if (!dmar_detect_dsm(handle, func))
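
The ACPI_DMAR_TYPE_SATC entries registered above are consumed by dmar_parse_one_satc(), added to iommu.c further down. For reference, the SATC table entry is laid out roughly as below (an assumption based on the ACPICA definition in include/acpi/actbl2.h; bit 0 of flags is the "ATC required" bit the parser tests):

	/* Rough ACPICA layout (assumption; see include/acpi/actbl2.h). */
	struct acpi_dmar_satc {
		struct acpi_dmar_header header;	/* common type/length header */
		u8 flags;			/* bit 0: ATC required */
		u8 reserved;
		u16 segment;			/* PCI segment number */
		/* followed by acpi_dmar_device_scope entries */
	};
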
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 06b00b5363d8..ee0932307d64 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -44,10 +44,10 @@
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
-#include <trace/events/intel_iommu.h>
#include "../irq_remapping.h"
#include "pasid.h"
+#include "cap_audit.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -316,8 +316,18 @@ struct dmar_atsr_unit {
u8 include_all:1; /* include all ports */
};
+struct dmar_satc_unit {
+ struct list_head list; /* list of SATC units */
+ struct acpi_dmar_header *hdr; /* ACPI header */
+ struct dmar_dev_scope *devices; /* target devices */
+ struct intel_iommu *iommu; /* the corresponding iommu */
+ int devices_cnt; /* target device count */
+ u8 atc_required:1; /* ATS is required */
+};
+
static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);
+static LIST_HEAD(dmar_satc_units);
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
@@ -1017,8 +1027,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
- if (domain_use_first_level(domain))
+ if (domain_use_first_level(domain)) {
pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
+ if (domain->domain.type == IOMMU_DOMAIN_DMA)
+ pteval |= DMA_FL_PTE_ACCESS;
+ }
if (cmpxchg64(&pte->val, 0ULL, pteval))
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
@@ -1861,25 +1874,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
*/
static bool first_level_by_default(void)
{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- static int first_level_support = -1;
-
- if (likely(first_level_support != -1))
- return first_level_support;
-
- first_level_support = 1;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) {
- first_level_support = 0;
- break;
- }
- }
- rcu_read_unlock();
-
- return first_level_support;
+ return scalable_mode_support() && intel_cap_flts_sanity();
}
static struct dmar_domain *alloc_domain(int flags)
@@ -2298,9 +2293,9 @@ static int
__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phys_pfn, unsigned long nr_pages, int prot)
{
- struct dma_pte *first_pte = NULL, *pte = NULL;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
+ struct dma_pte *pte = NULL;
phys_addr_t pteval;
u64 attr;
@@ -2310,9 +2305,16 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return -EINVAL;
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
- if (domain_use_first_level(domain))
+ if (domain_use_first_level(domain)) {
attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
+ if (domain->domain.type == IOMMU_DOMAIN_DMA) {
+ attr |= DMA_FL_PTE_ACCESS;
+ if (prot & DMA_PTE_WRITE)
+ attr |= DMA_FL_PTE_DIRTY;
+ }
+ }
+
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
while (nr_pages > 0) {
@@ -2322,7 +2324,7 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
phys_pfn, nr_pages);
- first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
+ pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
if (!pte)
return -ENOMEM;
/* It is large page*/
@@ -2383,34 +2385,14 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
* recalculate 'pte' and switch back to smaller pages for the
* end of the mapping, if the trailing size is not enough to
* use another superpage (i.e. nr_pages < lvl_pages).
+ *
+ * We leave clflush for the leaf pte changes to iotlb_sync_map()
+ * callback.
*/
pte++;
if (!nr_pages || first_pte_in_page(pte) ||
- (largepage_lvl > 1 && nr_pages < lvl_pages)) {
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
+ (largepage_lvl > 1 && nr_pages < lvl_pages))
pte = NULL;
- }
- }
-
- return 0;
-}
-
-static int
-domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- unsigned long phys_pfn, unsigned long nr_pages, int prot)
-{
- int iommu_id, ret;
- struct intel_iommu *iommu;
-
- /* Do the real mapping first */
- ret = __domain_mapping(domain, iov_pfn, phys_pfn, nr_pages, prot);
- if (ret)
- return ret;
-
- for_each_domain_iommu(iommu_id, domain) {
- iommu = g_iommus[iommu_id];
- __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
}
return 0;
@@ -3197,6 +3179,10 @@ static int __init init_dmars(void)
goto error;
}
+ ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
+ if (ret)
+ goto free_iommu;
+
for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
iommu_disable_translation(iommu);
@@ -3740,6 +3726,57 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
return 0;
}
+static struct dmar_satc_unit *dmar_find_satc(struct acpi_dmar_satc *satc)
+{
+ struct dmar_satc_unit *satcu;
+ struct acpi_dmar_satc *tmp;
+
+ list_for_each_entry_rcu(satcu, &dmar_satc_units, list,
+ dmar_rcu_check()) {
+ tmp = (struct acpi_dmar_satc *)satcu->hdr;
+ if (satc->segment != tmp->segment)
+ continue;
+ if (satc->header.length != tmp->header.length)
+ continue;
+ if (memcmp(satc, tmp, satc->header.length) == 0)
+ return satcu;
+ }
+
+ return NULL;
+}
+
+int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg)
+{
+ struct acpi_dmar_satc *satc;
+ struct dmar_satc_unit *satcu;
+
+ if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
+ return 0;
+
+ satc = container_of(hdr, struct acpi_dmar_satc, header);
+ satcu = dmar_find_satc(satc);
+ if (satcu)
+ return 0;
+
+ satcu = kzalloc(sizeof(*satcu) + hdr->length, GFP_KERNEL);
+ if (!satcu)
+ return -ENOMEM;
+
+ satcu->hdr = (void *)(satcu + 1);
+ memcpy(satcu->hdr, hdr, hdr->length);
+ satcu->atc_required = satc->flags & 0x1;
+ satcu->devices = dmar_alloc_dev_scope((void *)(satc + 1),
+ (void *)satc + satc->header.length,
+ &satcu->devices_cnt);
+ if (satcu->devices_cnt && !satcu->devices) {
+ kfree(satcu);
+ return -ENOMEM;
+ }
+ list_add_rcu(&satcu->list, &dmar_satc_units);
+
+ return 0;
+}
+
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
int sp, ret;
@@ -3748,6 +3785,10 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (g_iommus[iommu->seq_id])
return 0;
+ ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
+ if (ret)
+ goto out;
+
if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
pr_warn("%s: Doesn't support hardware pass through.\n",
iommu->name);
@@ -3843,6 +3884,7 @@ static void intel_iommu_free_dmars(void)
{
struct dmar_rmrr_unit *rmrru, *rmrr_n;
struct dmar_atsr_unit *atsru, *atsr_n;
+ struct dmar_satc_unit *satcu, *satc_n;
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
@@ -3854,6 +3896,11 @@ static void intel_iommu_free_dmars(void)
list_del(&atsru->list);
intel_iommu_free_atsr(atsru);
}
+ list_for_each_entry_safe(satcu, satc_n, &dmar_satc_units, list) {
+ list_del(&satcu->list);
+ dmar_free_dev_scope(&satcu->devices, &satcu->devices_cnt);
+ kfree(satcu);
+ }
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
@@ -3905,8 +3952,10 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
int ret;
struct dmar_rmrr_unit *rmrru;
struct dmar_atsr_unit *atsru;
+ struct dmar_satc_unit *satcu;
struct acpi_dmar_atsr *atsr;
struct acpi_dmar_reserved_memory *rmrr;
+ struct acpi_dmar_satc *satc;
if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
return 0;
@@ -3947,6 +3996,23 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
break;
}
}
+ list_for_each_entry(satcu, &dmar_satc_units, list) {
+ satc = container_of(satcu->hdr, struct acpi_dmar_satc, header);
+ if (info->event == BUS_NOTIFY_ADD_DEVICE) {
+ ret = dmar_insert_dev_scope(info, (void *)(satc + 1),
+ (void *)satc + satc->header.length,
+ satc->segment, satcu->devices,
+ satcu->devices_cnt);
+ if (ret > 0)
+ break;
+ else if (ret < 0)
+ return ret;
+ } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
+ if (dmar_remove_dev_scope(info, satc->segment,
+ satcu->devices, satcu->devices_cnt))
+ break;
+ }
+ }
return 0;
}
@@ -4290,6 +4356,9 @@ int __init intel_iommu_init(void)
if (list_empty(&dmar_atsr_units))
pr_info("No ATSR found\n");
+ if (list_empty(&dmar_satc_units))
+ pr_info("No SATC found\n");
+
if (dmar_map_gfx)
intel_iommu_gfx_mapped = 1;
@@ -4943,7 +5012,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr;
int prot = 0;
- int ret;
if (iommu_prot & IOMMU_READ)
prot |= DMA_PTE_READ;
@@ -4969,9 +5037,8 @@ static int intel_iommu_map(struct iommu_domain *domain,
/* Round up size to next multiple of PAGE_SIZE, if it and
the low bits of hpa would take us onto the next page */
size = aligned_nrpages(hpa, size);
- ret = domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
- hpa >> VTD_PAGE_SHIFT, size, prot);
- return ret;
+ return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ hpa >> VTD_PAGE_SHIFT, size, prot);
}
static size_t intel_iommu_unmap(struct iommu_domain *domain,
@@ -5040,60 +5107,6 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
return phys;
}
-static inline bool scalable_mode_support(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool ret = true;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!sm_supported(iommu)) {
- ret = false;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-static inline bool iommu_pasid_support(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool ret = true;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!pasid_supported(iommu)) {
- ret = false;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-static inline bool nested_mode_support(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool ret = true;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
- ret = false;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
static bool intel_iommu_capable(enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5334,7 +5347,7 @@ intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
int ret;
if (!dev_is_pci(dev) || dmar_disabled ||
- !scalable_mode_support() || !iommu_pasid_support())
+ !scalable_mode_support() || !pasid_mode_support())
return false;
ret = pci_pasid_features(to_pci_dev(dev));
@@ -5508,6 +5521,57 @@ static bool risky_device(struct pci_dev *pdev)
return false;
}
+static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
+ unsigned long clf_pages)
+{
+ struct dma_pte *first_pte = NULL, *pte = NULL;
+ unsigned long lvl_pages = 0;
+ int level = 0;
+
+ while (clf_pages > 0) {
+ if (!pte) {
+ level = 0;
+ pte = pfn_to_dma_pte(domain, clf_pfn, &level);
+ if (WARN_ON(!pte))
+ return;
+ first_pte = pte;
+ lvl_pages = lvl_to_nr_pages(level);
+ }
+
+ if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
+ return;
+
+ clf_pages -= lvl_pages;
+ clf_pfn += lvl_pages;
+ pte++;
+
+ if (!clf_pages || first_pte_in_page(pte) ||
+ (level > 1 && clf_pages < lvl_pages)) {
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
+ pte = NULL;
+ }
+ }
+}
+
+static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long pages = aligned_nrpages(iova, size);
+ unsigned long pfn = iova >> VTD_PAGE_SHIFT;
+ struct intel_iommu *iommu;
+ int iommu_id;
+
+ if (!dmar_domain->iommu_coherency)
+ clflush_sync_map(dmar_domain, pfn, pages);
+
+ for_each_domain_iommu(iommu_id, dmar_domain) {
+ iommu = g_iommus[iommu_id];
+ __mapping_notify_one(iommu, dmar_domain, pfn, pages);
+ }
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
@@ -5520,6 +5584,7 @@ const struct iommu_ops intel_iommu_ops = {
.aux_detach_dev = intel_iommu_aux_detach_device,
.aux_get_pasid = intel_iommu_aux_get_pasid,
.map = intel_iommu_map,
+ .iotlb_sync_map = intel_iommu_iotlb_sync_map,
.unmap = intel_iommu_unmap,
.flush_iotlb_all = intel_flush_iotlb_all,
.iotlb_sync = intel_iommu_tlb_sync,
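
A note on the mapping-path change above: __domain_mapping() no longer flushes leaf PTE writes itself because the core IOMMU layer invokes the ->iotlb_sync_map() hook after a successful ->map(); intel_iommu_iotlb_sync_map() then performs one clflush pass (only for non-coherent domains) plus the caching-mode notification. A simplified sketch of the call order this relies on, assuming the generic iommu_map() path of this kernel generation (the real logic lives in drivers/iommu/iommu.c):

	/* Simplified sketch (assumption) of the generic map path. */
	ret = ops->map(domain, iova, paddr, pgsize, prot, GFP_KERNEL);
	if (!ret && ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, size);	/* clflush + CM notify happen here */
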
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 685200a5cff0..611ef5243cb6 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -22,6 +22,7 @@
#include <asm/pci-direct.h>
#include "../irq_remapping.h"
+#include "cap_audit.h"
enum irq_mode {
IRQ_REMAPPING,
@@ -734,6 +735,9 @@ static int __init intel_prepare_irq_remapping(void)
if (dmar_table_init() < 0)
return -ENODEV;
+ if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
+ goto error;
+
if (!dmar_ir_support())
return -ENODEV;
@@ -1439,6 +1443,10 @@ static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
int ret;
int eim = x2apic_enabled();
+ ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_IRQR, iommu);
+ if (ret)
+ return ret;
+
if (eim && !ecap_eim_support(iommu->ecap)) {
pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
iommu->reg_phys, iommu->ecap);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index b92af83b79bd..f26cb6195b2c 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -457,20 +457,6 @@ pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
}
static void
-iotlb_invalidation_with_pasid(struct intel_iommu *iommu, u16 did, u32 pasid)
-{
- struct qi_desc desc;
-
- desc.qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
- desc.qw1 = 0;
- desc.qw2 = 0;
- desc.qw3 = 0;
-
- qi_submit_sync(iommu, &desc, 1, 0);
-}
-
-static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
struct device *dev, u32 pasid)
{
@@ -514,7 +500,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
clflush_cache_range(pte, sizeof(*pte));
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- iotlb_invalidation_with_pasid(iommu, did, pasid);
+ qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
/* Device IOTLB doesn't need to be flushed in caching mode. */
if (!cap_caching_mode(iommu->cap))
@@ -530,7 +516,7 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
if (cap_caching_mode(iommu->cap)) {
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- iotlb_invalidation_with_pasid(iommu, did, pasid);
+ qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
} else {
iommu_flush_write_buffer(iommu);
}
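
Here the open-coded iotlb_invalidation_with_pasid() helper is dropped in favor of the existing qi_flush_piotlb() export; passing npages = -1 requests a PASID-wide invalidation, which matches the QI_GRAN_NONG_PASID descriptor the old helper built by hand. The assumed prototype, for reference (declared in include/linux/intel-iommu.h):

	void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid,
			     u64 addr, unsigned long npages, bool ih);
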
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 18a9f05df407..574a7e657a9a 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -123,53 +123,16 @@ static void __flush_svm_range_dev(struct intel_svm *svm,
unsigned long address,
unsigned long pages, int ih)
{
- struct qi_desc desc;
+ struct device_domain_info *info = get_domain_info(sdev->dev);
- if (pages == -1) {
- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
- QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
- QI_EIOTLB_TYPE;
- desc.qw1 = 0;
- } else {
- int mask = ilog2(__roundup_pow_of_two(pages));
-
- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
- QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
- QI_EIOTLB_TYPE;
- desc.qw1 = QI_EIOTLB_ADDR(address) |
- QI_EIOTLB_IH(ih) |
- QI_EIOTLB_AM(mask);
- }
- desc.qw2 = 0;
- desc.qw3 = 0;
- qi_submit_sync(sdev->iommu, &desc, 1, 0);
-
- if (sdev->dev_iotlb) {
- desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
- QI_DEV_EIOTLB_SID(sdev->sid) |
- QI_DEV_EIOTLB_QDEP(sdev->qdep) |
- QI_DEIOTLB_TYPE;
- if (pages == -1) {
- desc.qw1 = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) |
- QI_DEV_EIOTLB_SIZE;
- } else if (pages > 1) {
- /* The least significant zero bit indicates the size. So,
- * for example, an "address" value of 0x12345f000 will
- * flush from 0x123440000 to 0x12347ffff (256KiB). */
- unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
- unsigned long mask = __rounddown_pow_of_two(address ^ last);
-
- desc.qw1 = QI_DEV_EIOTLB_ADDR((address & ~mask) |
- (mask - 1)) | QI_DEV_EIOTLB_SIZE;
- } else {
- desc.qw1 = QI_DEV_EIOTLB_ADDR(address);
- }
- desc.qw2 = 0;
- desc.qw3 = 0;
- qi_submit_sync(sdev->iommu, &desc, 1, 0);
- }
+ if (WARN_ON(!pages))
+ return;
+
+ qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
+ if (info->ats_enabled)
+ qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
+ svm->pasid, sdev->qdep, address,
+ order_base_2(pages));
}
static void intel_flush_svm_range_dev(struct intel_svm *svm,
@@ -948,10 +911,8 @@ static irqreturn_t prq_event_thread(int irq, void *d)
u64 address;
handled = 1;
-
req = &iommu->prq[head / sizeof(*req)];
-
- result = QI_RESP_FAILURE;
+ result = QI_RESP_INVALID;
address = (u64)req->addr << VTD_PAGE_SHIFT;
if (!req->pasid_present) {
pr_err("%s: Page request without PASID: %08llx %08llx\n",
@@ -989,7 +950,6 @@ static irqreturn_t prq_event_thread(int irq, void *d)
rcu_read_unlock();
}
- result = QI_RESP_INVALID;
/* Since we're using init_mm.pgd directly, we should never take
* any faults on kernel addresses. */
if (!svm->mm)
@@ -1079,8 +1039,17 @@ prq_advance:
* Clear the page request overflow bit and wake up all threads that
* are waiting for the completion of this handling.
*/
- if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO)
- writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
+ if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
+ pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
+ iommu->name);
+ head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
+ tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
+ if (head == tail) {
+ writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
+ pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
+ iommu->name);
+ }
+ }
if (!completion_done(&iommu->prq_complete))
complete(&iommu->prq_complete);