--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cache.c - Intel VT-d cache invalidation
+ *
+ * Copyright (C) 2024 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#define pr_fmt(fmt) "DMAR: " fmt
+
+#include <linux/dmar.h>
+#include <linux/iommu.h>
+#include <linux/memory.h>
+#include <linux/spinlock.h>
+
+#include "iommu.h"
+#include "pasid.h"
+
+/*
+ * Check if an existing cache tag can be reused for a new association. An
+ * IOTLB-type tag is keyed by the IOMMU unit it resides on; a DevTLB-type
+ * tag is keyed by the endpoint device it belongs to.
+ */
+static bool cache_tag_match(struct cache_tag *tag, u16 domain_id,
+			    struct intel_iommu *iommu, struct device *dev,
+			    ioasid_t pasid, enum cache_tag_type type)
+{
+ if (tag->type != type)
+ return false;
+
+ if (tag->domain_id != domain_id || tag->pasid != pasid)
+ return false;
+
+ if (type == CACHE_TAG_IOTLB || type == CACHE_TAG_NESTING_IOTLB)
+ return tag->iommu == iommu;
+
+ if (type == CACHE_TAG_DEVTLB || type == CACHE_TAG_NESTING_DEVTLB)
+ return tag->dev == dev;
+
+ return false;
+}
+
+/* Assign a cache tag of the specified type to the domain. */
+static int cache_tag_assign(struct dmar_domain *domain, u16 did,
+ struct device *dev, ioasid_t pasid,
+ enum cache_tag_type type)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct cache_tag *tag, *temp;
+ unsigned long flags;
+
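+	/*
+	 * Allocate the new tag up front: GFP_KERNEL may sleep, so the
+	 * allocation cannot be done under cache_lock. If an existing tag
+	 * matches below, this allocation is simply freed again.
+	 */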
+ tag = kzalloc(sizeof(*tag), GFP_KERNEL);
+ if (!tag)
+ return -ENOMEM;
+
+ tag->type = type;
+ tag->iommu = iommu;
+ tag->domain_id = did;
+ tag->pasid = pasid;
+ tag->users = 1;
+
+ if (type == CACHE_TAG_DEVTLB || type == CACHE_TAG_NESTING_DEVTLB)
+ tag->dev = dev;
+ else
+ tag->dev = iommu->iommu.dev;
+
+ spin_lock_irqsave(&domain->cache_lock, flags);
+ list_for_each_entry(temp, &domain->cache_tags, node) {
+		if (cache_tag_match(temp, did, iommu, dev, pasid, type)) {
+ temp->users++;
+ spin_unlock_irqrestore(&domain->cache_lock, flags);
+ kfree(tag);
+ return 0;
+ }
+ }
+ list_add_tail(&tag->node, &domain->cache_tags);
+ spin_unlock_irqrestore(&domain->cache_lock, flags);
+
+ return 0;
+}
+
+/* Unassign a cache tag of the specified type from the domain. */
+static void cache_tag_unassign(struct dmar_domain *domain, u16 did,
+ struct device *dev, ioasid_t pasid,
+ enum cache_tag_type type)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct cache_tag *tag;
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->cache_lock, flags);
+ list_for_each_entry(tag, &domain->cache_tags, node) {
+		if (cache_tag_match(tag, did, iommu, dev, pasid, type)) {
+ if (--tag->users == 0) {
+ list_del(&tag->node);
+ kfree(tag);
+ }
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&domain->cache_lock, flags);
+}
+
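+/*
+ * Assign the per-device cache tags for an ordinary attachment: an IOTLB
+ * tag is always created, and a DevTLB tag is added when ATS is enabled on
+ * the device. On DevTLB failure the IOTLB tag is rolled back, making the
+ * operation all-or-nothing.
+ */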
+static int __cache_tag_assign_domain(struct dmar_domain *domain, u16 did,
+ struct device *dev, ioasid_t pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ int ret;
+
+ ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
+ if (ret || !info->ats_enabled)
+ return ret;
+
+ ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_DEVTLB);
+ if (ret)
+ cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
+
+ return ret;
+}
+
+static void __cache_tag_unassign_domain(struct dmar_domain *domain, u16 did,
+ struct device *dev, ioasid_t pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
+
+ if (info->ats_enabled)
+ cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_DEVTLB);
+}
+
+static int __cache_tag_assign_parent_domain(struct dmar_domain *domain, u16 did,
+ struct device *dev, ioasid_t pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ int ret;
+
+ ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB);
+ if (ret || !info->ats_enabled)
+ return ret;
+
+ ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_DEVTLB);
+ if (ret)
+ cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB);
+
+ return ret;
+}
+
+static void __cache_tag_unassign_parent_domain(struct dmar_domain *domain, u16 did,
+ struct device *dev, ioasid_t pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB);
+
+ if (info->ats_enabled)
+ cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_DEVTLB);
+}
+
+static u16 domain_get_id_for_dev(struct dmar_domain *domain, struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+
+	/*
+	 * The driver assigns a distinct domain ID to every domain except
+	 * those of the SVA type; SVA domains all share FLPT_DEFAULT_DID.
+	 */
+ if (domain->domain.type == IOMMU_DOMAIN_SVA)
+ return FLPT_DEFAULT_DID;
+
+ return domain_id_iommu(domain, iommu);
+}
+
+/*
+ * Assign cache tags to a domain when it is associated with a device's
+ * PASID using a specific domain ID.
+ *
+ * Returns 0 on success, with the new cache tags created and added to the
+ * domain's cache tag list, or a negative error code on failure.
+ */
+int cache_tag_assign_domain(struct dmar_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ u16 did = domain_get_id_for_dev(domain, dev);
+ int ret;
+
+ ret = __cache_tag_assign_domain(domain, did, dev, pasid);
+ if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED)
+ return ret;
+
+ ret = __cache_tag_assign_parent_domain(domain->s2_domain, did, dev, pasid);
+ if (ret)
+ __cache_tag_unassign_domain(domain, did, dev, pasid);
+
+ return ret;
+}
+
+/*
+ * Remove the cache tags associated with a device's PASID when the domain is
+ * detached from the device.
+ *
+ * The cache tags must have been previously assigned to the domain through
+ * the assign interface.
+ */
+void cache_tag_unassign_domain(struct dmar_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ u16 did = domain_get_id_for_dev(domain, dev);
+
+ __cache_tag_unassign_domain(domain, did, dev, pasid);
+ if (domain->domain.type == IOMMU_DOMAIN_NESTED)
+ __cache_tag_unassign_parent_domain(domain->s2_domain, did, dev, pasid);
+}
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
INIT_LIST_HEAD(&domain->dev_pasids);
+ INIT_LIST_HEAD(&domain->cache_tags);
spin_lock_init(&domain->lock);
+ spin_lock_init(&domain->cache_lock);
xa_init(&domain->iommu_array);
return domain;
unsigned long ndomains;
int num, ret = -ENOSPC;
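+	/*
+	 * SVA domains use the global FLPT_DEFAULT_DID, so no per-IOMMU
+	 * domain ID is allocated for them.
+	 */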
+ if (domain->domain.type == IOMMU_DOMAIN_SVA)
+ return 0;
+
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
{
struct iommu_domain_info *info;
+ if (domain->domain.type == IOMMU_DOMAIN_SVA)
+ return;
+
spin_lock(&iommu->lock);
info = xa_load(&domain->iommu_array, iommu->seq_id);
if (--info->refcnt == 0) {
ret = domain_attach_iommu(domain, iommu);
if (ret)
return ret;
+
+ ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
+ if (ret) {
+ domain_detach_iommu(domain, iommu);
+ return ret;
+ }
+
info->domain = domain;
spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
list_del(&info->link);
spin_unlock_irqrestore(&info->domain->lock, flags);
+ cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
domain_detach_iommu(info->domain, iommu);
info->domain = NULL;
}
domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
if (WARN_ON_ONCE(!domain))
goto out_tear_down;
+ dmar_domain = to_dmar_domain(domain);
	/*
	 * The SVA implementation needs to handle its own stuff like the mm
	 * notification. Before consolidating that code into iommu core, let
	 * the intel sva code handle it.
	 */
if (domain->type == IOMMU_DOMAIN_SVA) {
intel_svm_remove_dev_pasid(dev, pasid);
+ cache_tag_unassign_domain(dmar_domain, dev, pasid);
goto out_tear_down;
}
- dmar_domain = to_dmar_domain(domain);
spin_lock_irqsave(&dmar_domain->lock, flags);
list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
if (curr->dev == dev && curr->pasid == pasid) {
WARN_ON_ONCE(!dev_pasid);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ cache_tag_unassign_domain(dmar_domain, dev, pasid);
domain_detach_iommu(dmar_domain, iommu);
intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
kfree(dev_pasid);
if (ret)
goto out_free;
+ ret = cache_tag_assign_domain(dmar_domain, dev, pasid);
+ if (ret)
+ goto out_detach_iommu;
+
if (domain_type_is_si(dmar_domain))
ret = intel_pasid_setup_pass_through(iommu, dev, pasid);
	else if (dmar_domain->use_first_level)
		ret = domain_setup_first_level(iommu, dmar_domain,
					       dev, pasid);
	else
		ret = intel_pasid_setup_second_level(iommu, dmar_domain,
						     dev, pasid);
if (ret)
- goto out_detach_iommu;
+ goto out_unassign_tag;
dev_pasid->dev = dev;
dev_pasid->pasid = pasid;
intel_iommu_debugfs_create_dev_pasid(dev_pasid);
return 0;
+out_unassign_tag:
+ cache_tag_unassign_domain(dmar_domain, dev, pasid);
out_detach_iommu:
domain_detach_iommu(dmar_domain, iommu);
out_free:
struct list_head devices; /* all devices' list */
struct list_head dev_pasids; /* all attached pasids */
+ spinlock_t cache_lock; /* Protect the cache tag list */
+ struct list_head cache_tags; /* Cache tag list */
+
int iommu_superpage;/* Level of superpages supported:
0 == 4KiB (no superpages), 1 == 2MiB,
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
const struct iommu_user_data *user_data);
struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid);
+enum cache_tag_type {
+	CACHE_TAG_IOTLB,		/* IOTLB of the IOMMU unit */
+	CACHE_TAG_DEVTLB,		/* device TLB of a PCIe endpoint */
+	CACHE_TAG_NESTING_IOTLB,	/* IOTLB of a nesting parent domain */
+	CACHE_TAG_NESTING_DEVTLB,	/* device TLB of a nesting parent domain */
+};
+
+struct cache_tag {
+ struct list_head node;
+ enum cache_tag_type type;
+ struct intel_iommu *iommu;
+	/*
+	 * The @dev field represents the location of the cache. For an IOTLB
+	 * tag, the cache resides on the IOMMU hardware and @dev stores the
+	 * device pointer of that IOMMU. For a DevTLB tag, the cache is
+	 * located in a PCIe endpoint and @dev stores the device pointer of
+	 * that endpoint.
+	 */
+ struct device *dev;
+ u16 domain_id;
+ ioasid_t pasid;
+ unsigned int users;
+};
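+
+/*
+ * A minimal usage sketch (illustrative only, not part of this patch):
+ * invalidation code is expected to walk the domain's tag list under
+ * cache_lock and direct the invalidation at tag->iommu for IOTLB-type
+ * tags or at tag->dev for DevTLB-type tags. The helpers named below are
+ * hypothetical placeholders:
+ *
+ *	struct cache_tag *tag;
+ *	unsigned long flags;
+ *
+ *	spin_lock_irqsave(&domain->cache_lock, flags);
+ *	list_for_each_entry(tag, &domain->cache_tags, node) {
+ *		switch (tag->type) {
+ *		case CACHE_TAG_IOTLB:
+ *			invalidate_iotlb(tag->iommu, ...);	[hypothetical]
+ *			break;
+ *		case CACHE_TAG_DEVTLB:
+ *			invalidate_devtlb(tag->dev, ...);	[hypothetical]
+ *			break;
+ *		default:
+ *			break;
+ *		}
+ *	}
+ *	spin_unlock_irqrestore(&domain->cache_lock, flags);
+ */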
+
+int cache_tag_assign_domain(struct dmar_domain *domain,
+ struct device *dev, ioasid_t pasid);
+void cache_tag_unassign_domain(struct dmar_domain *domain,
+ struct device *dev, ioasid_t pasid);
+
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
int intel_svm_enable_prq(struct intel_iommu *iommu);