extern enum io_pgtable_fmt amd_iommu_pgtable;
extern int amd_iommu_gpt_level;
-bool amd_iommu_v2_supported(void);
-
-/* Device capabilities */
-int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
-void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
+/* Protection domain ops */
+struct protection_domain *protection_domain_alloc(unsigned int type);
+void protection_domain_free(struct protection_domain *domain);
+struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
+						struct mm_struct *mm);
+void amd_iommu_domain_free(struct iommu_domain *dom);
+int iommu_sva_set_dev_pasid(struct iommu_domain *domain,
+			    struct device *dev, ioasid_t pasid);
-void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid);
+void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+				struct iommu_domain *domain);
+
+/* SVA/PASID */
+bool amd_iommu_pasid_supported(void);
+
+/* IOPF */
+int amd_iommu_iopf_init(struct amd_iommu *iommu);
+void amd_iommu_iopf_uninit(struct amd_iommu *iommu);
+void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
+			     struct iommu_page_response *resp);
+int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
+			      struct iommu_dev_data *dev_data);
+void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
+				  struct iommu_dev_data *dev_data);
/* GCR3 setup */
int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
	.capable = amd_iommu_capable,
	.domain_alloc = amd_iommu_domain_alloc,
	.domain_alloc_user = amd_iommu_domain_alloc_user,
+	.domain_alloc_sva = amd_iommu_domain_alloc_sva,
	.probe_device = amd_iommu_probe_device,
	.release_device = amd_iommu_release_device,
-	.probe_finalize = amd_iommu_probe_finalize,
	.device_group = amd_iommu_device_group,
	.get_resv_regions = amd_iommu_get_resv_regions,
	.is_attach_deferred = amd_iommu_is_attach_deferred,
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ */
+
+#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/iommu.h>
+#include <linux/mm_types.h>
+
+#include "amd_iommu.h"
+
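+/*
+ * A device can be bound to an SVA PASID only if PASID support was enabled
+ * for it and a GCR3 table exists to hold the per-PASID page-table pointers.
+ */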
+static inline bool is_pasid_enabled(struct iommu_dev_data *dev_data)
+{
+	if (dev_data->pasid_enabled && dev_data->max_pasids &&
+	    dev_data->gcr3_info.gcr3_tbl != NULL)
+		return true;
+
+	return false;
+}
+
+static inline bool is_pasid_valid(struct iommu_dev_data *dev_data,
+				  ioasid_t pasid)
+{
+	if (pasid > 0 && pasid < dev_data->max_pasids)
+		return true;
+
+	return false;
+}
+
+static void remove_dev_pasid(struct pdom_dev_data *pdom_dev_data)
+{
+	/* Update GCR3 table and flush IOTLB */
+	amd_iommu_clear_gcr3(pdom_dev_data->dev_data, pdom_dev_data->pasid);
+
+	list_del(&pdom_dev_data->list);
+	kfree(pdom_dev_data);
+}
+
+/* Clear PASID from device GCR3 table and remove pdom_dev_data from list */
+static void remove_pdom_dev_pasid(struct protection_domain *pdom,
+				  struct device *dev, ioasid_t pasid)
+{
+	struct pdom_dev_data *pdom_dev_data;
+	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+
+	lockdep_assert_held(&pdom->lock);
+
+	for_each_pdom_dev_data(pdom_dev_data, pdom) {
+		if (pdom_dev_data->dev_data == dev_data &&
+		    pdom_dev_data->pasid == pasid) {
+			remove_dev_pasid(pdom_dev_data);
+			break;
+		}
+	}
+}
+
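+/*
+ * mmu_notifier callback: the CPU page tables for [start, end) have changed,
+ * so flush the matching IOTLB range for every device/PASID bound to this
+ * SVA domain.
+ */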
+static void sva_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
+					struct mm_struct *mm,
+					unsigned long start, unsigned long end)
+{
+	struct pdom_dev_data *pdom_dev_data;
+	struct protection_domain *sva_pdom;
+	unsigned long flags;
+
+	sva_pdom = container_of(mn, struct protection_domain, mn);
+
+	spin_lock_irqsave(&sva_pdom->lock, flags);
+
+	for_each_pdom_dev_data(pdom_dev_data, sva_pdom) {
+		amd_iommu_dev_flush_pasid_pages(pdom_dev_data->dev_data,
+						pdom_dev_data->pasid,
+						start, end - start);
+	}
+
+	spin_unlock_irqrestore(&sva_pdom->lock, flags);
+}
+
+static void sva_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+	struct pdom_dev_data *pdom_dev_data, *next;
+	struct protection_domain *sva_pdom;
+	unsigned long flags;
+
+	sva_pdom = container_of(mn, struct protection_domain, mn);
+
+	spin_lock_irqsave(&sva_pdom->lock, flags);
+
+	/* The dev_data_list may hold the same PASID bound to several devices */
+	for_each_pdom_dev_data_safe(pdom_dev_data, next, sva_pdom)
+		remove_dev_pasid(pdom_dev_data);
+
+	spin_unlock_irqrestore(&sva_pdom->lock, flags);
+}
+
+static const struct mmu_notifier_ops sva_mn = {
+	.arch_invalidate_secondary_tlbs = sva_arch_invalidate_secondary_tlbs,
+	.release = sva_mn_release,
+};
+
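+/*
+ * Bind an SVA domain to (dev, pasid): program the mm's PGD into the device's
+ * GCR3 table for this PASID and track the binding on the domain's
+ * dev_data_list. Reached through the set_dev_pasid domain op, e.g. when a
+ * driver calls iommu_sva_bind_device().
+ */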
+int iommu_sva_set_dev_pasid(struct iommu_domain *domain,
+			    struct device *dev, ioasid_t pasid)
+{
+	struct pdom_dev_data *pdom_dev_data;
+	struct protection_domain *sva_pdom = to_pdomain(domain);
+	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	/* PASID zero is used for requests from the I/O device without PASID */
+	if (!is_pasid_valid(dev_data, pasid))
+		return ret;
+
+	/* Make sure PASID is enabled */
+	if (!is_pasid_enabled(dev_data))
+		return ret;
+
+	/* Add PASID to protection domain pasid list */
+	pdom_dev_data = kzalloc(sizeof(*pdom_dev_data), GFP_KERNEL);
+	if (!pdom_dev_data)
+		return -ENOMEM;
+
+	pdom_dev_data->pasid = pasid;
+	pdom_dev_data->dev_data = dev_data;
+
+	spin_lock_irqsave(&sva_pdom->lock, flags);
+
+	/* Setup GCR3 table */
+	ret = amd_iommu_set_gcr3(dev_data, pasid,
+				 iommu_virt_to_phys(domain->mm->pgd));
+	if (ret) {
+		kfree(pdom_dev_data);
+		goto out_unlock;
+	}
+
+	list_add(&pdom_dev_data->list, &sva_pdom->dev_data_list);
+
+out_unlock:
+	spin_unlock_irqrestore(&sva_pdom->lock, flags);
+	return ret;
+}
+
+void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+				struct iommu_domain *domain)
+{
+	struct protection_domain *sva_pdom;
+	unsigned long flags;
+
+	if (!is_pasid_valid(dev_iommu_priv_get(dev), pasid))
+		return;
+
+	sva_pdom = to_pdomain(domain);
+
+	spin_lock_irqsave(&sva_pdom->lock, flags);
+
+	/* Remove PASID from dev_data_list */
+	remove_pdom_dev_pasid(sva_pdom, dev, pasid);
+
+	spin_unlock_irqrestore(&sva_pdom->lock, flags);
+}
+
+static void iommu_sva_domain_free(struct iommu_domain *domain)
+{
+	struct protection_domain *sva_pdom = to_pdomain(domain);
+
+	if (sva_pdom->mn.ops)
+		mmu_notifier_unregister(&sva_pdom->mn, domain->mm);
+
+	amd_iommu_domain_free(domain);
+}
+
+static const struct iommu_domain_ops amd_sva_domain_ops = {
+	.set_dev_pasid = iommu_sva_set_dev_pasid,
+	.free = iommu_sva_domain_free,
+};
+
+struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
+						struct mm_struct *mm)
+{
+	struct protection_domain *pdom;
+	int ret;
+
+	pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA);
+	if (!pdom)
+		return ERR_PTR(-ENOMEM);
+
+	pdom->domain.ops = &amd_sva_domain_ops;
+	pdom->mn.ops = &sva_mn;
+
+	ret = mmu_notifier_register(&pdom->mn, mm);
+	if (ret) {
+		protection_domain_free(pdom);
+		return ERR_PTR(ret);
+	}
+
+	return &pdom->domain;
+}
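+
+/*
+ * Usage sketch (illustrative, not part of this patch): a device driver
+ * normally reaches the paths above through the core SVA API, e.g.
+ *
+ *	struct iommu_sva *handle = iommu_sva_bind_device(dev, current->mm);
+ *	u32 pasid = iommu_sva_get_pasid(handle);
+ *	...program the device to issue DMA tagged with this PASID...
+ *	iommu_sva_unbind_device(handle);
+ *
+ * Binding allocates the SVA domain via the domain_alloc_sva op and attaches
+ * it with iommu_sva_set_dev_pasid(); unbinding ends up in
+ * amd_iommu_remove_dev_pasid().
+ */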
	WRITE_ONCE(*dst, cpu_to_le64(val));
}
-static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, u32 ssid)
+struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
+					u32 ssid)
{
-	__le64 *l1ptr;
-	unsigned int idx;
	struct arm_smmu_l1_ctx_desc *l1_desc;
-	struct arm_smmu_device *smmu = master->smmu;
	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+	if (!cd_table->cdtab)
+		return NULL;
+
	if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
-		return cd_table->cdtab + ssid * CTXDESC_CD_DWORDS;
+		return (struct arm_smmu_cd *)(cd_table->cdtab +
+					      ssid * CTXDESC_CD_DWORDS);
-	idx = ssid >> CTXDESC_SPLIT;
-	l1_desc = &cd_table->l1_desc[idx];
-	if (!l1_desc->l2ptr) {
-		if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
+	l1_desc = &cd_table->l1_desc[ssid / CTXDESC_L2_ENTRIES];
+	if (!l1_desc->l2ptr)
+		return NULL;
+	return &l1_desc->l2ptr[ssid % CTXDESC_L2_ENTRIES];
+}
+
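+/*
+ * Return the CD entry for @ssid, allocating the CD table and, for the
+ * two-level format, the L2 leaf on demand. May sleep; the caller must
+ * hold the iommu group mutex.
+ */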
+struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
+					  u32 ssid)
+{
+	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+	struct arm_smmu_device *smmu = master->smmu;
+
+	might_sleep();
+	iommu_group_mutex_assert(master->dev);
+
+	if (!cd_table->cdtab) {
+		if (arm_smmu_alloc_cd_tables(master))
			return NULL;
-		l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
-		arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
-		/* An invalid L1CD can be cached */
-		arm_smmu_sync_cd(master, ssid, false);
+	}
+
+	if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_64K_L2) {
+		unsigned int idx = ssid / CTXDESC_L2_ENTRIES;
+		struct arm_smmu_l1_ctx_desc *l1_desc;
+
+		l1_desc = &cd_table->l1_desc[idx];
+		if (!l1_desc->l2ptr) {
+			__le64 *l1ptr;
+
+			if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
+				return NULL;
+
+			l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
+			arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
+			/* An invalid L1CD can be cached */
+			arm_smmu_sync_cd(master, ssid, false);
+		}
	}
-	idx = ssid & (CTXDESC_L2_ENTRIES - 1);
-	return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
+	return arm_smmu_get_cd_ptr(master, ssid);
}
-int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
-			    struct arm_smmu_ctx_desc *cd)
+struct arm_smmu_cd_writer {
+	struct arm_smmu_entry_writer writer;
+	unsigned int ssid;
+};
+
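+/*
+ * Report which bits of the CD the hardware may currently observe for the
+ * given value, so the entry writer can sequence an update without ever
+ * presenting an invalid descriptor to a live traffic stream.
+ */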
+VISIBLE_IF_KUNIT
+void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits)
{
+	used_bits[0] = cpu_to_le64(CTXDESC_CD_0_V);
+	if (!(ent[0] & cpu_to_le64(CTXDESC_CD_0_V)))
+		return;
+	memset(used_bits, 0xFF, sizeof(struct arm_smmu_cd));
+
	/*
-	 * This function handles the following cases:
-	 *
-	 * (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0).
-	 * (2) Install a secondary CD, for SID+SSID traffic.
-	 * (3) Update ASID of a CD. Atomically write the first 64 bits of the
-	 *     CD, then invalidate the old entry and mappings.
-	 * (4) Quiesce the context without clearing the valid bit. Disable
-	 *     translation, and ignore any translation fault.
-	 * (5) Remove a secondary CD.
+	 * If EPD0 is set by the make function it means
+	 * T0SZ/TG0/IR0/OR0/SH0/TTB0 are IGNORED
	 */
-	u64 val;
-	bool cd_live;
-	__le64 *cdptr;
-	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
-	struct arm_smmu_device *smmu = master->smmu;
+	if (ent[0] & cpu_to_le64(CTXDESC_CD_0_TCR_EPD0)) {
+		used_bits[0] &= ~cpu_to_le64(
+			CTXDESC_CD_0_TCR_T0SZ | CTXDESC_CD_0_TCR_TG0 |
+			CTXDESC_CD_0_TCR_IRGN0 | CTXDESC_CD_0_TCR_ORGN0 |
+			CTXDESC_CD_0_TCR_SH0);
+		used_bits[1] &= ~cpu_to_le64(CTXDESC_CD_1_TTB0_MASK);
+	}
+}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_cd_used);
+
-	if (WARN_ON(ssid >= (1 << cd_table->s1cdmax)))
-		return -E2BIG;
+static void arm_smmu_cd_writer_sync_entry(struct arm_smmu_entry_writer *writer)
+{
+	struct arm_smmu_cd_writer *cd_writer =
+		container_of(writer, struct arm_smmu_cd_writer, writer);
+
-	cdptr = arm_smmu_get_cd_ptr(master, ssid);
-	if (!cdptr)
-		return -ENOMEM;
+	arm_smmu_sync_cd(writer->master, cd_writer->ssid, true);
+}
+
-	val = le64_to_cpu(cdptr[0]);
-	cd_live = !!(val & CTXDESC_CD_0_V);
-
-	if (!cd) { /* (5) */
-		val = 0;
-	} else if (cd == &quiet_cd) { /* (4) */
-		if (!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
-			val &= ~(CTXDESC_CD_0_S | CTXDESC_CD_0_R);
-		val |= CTXDESC_CD_0_TCR_EPD0;
-	} else if (cd_live) { /* (3) */
-		val &= ~CTXDESC_CD_0_ASID;
-		val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
-		/*
-		 * Until CD+TLB invalidation, both ASIDs may be used for tagging
-		 * this substream's traffic
-		 */
-	} else { /* (1) and (2) */
-		cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
-		cdptr[2] = 0;
-		cdptr[3] = cpu_to_le64(cd->mair);
+static const struct arm_smmu_entry_writer_ops arm_smmu_cd_writer_ops = {
+	.sync = arm_smmu_cd_writer_sync_entry,
+	.get_used = arm_smmu_get_cd_used,
+};
+
-		/*
-		 * STE may be live, and the SMMU might read dwords of this CD in any
-		 * order. Ensure that it observes valid values before reading
-		 * V=1.
-		 */
-		arm_smmu_sync_cd(master, ssid, true);
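+/*
+ * Write @target over @cdptr through the shared entry-writer state machine,
+ * which orders the per-dword updates and CD syncs so the SMMU never sees a
+ * half-written descriptor.
+ */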
+void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
+			     struct arm_smmu_cd *cdptr,
+			     const struct arm_smmu_cd *target)
+{
+	struct arm_smmu_cd_writer cd_writer = {
+		.writer = {
+			.ops = &arm_smmu_cd_writer_ops,
+			.master = master,
+		},
+		.ssid = ssid,
+	};
+
+	arm_smmu_write_entry(&cd_writer.writer, cdptr->data, target->data);
+}
+
-		val = cd->tcr |
+void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
+			 struct arm_smmu_master *master,
+			 struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
+	const struct io_pgtable_cfg *pgtbl_cfg =
+		&io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
+	typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr =
+		&pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+
+	memset(target, 0, sizeof(*target));
+
+	target->data[0] = cpu_to_le64(
+		FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
+		FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
+		FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
+		FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
+		FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
#ifdef __BIG_ENDIAN
-			CTXDESC_CD_0_ENDI |
+		CTXDESC_CD_0_ENDI |
#endif
-			CTXDESC_CD_0_R | CTXDESC_CD_0_A |
-			(cd->mm ? 0 : CTXDESC_CD_0_ASET) |
-			CTXDESC_CD_0_AA64 |
-			FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
-			CTXDESC_CD_0_V;
-
-		if (cd_table->stall_enabled)
-			val |= CTXDESC_CD_0_S;
-	}
-
-	/*
-	 * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3
-	 * "Configuration structures and configuration invalidation completion"
-	 *
-	 *   The size of single-copy atomic reads made by the SMMU is
-	 *   IMPLEMENTATION DEFINED but must be at least 64 bits. Any single
-	 *   field within an aligned 64-bit span of a structure can be altered
-	 *   without first making the structure invalid.
-	 */
-	WRITE_ONCE(cdptr[0], cpu_to_le64(val));
-	arm_smmu_sync_cd(master, ssid, true);
-	return 0;
+		CTXDESC_CD_0_TCR_EPD1 |
+		CTXDESC_CD_0_V |
+		FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
+		CTXDESC_CD_0_AA64 |
+		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
+		CTXDESC_CD_0_R |
+		CTXDESC_CD_0_A |
+		CTXDESC_CD_0_ASET |
+		FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid)
+		);
+	target->data[1] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
+				      CTXDESC_CD_1_TTB0_MASK);
+	target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair);
+}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s1_cd);
+
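+/* Invalidate the CD for @ssid by installing an all-zero (V=0) entry. */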
+void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
+{
+	struct arm_smmu_cd target = {};
+	struct arm_smmu_cd *cdptr;
+
+	if (!master->cd_table.cdtab)
+		return;
+	cdptr = arm_smmu_get_cd_ptr(master, ssid);
+	if (WARN_ON(!cdptr))
+		return;
+	arm_smmu_write_cd_entry(master, ssid, cdptr, &target);
}
static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
			break;
		if (!dma_pte_present(pte)) {
-			uint64_t pteval;
+			uint64_t pteval, tmp;
-			tmp_page = alloc_pgtable_page(domain->nid, gfp);
+			tmp_page = iommu_alloc_page_node(domain->nid, gfp);
			if (!tmp_page)
				return NULL;
			if (domain->use_first_level)
				pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
-			if (cmpxchg64(&pte->val, 0ULL, pteval))
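+			/*
+			 * try_cmpxchg64() writes the current pte value back
+			 * into @tmp on failure; only the boolean result is
+			 * used here.
+			 */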
+			tmp = 0ULL;
+			if (!try_cmpxchg64(&pte->val, &tmp, pteval))
				/* Someone else set it while we were thinking; use theirs. */
-				free_pgtable_page(tmp_page);
+				iommu_free_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
			LIST_HEAD(freelist);
			domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist);
-
-			rcu_read_lock();
-			for_each_active_iommu(iommu, drhd)
-				iommu_flush_iotlb_psi(iommu, si_domain,
-						      start_vpfn, mhp->nr_pages,
-						      list_empty(&freelist), 0);
-			rcu_read_unlock();
-			put_pages_list(&freelist);
+			iommu_put_pages_list(&freelist);
		}
		break;
	}
static void intel_iommu_tlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
-	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-	unsigned long iova_pfn = IOVA_PFN(gather->start);
-	size_t size = gather->end - gather->start;
-	struct iommu_domain_info *info;
-	unsigned long start_pfn;
-	unsigned long nrpages;
-	unsigned long i;
-
-	nrpages = aligned_nrpages(gather->start, size);
-	start_pfn = mm_to_dma_pfn_start(iova_pfn);
-
-	xa_for_each(&dmar_domain->iommu_array, i, info)
-		iommu_flush_iotlb_psi(info->iommu, dmar_domain,
-				      start_pfn, nrpages,
-				      list_empty(&gather->freelist), 0);
-
-	if (dmar_domain->nested_parent)
-		parent_domain_flush(dmar_domain, start_pfn, nrpages,
-				    list_empty(&gather->freelist));
-	put_pages_list(&gather->freelist);
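+	/*
+	 * cache_tag_flush_range() walks the cache tags attached to this
+	 * domain and issues the needed IOTLB and device-TLB invalidations
+	 * for the [start, end] range.
+	 */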
+	cache_tag_flush_range(to_dmar_domain(domain), gather->start,
+			      gather->end, list_empty(&gather->freelist));
+	iommu_put_pages_list(&gather->freelist);
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
	return 0;
}
-static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
+static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+					 struct iommu_domain *domain)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dev_pasid_info *curr, *dev_pasid = NULL;
	struct intel_iommu *iommu = info->iommu;
-	struct dmar_domain *dmar_domain;
-	struct iommu_domain *domain;
	unsigned long flags;
-	domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
-	if (WARN_ON_ONCE(!domain))
-		goto out_tear_down;
-
-	/*
-	 * The SVA implementation needs to handle its own stuffs like the mm
-	 * notification. Before consolidating that code into iommu core, let
-	 * the intel sva code handle it.
-	 */
-	if (domain->type == IOMMU_DOMAIN_SVA) {
-		intel_svm_remove_dev_pasid(dev, pasid);
-		goto out_tear_down;
-	}
-
-	dmar_domain = to_dmar_domain(domain);
-
	spin_lock_irqsave(&dmar_domain->lock, flags);
	list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
		if (curr->dev == dev && curr->pasid == pasid) {
	domain_detach_iommu(dmar_domain, iommu);
	intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
	kfree(dev_pasid);
-out_tear_down:
	intel_pasid_tear_down_entry(iommu, dev, pasid, false);
	intel_drain_pasid_prq(dev, pasid);
}
	.hw_info = intel_iommu_hw_info,
	.domain_alloc = intel_iommu_domain_alloc,
	.domain_alloc_user = intel_iommu_domain_alloc_user,
+	.domain_alloc_sva = intel_svm_domain_alloc,
	.probe_device = intel_iommu_probe_device,
-	.probe_finalize = intel_iommu_probe_finalize,
	.release_device = intel_iommu_release_device,
	.get_resv_regions = intel_iommu_get_resv_regions,
	.device_group = intel_iommu_device_group,
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct iopf_queue *iopfq;
-	struct page *pages;
	int irq, ret;
-	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
-	if (!pages) {
+	iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
+	if (!iommu->prq) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name);
return -ENOMEM;