amd_iommu
Regular
4.4: ✅struct amd_iommu {
struct list_head list;
int index;
spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u8 *evt_buf;
u8 *ppr_log;
bool int_enabled;
bool need_sync;
struct device *iommu_dev;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
};
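The structure opens with a list_head, so every registered AMD IOMMU can be walked from the driver's global list. A minimal sketch of such a walk, assuming access to the driver's private amd_iommu_types.h for the structure definition and assuming the driver-internal list head is named amd_iommu_list as in the upstream tree; the helper name and the report format are illustration only, and only fields present in every listing below are touched:

	#include <linux/list.h>
	#include <linux/printk.h>

	/* Illustration only: iterate the global AMD IOMMU list via the
	 * leading list member and report a few stable fields. */
	static void report_amd_iommus(void)
	{
		struct amd_iommu *iommu;

		list_for_each_entry(iommu, &amd_iommu_list, list)
			pr_info("iommu %d: devid 0x%04x, mmio 0x%llx-0x%llx\n",
				iommu->index, iommu->devid,
				iommu->mmio_phys, iommu->mmio_phys_end);
	}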
4.8: ✅struct amd_iommu {
struct list_head list;
int index;
spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u8 *evt_buf;
u8 *ppr_log;
bool int_enabled;
bool need_sync;
struct device *iommu_dev;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
};
4.10: ✅struct amd_iommu {
struct list_head list;
int index;
spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct device *iommu_dev;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
volatile u64 cmd_sem;
};
4.13: ✅struct amd_iommu {
struct list_head list;
int index;
spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
volatile u64 cmd_sem;
};
4.15: ✅struct amd_iommu {
struct list_head list;
int index;
spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 cmd_sem;
};
4.18: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 cmd_sem;
};
5.0: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 cmd_sem;
};
5.3: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 cmd_sem;
struct irq_affinity_notify intcapxt_notify;
};
5.4: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 cmd_sem;
struct irq_affinity_notify intcapxt_notify;
};
5.8: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 cmd_sem;
struct irq_affinity_notify intcapxt_notify;
};
5.11: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 *cmd_sem;
u64 cmd_sem_val;
struct irq_affinity_notify intcapxt_notify;
};
5.13: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 *cmd_sem;
u64 cmd_sem_val;
struct irq_affinity_notify intcapxt_notify;
};
5.15: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 *cmd_sem;
u64 cmd_sem_val;
};
5.19: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 *cmd_sem;
u64 cmd_sem_val;
};
6.2: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
u64 features2;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
struct amd_iommu_pci_seg *pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 *cmd_sem;
u64 cmd_sem_val;
};
6.5: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
u64 features2;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
struct amd_iommu_pci_seg *pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
bool irtcachedis_enabled;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 *cmd_sem;
atomic64_t cmd_sem_val;
};
6.8: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
u64 features2;
u16 devid;
u16 cap_ptr;
struct amd_iommu_pci_seg *pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
unsigned char evt_irq_name[16];
u8 *ppr_log;
unsigned char ppr_irq_name[16];
u8 *ga_log;
unsigned char ga_irq_name[16];
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
bool irtcachedis_enabled;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 *cmd_sem;
atomic64_t cmd_sem_val;
};
arm64: Absent ⚠️
armhf: Absent ⚠️
ppc64el: Absent ⚠️
riscv64: Absent ⚠️
aws: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 cmd_sem;
struct irq_affinity_notify intcapxt_notify;
};
azure: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 cmd_sem;
struct irq_affinity_notify intcapxt_notify;
};
gcp: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 cmd_sem;
struct irq_affinity_notify intcapxt_notify;
};
lowlatency: ✅struct amd_iommu {
struct list_head list;
int index;
raw_spinlock_t lock;
struct pci_dev *dev;
struct pci_dev *root_pdev;
u64 mmio_phys;
u64 mmio_phys_end;
u8 *mmio_base;
u32 cap;
u8 acpi_flags;
u64 features;
bool is_iommu_v2;
u16 devid;
u16 cap_ptr;
u16 pci_seg;
u64 exclusion_start;
u64 exclusion_length;
u8 *cmd_buf;
u32 cmd_buf_head;
u32 cmd_buf_tail;
u8 *evt_buf;
u8 *ppr_log;
u8 *ga_log;
u8 *ga_log_tail;
bool int_enabled;
bool need_sync;
struct iommu_device iommu;
u32 stored_addr_lo;
u32 stored_addr_hi;
u32 stored_l1[108];
u32 stored_l2[131];
u8 max_banks;
u8 max_counters;
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
u32 flags;
volatile u64 cmd_sem;
struct irq_affinity_notify intcapxt_notify;
};
Regular
4.4 and 4.8 ✅
4.8 and 4.10 ⚠️ added: u8 *ga_log, u8 *ga_log_tail, struct amd_irte_ops *irte_ops, volatile u64 cmd_sem
4.10 and 4.13 ⚠️ added: u32 cmd_buf_head, u32 cmd_buf_tail, struct iommu_device iommu; removed: struct device *iommu_dev (sketch below)
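For an out-of-tree consumer this swap means the sysfs device is no longer a bare pointer but sits inside the embedded struct iommu_device. A hedged accessor sketch: the helper name is made up, the version gate follows the 4.10/4.13 boundary in these listings, and it assumes struct iommu_device exposes its device as a dev member the way the upstream header does:

	#include <linux/version.h>

	/* Illustration only; gate taken from the listings above. */
	static inline struct device *amd_iommu_sysfs_dev(struct amd_iommu *iommu)
	{
	#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
		return iommu->iommu_dev;   /* bare struct device * in the 4.4-4.10 listings */
	#else
		return iommu->iommu.dev;   /* assumed dev member of the embedded iommu_device */
	#endif
	}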
4.13 and 4.15 ⚠️ added: u32 flags
4.15 and 4.18 ⚠️ changed: spinlock_t lock ➡️ raw_spinlock_t lock (locking sketch below)
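The lock type switch also changes the calls a consumer must use, since spin_lock_irqsave() will not accept a raw_spinlock_t. A minimal version-gated sketch; the gate follows the 4.15/4.18 boundary seen here, and the function exists only to show the two lock APIs side by side:

	#include <linux/spinlock.h>
	#include <linux/version.h>

	/* Illustration only: take and release iommu->lock on either side
	 * of the spinlock_t -> raw_spinlock_t change. */
	static void with_iommu_lock(struct amd_iommu *iommu)
	{
		unsigned long flags;

	#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
		spin_lock_irqsave(&iommu->lock, flags);
		/* ... touch state guarded by the lock ... */
		spin_unlock_irqrestore(&iommu->lock, flags);
	#else
		raw_spin_lock_irqsave(&iommu->lock, flags);
		/* ... touch state guarded by the lock ... */
		raw_spin_unlock_irqrestore(&iommu->lock, flags);
	#endif
	}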
4.18 and 5.0 ✅
5.0 and 5.3 ⚠️ added: struct irq_affinity_notify intcapxt_notify
5.3 and 5.4 ✅
5.4 and 5.8 ✅
5.8 and 5.11 ⚠️ added: u64 cmd_sem_val; changed: volatile u64 cmd_sem ➡️ volatile u64 *cmd_sem (sketch below)
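From the 5.11 listing on, the completion-wait semaphore is reached through a pointer rather than stored in the structure, with a running counter kept alongside it in cmd_sem_val. A sketch of reading the current semaphore value on both sides of that boundary; the helper is illustration only and the gate is taken from these listings:

	#include <linux/version.h>

	/* Illustration only: read the completion-wait semaphore value. */
	static u64 read_completion_sem(struct amd_iommu *iommu)
	{
	#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)
		return iommu->cmd_sem;      /* value embedded in the structure */
	#else
		return *iommu->cmd_sem;     /* structure now holds a pointer to the slot */
	#endif
	}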
5.11 and 5.13 ✅
5.13 and 5.15 ⚠️ removed: struct irq_affinity_notify intcapxt_notify
5.15 and 5.19 ✅
5.19 and 6.2 ⚠️ added: u64 features2; removed: struct irq_domain *msi_domain; changed: u16 pci_seg ➡️ struct amd_iommu_pci_seg *pci_seg (sketch below)
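The PCI segment turns from a plain number into a pointer to a per-segment structure. A hedged sketch of recovering the segment number across the 5.19/6.2 boundary shown here; the helper name is made up, and it assumes struct amd_iommu_pci_seg carries the segment number in an id field, as in the upstream driver header:

	#include <linux/version.h>

	/* Illustration only: return the PCI segment number. */
	static u16 iommu_pci_segment(struct amd_iommu *iommu)
	{
	#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
		return iommu->pci_seg;        /* plain segment number */
	#else
		return iommu->pci_seg->id;    /* assumed id field of the per-segment struct */
	#endif
	}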
6.2 and 6.5 ⚠️ added: bool irtcachedis_enabled; changed: u64 cmd_sem_val ➡️ atomic64_t cmd_sem_val (sketch below)
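Plain arithmetic on cmd_sem_val no longer compiles once it becomes an atomic64_t, so the atomic accessors have to be used instead. A sketch of bumping and returning the counter, gated on the 6.2/6.5 boundary from these listings; the helper is illustration only:

	#include <linux/atomic.h>
	#include <linux/version.h>

	/* Illustration only: advance the completion-wait counter by one. */
	static u64 next_completion_value(struct amd_iommu *iommu)
	{
	#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 5, 0)
		return ++iommu->cmd_sem_val;                      /* plain u64 counter */
	#else
		return (u64)atomic64_add_return(1, &iommu->cmd_sem_val);
	#endif
	}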
6.5 and 6.8 ⚠️ added: unsigned char evt_irq_name[16], unsigned char ppr_irq_name[16], unsigned char ga_irq_name[16]; removed: bool is_iommu_v2 (sketch below)
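The is_iommu_v2 flag is gone from the 6.8 listing, so anything that read it needs a guard; its replacement in the 6.8 driver is outside these listings, so the sketch only shows the guard and leaves the newer path as an explicit placeholder:

	#include <linux/version.h>

	/* Illustration only: version-guarded read of the removed flag. */
	static bool iommu_reports_v2(struct amd_iommu *iommu)
	{
	#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0)
		return iommu->is_iommu_v2;
	#else
		/* Placeholder: the 6.8 listing drops the field; a real consumer
		 * would switch to whatever feature test the newer driver provides. */
		return false;
	#endif
	}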
generic and aws ✅
generic and azure ✅
generic and gcp ✅
generic and lowlatency ✅