path: root/sys/x86/iommu/intel_dmar.h
Diffstat (limited to 'sys/x86/iommu/intel_dmar.h')
-rw-r--r--	sys/x86/iommu/intel_dmar.h	116
1 file changed, 32 insertions(+), 84 deletions(-)
diff --git a/sys/x86/iommu/intel_dmar.h b/sys/x86/iommu/intel_dmar.h
index e20144094c80..188e40dec36c 100644
--- a/sys/x86/iommu/intel_dmar.h
+++ b/sys/x86/iommu/intel_dmar.h
@@ -87,15 +87,15 @@ struct dmar_ctx {
#define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->iodom.lock)
#define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED)
-#define DMAR2IOMMU(dmar) &((dmar)->iommu)
+#define DMAR2IOMMU(dmar) (&((dmar)->iommu))
#define IOMMU2DMAR(dmar) \
__containerof((dmar), struct dmar_unit, iommu)
-#define DOM2IODOM(domain) &((domain)->iodom)
+#define DOM2IODOM(domain) (&((domain)->iodom))
#define IODOM2DOM(domain) \
__containerof((domain), struct dmar_domain, iodom)
-#define CTX2IOCTX(ctx) &((ctx)->context)
+#define CTX2IOCTX(ctx) (&((ctx)->context))
#define IOCTX2CTX(ctx) \
__containerof((ctx), struct dmar_ctx, context)
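
The change in this hunk is purely defensive parenthesization of the conversion macros. It matters because postfix -> binds tighter than unary &, so the old expansion breaks as soon as the macro result is dereferenced directly, which is exactly what the reworked DMAR_LOCK macros later in this diff do. A toy sketch of the failure mode, with hypothetical stand-in types rather than the kernel's:

struct toy_iommu { int lock; };
struct toy_dmar  { struct toy_iommu iommu; };

#define	TOY2IOMMU(u)	(&((u)->iommu))	/* the new, parenthesized form */

static int *
toy_lockp(struct toy_dmar *u)
{
	/*
	 * With the old form, TOY2IOMMU(u)->lock would expand to
	 * &((u)->iommu)->lock, which parses as &(((u)->iommu)->lock);
	 * (u)->iommu is a struct, not a pointer, so -> fails to
	 * compile.  The parenthesized expansion,
	 * (&((u)->iommu))->lock, is well-formed in any context.
	 */
	return (&TOY2IOMMU(u)->lock);
}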
@@ -103,27 +103,13 @@ struct dmar_ctx {
#define CTX2DMAR(ctx) (CTX2DOM(ctx)->dmar)
#define DOM2DMAR(domain) ((domain)->dmar)
-struct dmar_msi_data {
- int irq;
- int irq_rid;
- struct resource *irq_res;
- void *intr_handle;
- int (*handler)(void *);
- int msi_data_reg;
- int msi_addr_reg;
- int msi_uaddr_reg;
- void (*enable_intr)(struct dmar_unit *);
- void (*disable_intr)(struct dmar_unit *);
- const char *name;
-};
-
#define DMAR_INTR_FAULT 0
#define DMAR_INTR_QI 1
#define DMAR_INTR_TOTAL 2
struct dmar_unit {
struct iommu_unit iommu;
- device_t dev;
+ struct x86_unit_common x86c;
uint16_t segment;
uint64_t base;
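
Replacing the unit's private device_t with an embedded struct x86_unit_common folds per-driver state (the struct dmar_msi_data array deleted above, plus the QI queue fields deleted below) into a structure presumably shared with the other x86 IOMMU driver. The real definition lives in the common x86 IOMMU header; the following is only a hypothetical sketch of what such a structure carries, not the actual layout:

/* Hypothetical illustration only. */
struct x86_iommu_msi_sketch {
	int			irq, irq_rid;
	struct resource		*irq_res;
	void			*intr_handle;
	int			(*handler)(void *);
	const char		*name;
};

struct x86_unit_common_sketch {
	device_t		dev;		/* was dmar_unit.dev */
	struct x86_iommu_msi_sketch intrs[2];	/* fault + QI */
	/* ... invalidation-queue cursors, TLB-flush queue, ... */
};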
@@ -131,8 +117,6 @@ struct dmar_unit {
int reg_rid;
struct resource *regs;
- struct dmar_msi_data intrs[DMAR_INTR_TOTAL];
-
/* Hardware registers cache */
uint32_t hw_ver;
uint64_t hw_cap;
@@ -156,17 +140,6 @@ struct dmar_unit {
/* QI */
int qi_enabled;
- char *inv_queue;
- vm_size_t inv_queue_size;
- uint32_t inv_queue_avail;
- uint32_t inv_queue_tail;
- volatile uint32_t inv_waitd_seq_hw; /* hw writes there on wait
-                                        descr completion */
- uint64_t inv_waitd_seq_hw_phys;
- uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */
- u_int inv_waitd_gen; /* seq number generation AKA seq overflows */
- u_int inv_seq_waiters; /* count of waiters for seq */
- u_int inv_queue_full; /* informational counter */
/* IR */
int ir_enabled;
@@ -174,41 +147,11 @@ struct dmar_unit {
dmar_irte_t *irt;
u_int irte_cnt;
vmem_t *irtids;
-
- /*
- * Delayed freeing of map entries queue processing:
- *
- * tlb_flush_head and tlb_flush_tail are used to implement a FIFO
- * queue that supports concurrent dequeues and enqueues. However,
- * there can only be a single dequeuer (accessing tlb_flush_head) and
- * a single enqueuer (accessing tlb_flush_tail) at a time. Since the
- * unit's qi_task is the only dequeuer, it can access tlb_flush_head
- * without any locking. In contrast, there may be multiple enqueuers,
- * so the enqueuers acquire the iommu unit lock to serialize their
- * accesses to tlb_flush_tail.
- *
- * In this FIFO queue implementation, the key to enabling concurrent
- * dequeues and enqueues is that the dequeuer never needs to access
- * tlb_flush_tail and the enqueuer never needs to access
- * tlb_flush_head. In particular, tlb_flush_head and tlb_flush_tail
- * are never NULL, so neither a dequeuer nor an enqueuer ever needs to
- * update both. Instead, tlb_flush_head always points to a "zombie"
- * struct, which previously held the last dequeued item. Thus, the
- * zombie's next field actually points to the struct holding the first
- * item in the queue. When an item is dequeued, the current zombie is
- * finally freed, and the struct that held the just dequeued item
- * becomes the new zombie. When the queue is empty, tlb_flush_tail
- * also points to the zombie.
- */
- struct iommu_map_entry *tlb_flush_head;
- struct iommu_map_entry *tlb_flush_tail;
- struct task qi_task;
- struct taskqueue *qi_taskqueue;
};
-#define DMAR_LOCK(dmar) mtx_lock(&(dmar)->iommu.lock)
-#define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->iommu.lock)
-#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED)
+#define DMAR_LOCK(dmar) mtx_lock(&DMAR2IOMMU(dmar)->lock)
+#define DMAR_UNLOCK(dmar) mtx_unlock(&DMAR2IOMMU(dmar)->lock)
+#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&DMAR2IOMMU(dmar)->lock, MA_OWNED)
#define DMAR_FAULT_LOCK(dmar) mtx_lock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock)
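
The comment deleted in this hunk documents a single-consumer, multiple-producer FIFO whose head always points at a "zombie" node that held the previously dequeued item; because the dequeuer never touches tlb_flush_tail and enqueuers never touch tlb_flush_head, only the enqueue side needs the unit lock. A userspace sketch of the same scheme, with illustrative names (the kernel version links iommu_map_entry structures, serializes enqueuers with DMAR_LOCK, and relies on ordered stores for publication; error handling is omitted here):

#include <stdlib.h>

struct zent {
	struct zent	*next;
	void		*payload;
};

struct zfifo {
	struct zent	*head;	/* current zombie; dequeuer only */
	struct zent	*tail;	/* enqueuers only, under a lock */
};

/* Start with a dummy zombie so head and tail are never NULL. */
static void
zfifo_init(struct zfifo *q)
{
	q->head = q->tail = calloc(1, sizeof(struct zent));
}

/* Enqueue: callers serialize on a lock; head is never touched. */
static void
zfifo_enqueue(struct zfifo *q, struct zent *e)
{
	e->next = NULL;
	q->tail->next = e;
	q->tail = e;
}

/*
 * Dequeue: single consumer, no lock.  The node that held the
 * returned payload becomes the new zombie; the old zombie is no
 * longer reachable by enqueuers and can finally be freed.
 */
static void *
zfifo_dequeue(struct zfifo *q)
{
	struct zent *zombie, *first;
	void *payload;

	zombie = q->head;
	first = zombie->next;
	if (first == NULL)
		return (NULL);	/* empty: tail also equals zombie */
	payload = first->payload;
	q->head = first;
	free(zombie);
	return (payload);
}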
@@ -223,6 +166,8 @@ struct dmar_unit {
#define DMAR_BARRIER_RMRR 0
#define DMAR_BARRIER_USEQ 1
+SYSCTL_DECL(_hw_iommu_dmar);
+
struct dmar_unit *dmar_find(device_t dev, bool verbose);
struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);
@@ -232,22 +177,15 @@ bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
int domain_set_agaw(struct dmar_domain *domain, int mgaw);
int dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr,
bool allow_less);
-vm_pindex_t pglvl_max_pages(int pglvl);
int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
-iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
iommu_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size,
iommu_gaddr_t *isizep);
-struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
-void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
-void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
- struct sf_buf **sf);
-void dmar_unmap_pgtbl(struct sf_buf *sf);
int dmar_load_root_entry_ptr(struct dmar_unit *unit);
int dmar_inv_ctx_glob(struct dmar_unit *unit);
int dmar_inv_iotlb_glob(struct dmar_unit *unit);
int dmar_flush_write_bufs(struct dmar_unit *unit);
-void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst);
+void dmar_flush_pte_to_ram(struct dmar_unit *unit, iommu_pte_t *dst);
void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
int dmar_disable_protected_regions(struct dmar_unit *unit);
@@ -262,14 +200,14 @@ uint64_t dmar_get_timeout(void);
void dmar_update_timeout(uint64_t newval);
int dmar_fault_intr(void *arg);
-void dmar_enable_fault_intr(struct dmar_unit *unit);
-void dmar_disable_fault_intr(struct dmar_unit *unit);
+void dmar_enable_fault_intr(struct iommu_unit *unit);
+void dmar_disable_fault_intr(struct iommu_unit *unit);
int dmar_init_fault_log(struct dmar_unit *unit);
void dmar_fini_fault_log(struct dmar_unit *unit);
int dmar_qi_intr(void *arg);
-void dmar_enable_qi_intr(struct dmar_unit *unit);
-void dmar_disable_qi_intr(struct dmar_unit *unit);
+void dmar_enable_qi_intr(struct iommu_unit *unit);
+void dmar_disable_qi_intr(struct iommu_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
void dmar_qi_invalidate_locked(struct dmar_domain *domain,
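
The fault and QI interrupt enable/disable hooks now take the generic struct iommu_unit * instead of struct dmar_unit *, so type-agnostic x86 interrupt code can invoke them; the implementation can recover its own unit via the IOMMU2DMAR() containerof macro defined at the top of this header. A hedged sketch of the presumable shape (the body is illustrative only):

void
dmar_enable_fault_intr(struct iommu_unit *iommu)
{
	struct dmar_unit *unit;

	/* Recover the driver-specific unit from the embedded one. */
	unit = IOMMU2DMAR(iommu);
	/* ... unmask the fault-event interrupt on "unit" ... */
}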
@@ -286,8 +224,8 @@ vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
void put_idmap_pgtbl(vm_object_t obj);
void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
iommu_gaddr_t size);
-int domain_alloc_pgtbl(struct dmar_domain *domain);
-void domain_free_pgtbl(struct dmar_domain *domain);
+int dmar_domain_alloc_pgtbl(struct dmar_domain *domain);
+void dmar_domain_free_pgtbl(struct dmar_domain *domain);
extern const struct iommu_domain_map_ops dmar_domain_map_ops;
int dmar_dev_depth(device_t child);
@@ -299,10 +237,16 @@ struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
bool id_mapped, bool rmrr_init);
int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
-void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
-void dmar_free_ctx(struct dmar_ctx *ctx);
+void dmar_free_ctx_locked_method(struct iommu_unit *dmar,
+ struct iommu_ctx *ctx);
+void dmar_free_ctx_method(struct iommu_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
-void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free);
+struct iommu_ctx *dmar_get_ctx(struct iommu_unit *iommu, device_t dev,
+ uint16_t rid, bool id_mapped, bool rmrr_init);
+void dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free,
+ bool cansleep);
+void dmar_domain_unload(struct iommu_domain *iodom,
+ struct iommu_map_entries_tailq *entries, bool cansleep);
void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
int dev_busno, const void *dev_path, int dev_path_len,
@@ -314,11 +258,15 @@ void dmar_quirks_pre_use(struct iommu_unit *dmar);
int dmar_init_irt(struct dmar_unit *unit);
void dmar_fini_irt(struct dmar_unit *unit);
+int dmar_alloc_msi_intr(device_t src, u_int *cookies, u_int count);
+int dmar_map_msi_intr(device_t src, u_int cpu, u_int vector, u_int cookie,
+ uint64_t *addr, uint32_t *data);
+int dmar_unmap_msi_intr(device_t src, u_int cookie);
+int dmar_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
+ bool activehi, int irq, u_int *cookie, uint32_t *hi, uint32_t *lo);
+int dmar_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie);
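
The newly exported interrupt-remapping entry points follow an allocate/map/unmap pattern. A hypothetical call sequence for an MSI source (the CPU, vector, and error handling below are placeholders, not taken from the kernel):

static int
example_remap_msi(device_t dev)
{
	uint64_t addr;
	uint32_t data;
	u_int cookie;
	int error;

	/* Reserve one remapping-table cookie for this source. */
	error = dmar_alloc_msi_intr(dev, &cookie, 1);
	if (error != 0)
		return (error);

	/*
	 * Translate (cpu, vector) into the remapped MSI address/data
	 * pair the device's MSI capability should be programmed with.
	 */
	error = dmar_map_msi_intr(dev, 0 /* cpu */, 48 /* vector */,
	    cookie, &addr, &data);
	if (error != 0) {
		(void)dmar_unmap_msi_intr(dev, cookie);
		return (error);
	}
	/* ... write addr/data into the device ... */
	return (0);
}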
-extern iommu_haddr_t dmar_high;
extern int haw;
-extern int dmar_tbl_pagecnt;
-extern int dmar_batch_coalesce;
extern int dmar_rmrr_enable;
static inline uint32_t