/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _DEV_IOMMU_IOMMU_H_
#define _DEV_IOMMU_IOMMU_H_

#include <dev/iommu/iommu_types.h>

struct bus_dma_tag_common;
struct iommu_map_entry;
TAILQ_HEAD(iommu_map_entries_tailq, iommu_map_entry);

RB_HEAD(iommu_gas_entries_tree, iommu_map_entry);
RB_PROTOTYPE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
    iommu_gas_cmp_entries);

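/*
 * Queued-invalidation (QI) generation/sequence pair, used to track and
 * wait for completion of hardware invalidation requests.
 */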
struct iommu_qi_genseq {
	u_int gen;
	uint32_t seq;
};

struct iommu_map_entry {
	iommu_gaddr_t start;		/* Start of the mapped GAS range */
	iommu_gaddr_t end;		/* End of the mapped GAS range */
	iommu_gaddr_t first;		/* Least start in subtree */
	iommu_gaddr_t last;		/* Greatest end in subtree */
	iommu_gaddr_t free_down;	/* Max free space below the
					   current R/B tree node */
	u_int flags;
	TAILQ_ENTRY(iommu_map_entry) dmamap_link; /* Link for dmamap entries */
	RB_ENTRY(iommu_map_entry) rb_entry;	 /* Links for domain entries */
	TAILQ_ENTRY(iommu_map_entry) unroll_link; /* Link for unroll after
						    dmamap_load failure */
	struct iommu_domain *domain;
	struct iommu_qi_genseq gseq;
};

struct iommu_unit {
	struct mtx lock;
	device_t dev;
	int unit;

	int dma_enabled;

	/* Busdma delayed map load */
	struct task dmamap_load_task;
	TAILQ_HEAD(, bus_dmamap_iommu) delayed_maps;
	struct taskqueue *delayed_taskqueue;

	/*
	 * Bitmap of buses for which context must ignore slot:func,
	 * duplicating the page table pointer into all context table
	 * entries.  This is a client-controlled quirk to support some
	 * NTBs.
	 */
	uint32_t buswide_ctxs[(PCI_BUSMAX + 1) / NBBY / sizeof(uint32_t)];
};

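/*
 * Driver-provided page table operations: establish and remove
 * translations for a range of the domain's guest address space.
 */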
struct iommu_domain_map_ops {
	int (*map)(struct iommu_domain *domain, iommu_gaddr_t base,
	    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
	int (*unmap)(struct iommu_domain *domain, iommu_gaddr_t base,
	    iommu_gaddr_t size, int flags);
};

/*
 * Locking annotations:
 * (u) - Protected by iommu unit lock
 * (d) - Protected by domain lock
 * (c) - Immutable after initialization
 */

struct iommu_domain {
	struct iommu_unit *iommu;	/* (c) */
	const struct iommu_domain_map_ops *ops;
	struct mtx lock;		/* (c) */
	struct task unload_task;	/* (c) */
	u_int entries_cnt;		/* (d) */
	struct iommu_map_entries_tailq unload_entries; /* (d) Entries to
							 unload */
	struct iommu_gas_entries_tree rb_root; /* (d) */
	iommu_gaddr_t end;		/* (c) Highest address + 1 in
					   the guest AS */
	struct iommu_map_entry *first_place, *last_place; /* (d) */
	struct iommu_map_entry *msi_entry; /* (d) Arch-specific */
	iommu_gaddr_t msi_base;		/* (d) Arch-specific */
	vm_paddr_t msi_phys;		/* (d) Arch-specific */
	u_int flags;			/* (u) */
};

struct iommu_ctx {
	struct iommu_domain *domain;	/* (c) */
	struct bus_dma_tag_iommu *tag;	/* (c) Root tag */
	u_long loads;			/* atomic updates, for stat only */
	u_long unloads;			/* same */
	u_int flags;			/* (u) */
	uint16_t rid;			/* (c) pci RID */
};

/* struct iommu_ctx flags */
#define	IOMMU_CTX_FAULTED	0x0001	/* Fault was reported,
					   last_fault_rec is valid */
#define	IOMMU_CTX_DISABLED	0x0002	/* Device is disabled, the
					   ephemeral reference is kept
					   to prevent context destruction */

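/* struct iommu_domain flags */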
#define	IOMMU_DOMAIN_GAS_INITED		0x0001	/* Guest address space
						   allocator initialized */
#define	IOMMU_DOMAIN_PGTBL_INITED	0x0002	/* Page tables initialized */
#define	IOMMU_DOMAIN_IDMAP		0x0010	/* Domain uses identity
						   page table */
#define	IOMMU_DOMAIN_RMRR		0x0020	/* Domain contains RMRR entry,
						   cannot be turned off */

#define	IOMMU_LOCK(unit)		mtx_lock(&(unit)->lock)
#define	IOMMU_UNLOCK(unit)		mtx_unlock(&(unit)->lock)
#define	IOMMU_ASSERT_LOCKED(unit)	mtx_assert(&(unit)->lock, MA_OWNED)

#define	IOMMU_DOMAIN_LOCK(dom)		mtx_lock(&(dom)->lock)
#define	IOMMU_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->lock)
#define	IOMMU_DOMAIN_ASSERT_LOCKED(dom)	mtx_assert(&(dom)->lock, MA_OWNED)

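/*
 * Return true if the range [start, start + size) does not cross the
 * next multiple of boundary above start; a boundary of zero means no
 * restriction.
 */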
static inline bool
iommu_test_boundary(iommu_gaddr_t start, iommu_gaddr_t size,
    iommu_gaddr_t boundary)
{

	if (boundary == 0)
		return (true);
	return (start + size <= ((start + boundary) & ~(boundary - 1)));
}

void iommu_free_ctx(struct iommu_ctx *ctx);
void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ctx);
struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev,
    uint16_t rid, bool id_mapped, bool rmrr_init);
struct iommu_unit *iommu_find(device_t dev, bool verbose);
void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free);
void iommu_domain_unload(struct iommu_domain *domain,
    struct iommu_map_entries_tailq *entries, bool cansleep);

struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *iommu,
    device_t dev, bool rmrr);
device_t iommu_get_requester(device_t dev, uint16_t *rid);
int iommu_init_busdma(struct iommu_unit *unit);
void iommu_fini_busdma(struct iommu_unit *unit);
struct iommu_map_entry *iommu_map_alloc_entry(struct iommu_domain *iodom,
    u_int flags);
void iommu_map_free_entry(struct iommu_domain *, struct iommu_map_entry *);
int iommu_map(struct iommu_domain *iodom,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
int iommu_map_region(struct iommu_domain *domain,
    struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);

void iommu_gas_init_domain(struct iommu_domain *domain);
void iommu_gas_fini_domain(struct iommu_domain *domain);
struct iommu_map_entry *iommu_gas_alloc_entry(struct iommu_domain *domain,
    u_int flags);
void iommu_gas_free_entry(struct iommu_domain *domain,
    struct iommu_map_entry *entry);
void iommu_gas_free_space(struct iommu_domain *domain,
    struct iommu_map_entry *entry);
int iommu_gas_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
void iommu_gas_free_region(struct iommu_domain *domain,
    struct iommu_map_entry *entry);
int iommu_gas_map_region(struct iommu_domain *domain,
    struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
int iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end, struct iommu_map_entry **entry0);
int iommu_gas_reserve_region_extend(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end);

void iommu_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
bool iommu_is_buswide_ctx(struct iommu_unit *unit, u_int busno);
void iommu_domain_init(struct iommu_unit *unit, struct iommu_domain *domain,
    const struct iommu_domain_map_ops *ops);
void iommu_domain_fini(struct iommu_domain *domain);

bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);

bus_dma_tag_t iommu_get_dma_tag(device_t dev, device_t child);
struct iommu_ctx *iommu_get_dev_ctx(device_t dev);
struct iommu_domain *iommu_get_ctx_domain(struct iommu_ctx *ctx);

SYSCTL_DECL(_hw_iommu);

#endif /* !_DEV_IOMMU_IOMMU_H_ */