Diffstat (limited to 'sys/compat/linuxkpi/common/include/linux/mm.h')
 sys/compat/linuxkpi/common/include/linux/mm.h | 137 ++++++++++++++++++++-----
 1 file changed, 122 insertions(+), 15 deletions(-)
diff --git a/sys/compat/linuxkpi/common/include/linux/mm.h b/sys/compat/linuxkpi/common/include/linux/mm.h
index 00d102b6af4b..156b00a0c0f0 100644
--- a/sys/compat/linuxkpi/common/include/linux/mm.h
+++ b/sys/compat/linuxkpi/common/include/linux/mm.h
@@ -27,8 +27,6 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * $FreeBSD$
*/
#ifndef _LINUXKPI_LINUX_MM_H_
#define _LINUXKPI_LINUX_MM_H_
@@ -37,9 +35,11 @@
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm_types.h>
+#include <linux/mmzone.h>
#include <linux/pfn.h>
#include <linux/list.h>
#include <linux/mmap_lock.h>
+#include <linux/overflow.h>
#include <linux/shrinker.h>
#include <linux/page.h>
@@ -57,6 +57,8 @@ CTASSERT((VM_PROT_ALL & -(1 << 8)) == 0);
#define VM_WRITE VM_PROT_WRITE
#define VM_EXEC VM_PROT_EXECUTE

+#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
+
#define VM_PFNINTERNAL (1 << 8) /* FreeBSD private flag to vm_insert_pfn() */
#define VM_MIXEDMAP (1 << 9)
#define VM_NORESERVE (1 << 10)
@@ -159,6 +161,14 @@ virt_to_head_page(const void *p)
return (virt_to_page(p));
}

+static inline struct folio *
+virt_to_folio(const void *p)
+{
+ struct page *page = virt_to_page(p);
+
+ return (page_folio(page));
+}
+
/*
* Compute log2 of the power of two rounded up count of pages
* needed for size bytes.
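
The comment just above describes get_order(); with the usual PAGE_SIZE of 4096 it works out as follows (illustrative values, not part of the patch):

    MPASS(get_order(1)     == 0);   /* 1 page */
    MPASS(get_order(4096)  == 0);   /* still 1 page */
    MPASS(get_order(4097)  == 1);   /* 2 pages */
    MPASS(get_order(9000)  == 2);   /* 3 pages, rounded up to 4 */
    MPASS(get_order(65536) == 4);   /* 16 pages */
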
@@ -177,6 +187,14 @@ get_order(unsigned long size)
return (order);
}

+/*
+ * Resolve a page into a virtual address.
+ *
+ * NOTE: This function only works for pages allocated by the kernel.
+ */
+void *linux_page_address(const struct page *);
+#define page_address(page) linux_page_address(page)
+
static inline void *
lowmem_page_address(struct page *page)
{
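
The new page_address() pairs naturally with virt_to_folio() from the previous hunk; a hypothetical round trip (the helper name and the MPASS check are illustrative, not part of the patch):

    static void
    folio_round_trip(void)
    {
        struct page *p = alloc_page(GFP_KERNEL);

        if (p == NULL)
            return;
        /* A page allocated by the kernel, so page_address() applies. */
        MPASS(&virt_to_folio(page_address(p))->page == p);
        __free_page(p);
    }
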
@@ -265,18 +283,65 @@ get_page(struct page *page)
vm_page_wire(page);
}

-extern long
-get_user_pages(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **,
- struct vm_area_struct **);
+static inline void
+put_page(struct page *page)
+{
+ /* `__free_page()` takes care of the refcounting (unwire). */
+ __free_page(page);
+}
+
+static inline void
+folio_get(struct folio *folio)
+{
+ get_page(&folio->page);
+}
+
+static inline void
+folio_put(struct folio *folio)
+{
+ put_page(&folio->page);
+}
+
+/*
+ * Linux uses the following "transparent" union so that `release_pages()`
+ * accepts either a list of `struct page` or a list of `struct folio`. This
+ * relies on the fact that a `struct folio` can be cast to a `struct page`.
+ */
+typedef union {
+ struct page **pages;
+ struct folio **folios;
+} release_pages_arg __attribute__ ((__transparent_union__));
+
+void linux_release_pages(release_pages_arg arg, int nr);
+#define release_pages(arg, nr) linux_release_pages((arg), (nr))
+
+extern long
+lkpi_get_user_pages(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **);
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 60500
+#define get_user_pages(start, nr_pages, gup_flags, pages) \
+ lkpi_get_user_pages(start, nr_pages, gup_flags, pages)
+#else
+#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \
+ lkpi_get_user_pages(start, nr_pages, gup_flags, pages)
+#endif
+
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 60500
+static inline long
+pin_user_pages(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages)
+{
+ return (get_user_pages(start, nr_pages, gup_flags, pages));
+}
+#else
static inline long
pin_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
- return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+ return (get_user_pages(start, nr_pages, gup_flags, pages, vmas));
}
+#endif

extern int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
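
Because release_pages_arg is a transparent union, both calls below compile unchanged and land in linux_release_pages() (a minimal sketch; the wrapper function is hypothetical):

    static void
    drop_refs(struct page **pages, struct folio **folios)
    {
        release_pages(pages, 4);    /* matches arg.pages */
        release_pages(folios, 4);   /* matches arg.folios */
    }

The LINUXKPI_VERSION >= 60500 gate mirrors the Linux 6.5 removal of the vmas argument from get_user_pages(); the five-argument compatibility macro simply discards vmas, since lkpi_get_user_pages() never needed it.
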
@@ -306,12 +371,6 @@ pin_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
task, mm, start, nr_pages, gup_flags, pages, vmas);
}

-static inline void
-put_page(struct page *page)
-{
- vm_page_unwire(page, PQ_ACTIVE);
-}
-
#define unpin_user_page(page) put_page(page)
#define unpin_user_pages(pages, npages) release_pages(pages, npages)
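
Pairing the two halves, a hedged sketch of the pin/unpin pattern (assumes the four-argument LINUXKPI_VERSION >= 60500 form; the helper and the FOLL_WRITE flag choice are illustrative):

    static long
    pin_two_pages(unsigned long uaddr, struct page **pages)
    {
        long n;

        n = pin_user_pages(uaddr, 2, FOLL_WRITE, pages);
        if (n > 0)
            unpin_user_pages(pages, n);   /* expands to release_pages() */
        return (n);
    }
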
@@ -323,6 +382,18 @@ vm_get_page_prot(unsigned long vm_flags)
return (vm_flags & VM_PROT_ALL);
}

+static inline void
+vm_flags_set(struct vm_area_struct *vma, unsigned long flags)
+{
+ vma->vm_flags |= flags;
+}
+
+static inline void
+vm_flags_clear(struct vm_area_struct *vma, unsigned long flags)
+{
+ vma->vm_flags &= ~flags;
+}
+
static inline struct page *
vmalloc_to_page(const void *addr)
{
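
These accessors match the newer Linux API that replaced direct writes to vma->vm_flags; a driver's mmap hook might use them like so (illustrative helper, not from the patch):

    static void
    example_adjust_vma(struct vm_area_struct *vma)
    {
        vm_flags_set(vma, VM_MIXEDMAP | VM_NORESERVE);
        vm_flags_clear(vma, VM_EXEC);
    }
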
@@ -335,14 +406,14 @@ vmalloc_to_page(const void *addr)
static inline int
trylock_page(struct page *page)
{
- return (vm_page_trylock(page));
+ return (vm_page_tryxbusy(page));
}

static inline void
unlock_page(struct page *page)
{
- vm_page_unlock(page);
+ vm_page_xunbusy(page);
}

extern int is_vmalloc_addr(const void *addr);
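
With the page lock replaced by the exclusive-busy state, the Linux-side calling pattern stays the same (sketch; the helper is hypothetical):

    static void
    touch_page_locked(struct page *page)
    {
        if (trylock_page(page)) {    /* vm_page_tryxbusy() */
            /* The page is exclusively busied here. */
            unlock_page(page);       /* vm_page_xunbusy() */
        }
    }
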
@@ -369,4 +440,40 @@ might_alloc(gfp_t gfp_mask __unused)
#define is_cow_mapping(flags) (false)

+static inline bool
+want_init_on_free(void)
+{
+ return (false);
+}
+
+static inline unsigned long
+folio_pfn(struct folio *folio)
+{
+ return (page_to_pfn(&folio->page));
+}
+
+static inline long
+folio_nr_pages(struct folio *folio)
+{
+ return (1);
+}
+
+static inline size_t
+folio_size(struct folio *folio)
+{
+ return (PAGE_SIZE);
+}
+
+static inline void
+folio_mark_dirty(struct folio *folio)
+{
+ set_page_dirty(&folio->page);
+}
+
+static inline void *
+folio_address(const struct folio *folio)
+{
+ return (page_address(&folio->page));
+}
+
#endif /* _LINUXKPI_LINUX_MM_H_ */
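
Since a LinuxKPI folio is always a single page (folio_nr_pages() is hard-wired to 1), the new helpers reduce to their struct page equivalents, as this hypothetical consumer shows:

    static void
    folio_props(struct folio *f)
    {
        MPASS(folio_nr_pages(f) == 1);
        MPASS(folio_size(f) == PAGE_SIZE);
        MPASS(folio_pfn(f) == page_to_pfn(&f->page));
        folio_mark_dirty(f);                 /* set_page_dirty(&f->page) */
        memset(folio_address(f), 0, folio_size(f));
    }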