/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * Copyright (c) 2015-2019 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _LINUX_MM_H_
#define _LINUX_MM_H_

#include <linux/errno.h>

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/err.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

struct vm_operations_struct;

static inline struct page *
nth_page(struct page *page, int n)
{
	return page + n;
}

#define PAGE_ALIGN(addr) round_page(addr)

struct vm_fault {
	uintptr_t virtual_address;
};

#define VM_FAULT_NOPAGE		0x0001
#define VM_FAULT_SIGBUS		0x0002
#define VM_FAULT_OOM		0x0004

struct vm_area_struct {
	vm_offset_t	vm_start;
	vm_offset_t	vm_end;
	vm_offset_t	vm_pgoff;
	vm_paddr_t	vm_pfn;		/* PFN for mmap. */
	vm_memattr_t	vm_page_prot;
	void		*vm_private_data;
	int		vm_flags;
	const struct vm_operations_struct *vm_ops;
};

#define VM_DONTDUMP	0x0001
#define VM_DONTEXPAND	0x0002
#define VM_IO		0x0004
#define VM_MIXEDMAP	0x0008

struct vm_operations_struct {
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
	void (*open)(struct vm_area_struct *vma);
	void (*close)(struct vm_area_struct *vma);
};

/*
 * Compute the base-2 logarithm of the power-of-two number of pages
 * needed to hold size bytes.
 */
static inline int
get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> PAGE_SHIFT;
	order = 0;
	while (size) {
		order++;
		size >>= 1;
	}
	return (order);
}
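
/*
 * Worked examples, assuming the common 4KB PAGE_SIZE (illustrative
 * values only, PAGE_SHIFT == 12):
 *
 *	get_order(1)    == 0	(one page)
 *	get_order(4096) == 0	(exactly one page)
 *	get_order(4097) == 1	(two pages)
 *	get_order(8193) == 2	(three pages round up to four)
 */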

/*
 * This only works via mmap ops: we merely record the pfn and protection
 * in the vma here; the actual mapping is established later by the fault
 * handling path.
 */
static inline int
io_remap_pfn_range(struct vm_area_struct *vma,
    unsigned long addr, unsigned long pfn, unsigned long size,
    vm_memattr_t prot)
{
	vma->vm_page_prot = prot;
	vma->vm_pfn = pfn;

	return (0);
}
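
/*
 * Hypothetical driver usage, a sketch only: bar_paddr and the
 * write-combining memory attribute are illustrative assumptions.
 *
 *	io_remap_pfn_range(vma, vma->vm_start, bar_paddr >> PAGE_SHIFT,
 *	    vma->vm_end - vma->vm_start, VM_MEMATTR_WRITE_COMBINING);
 */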

static inline unsigned long
vma_pages(struct vm_area_struct *vma)
{
	unsigned long size;

	size = vma->vm_end - vma->vm_start;

	return size >> PAGE_SHIFT;
}

/*
 * DragonFly's PAGE_MASK is (PAGE_SIZE - 1), the opposite polarity of
 * Linux's PAGE_MASK, so anding with it yields the intra-page offset.
 */
#define offset_in_page(off)	((off) & PAGE_MASK)

static inline void
set_page_dirty(struct page *page)
{
	vm_page_dirty((struct vm_page *)page);
}

/*
 * Allocate multiple contiguous pages. The DragonFly code can only do
 * multiple allocations via the free page reserve. Linux does not appear
 * to restrict the address space, so neither do we.
 */
static inline struct vm_page *
alloc_pages(int flags, u_int order)
{
	size_t bytes = PAGE_SIZE << order;
	struct vm_page *pgs;

	pgs = vm_page_alloc_contig(0LLU, ~0LLU, bytes, bytes, bytes,
				   VM_MEMATTR_DEFAULT);
	return pgs;
}

/*
 * Free multiple contiguous pages allocated with alloc_pages(); the
 * order must match the one used at allocation time.
 */
static inline void
__free_pages(struct vm_page *pgs, u_int order)
{
	size_t bytes = PAGE_SIZE << order;

	vm_page_free_contig(pgs, bytes);
}
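
/*
 * Illustrative pairing (sketch only):
 *
 *	struct vm_page *pgs = alloc_pages(0, 2);	// four contiguous pages
 *	if (pgs != NULL)
 *		__free_pages(pgs, 2);			// order must match
 */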

static inline void
get_page(struct vm_page *page)
{
	vm_page_hold(page);
}

extern vm_paddr_t Realmem;

static inline unsigned long get_num_physpages(void)
{
	return Realmem / PAGE_SIZE;
}

int is_vmalloc_addr(const void *x);

/* Stub: nothing to invalidate in this compatibility layer. */
static inline void
unmap_mapping_range(struct address_space *mapping,
    loff_t const holebegin, loff_t const holelen, int even_cows)
{
}

#endif	/* _LINUX_MM_H_ */