/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* vma.h
*
* Core VMA manipulation API implemented in vma.c.
*/
#ifndef __MM_VMA_H
#define __MM_VMA_H
/*
* VMA lock generalization
*/
struct vma_prepare {
struct vm_area_struct *vma;
struct vm_area_struct *adj_next;
struct file *file;
struct address_space *mapping;
struct anon_vma *anon_vma;
struct vm_area_struct *insert;
struct vm_area_struct *remove;
struct vm_area_struct *remove2;
};
struct unlink_vma_file_batch {
int count;
struct vm_area_struct *vmas[8];
};
/*
* vma munmap operation
*/
struct vma_munmap_struct {
struct vma_iterator *vmi;
struct mm_struct *mm;
struct vm_area_struct *vma; /* The first vma to munmap */
struct vm_area_struct *prev; /* vma before the munmap area */
struct vm_area_struct *next; /* vma after the munmap area */
struct list_head *uf; /* Userfaultfd list_head */
unsigned long start; /* Aligned start addr (inclusive) */
unsigned long end; /* Aligned end addr (exclusive) */
int vma_count; /* Number of vmas that will be removed */
unsigned long nr_pages; /* Number of pages being removed */
unsigned long locked_vm; /* Number of locked pages */
unsigned long nr_accounted; /* Number of VM_ACCOUNT pages */
unsigned long exec_vm;
unsigned long stack_vm;
unsigned long data_vm;
bool unlock; /* Unlock after the munmap */
};
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif
/* Required for expand_downwards(). */
void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);
/* Required for expand_downwards(). */
void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);
/* Required for do_brk_flags(). */
void vma_prepare(struct vma_prepare *vp);
/* Required for do_brk_flags(). */
void init_vma_prep(struct vma_prepare *vp,
struct vm_area_struct *vma);
/* Required for do_brk_flags(). */
void vma_complete(struct vma_prepare *vp,
struct vma_iterator *vmi, struct mm_struct *mm);
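/*
 * NOTE: vma_prepare() and vma_complete() bracket a VMA modification: the
 * former takes the rmap/file locks described by struct vma_prepare, the
 * latter drops them and finalizes any insert/remove. A rough sketch of the
 * pattern (illustrative only - see the callers in vma.c for the real
 * sequence, which also updates the maple tree and accounting):
 *
 *	struct vma_prepare vp;
 *
 *	init_vma_prep(&vp, vma);
 *	vma_prepare(&vp);
 *	// ... adjust vma->vm_start / vm_end / vm_pgoff and the tree ...
 *	vma_complete(&vp, vmi, vma->vm_mm);
 */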
int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff,
struct vm_area_struct *next);
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff);
/*
* init_vma_munmap() - Initializer wrapper for vma_munmap_struct
* @vms: The vma munmap struct
* @vmi: The vma iterator
* @vma: The first vm_area_struct to munmap
* @start: The aligned start address to munmap
* @end: The aligned end address to munmap
* @uf: The userfaultfd list_head
* @unlock: Unlock after the operation. Only unlocked on success
*/
static inline void init_vma_munmap(struct vma_munmap_struct *vms,
struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, struct list_head *uf,
bool unlock)
{
vms->vmi = vmi;
vms->vma = vma;
if (vma) {
vms->mm = vma->vm_mm;
vms->start = start;
vms->end = end;
} else {
vms->mm = NULL;
vms->start = vms->end = 0;
}
vms->unlock = unlock;
vms->uf = uf;
vms->vma_count = 0;
vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
}
int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
struct ma_state *mas_detach);
void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
struct ma_state *mas_detach);
/*
* abort_munmap_vmas - Undo any munmap work and free resources
*
* Reattach any detached vmas and free up the maple tree used to track the vmas.
*/
static inline void abort_munmap_vmas(struct ma_state *mas_detach)
{
struct vm_area_struct *vma;
mas_set(mas_detach, 0);
mas_for_each(mas_detach, vma, ULONG_MAX)
vma_mark_detached(vma, false);
__mt_destroy(mas_detach->tree);
}
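/*
 * Illustrative only: the helpers above form a gather/complete pair. A rough
 * sketch of the intended flow, loosely modelled on do_vmi_align_munmap()
 * (error handling elided; details may differ):
 *
 *	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
 *	error = vms_gather_munmap_vmas(&vms, &mas_detach);
 *	if (error)
 *		return error;
 *	// ... remove the range from the mm's maple tree ...
 *	vms_complete_munmap_vmas(&vms, &mas_detach);	// point of no return
 *
 * abort_munmap_vmas() is the undo step: it reattaches anything gathered into
 * @mas_detach if the operation has to be abandoned before completion.
 */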
int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct mm_struct *mm, unsigned long start,
unsigned long end, struct list_head *uf, bool unlock);
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool unlock);
void remove_vma(struct vm_area_struct *vma, bool unreachable);
void unmap_region(struct mm_struct *mm, struct ma_state *mas,
struct vm_area_struct *vma, struct vm_area_struct *prev,
struct vm_area_struct *next, unsigned long start,
unsigned long end, unsigned long tree_end, bool mm_wr_locked);
/* Required by mmap_region(). */
bool
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file,
pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
struct anon_vma_name *anon_name);
/* Required by mmap_region() and do_brk_flags(). */
bool
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file,
pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
struct anon_vma_name *anon_name);
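/*
 * Roughly: can_vma_merge_before(vma, ...) asks whether a new area ending
 * where @vma begins (with a compatible vm_pgoff) could merge with it, and
 * can_vma_merge_after(vma, ...) asks the same for a new area beginning where
 * @vma ends. Both also require compatible flags, file, anon_vma, userfaultfd
 * context and anon_vma name.
 */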
struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
struct vm_area_struct *prev,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
unsigned long vm_flags,
struct mempolicy *policy,
struct vm_userfaultfd_ctx uffd_ctx,
struct anon_vma_name *anon_name);
/* We are about to modify the VMA's flags. */
static inline struct vm_area_struct
*vma_modify_flags(struct vma_iterator *vmi,
struct vm_area_struct *prev,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
unsigned long new_flags)
{
return vma_modify(vmi, prev, vma, start, end, new_flags,
vma_policy(vma), vma->vm_userfaultfd_ctx,
anon_vma_name(vma));
}
/* We are about to modify the VMA's flags and/or anon_name. */
static inline struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
struct vm_area_struct *prev,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
unsigned long new_flags,
struct anon_vma_name *new_name)
{
return vma_modify(vmi, prev, vma, start, end, new_flags,
vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
}
/* We are about to modify the VMA's memory policy. */
static inline struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
struct vm_area_struct *prev,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct mempolicy *new_pol)
{
return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
}
/* We are about to modify the VMA's flags and/or uffd context. */
static inline struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
struct vm_area_struct *prev,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
unsigned long new_flags,
struct vm_userfaultfd_ctx new_ctx)
{
return vma_modify(vmi, prev, vma, start, end, new_flags,
vma_policy(vma), new_ctx, anon_vma_name(vma));
}
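/*
 * Illustrative use of the vma_modify*() wrappers, loosely modelled on callers
 * such as mprotect_fixup() (a sketch only): the helper splits and/or merges
 * VMAs as needed and returns the VMA now covering [start, end), or an
 * ERR_PTR() on failure.
 *
 *	vma = vma_modify_flags(vmi, prev, vma, start, end, new_flags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	vm_flags_reset(vma, new_flags);
 */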
struct vm_area_struct
*vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgoff_t pgoff);
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
struct vm_area_struct *vma,
unsigned long delta);
void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);
void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);
void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
struct vm_area_struct *vma);
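/*
 * The batch API above amortizes the i_mmap locking when many file-backed
 * VMAs are unlinked in a row, e.g. during teardown. A sketch of the intended
 * use (illustrative only):
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	// for each file-backed vma being removed:
 *	unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 */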
void unlink_file_vma(struct vm_area_struct *vma);
void vma_link_file(struct vm_area_struct *vma);
int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
unsigned long addr, unsigned long len, pgoff_t pgoff,
bool *need_rmap_locks);
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);
bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);
unsigned long count_vma_pages_range(struct mm_struct *mm,
unsigned long addr, unsigned long end);
static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
/*
* We want to check manually if we can change individual PTEs writable
* if we can't do that automatically for all PTEs in a mapping. For
* private mappings, that's always the case when we have write
* permissions as we properly have to handle COW.
*/
if (vma->vm_flags & VM_SHARED)
return vma_wants_writenotify(vma, vma->vm_page_prot);
return !!(vma->vm_flags & VM_WRITE);
}
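/*
 * Spelling out the check above, for illustration:
 *  - shared mappings: a manual per-PTE upgrade is only wanted when
 *    write-notification is in use, i.e. vm_page_prot is not already
 *    blanket-writable;
 *  - private mappings: a manual upgrade is wanted whenever VM_WRITE is set,
 *    because COW must be resolved before a PTE may be made writable.
 */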
#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif
static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
unsigned long min)
{
return mas_prev(&vmi->mas, min);
}
static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
struct vm_area_struct *vma, gfp_t gfp)
{
if (vmi->mas.status != ma_start &&
((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
vma_iter_invalidate(vmi);
__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
mas_store_gfp(&vmi->mas, vma, gfp);
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
return 0;
}
/*
 * These three helpers classify VMAs for virtual memory accounting.
*/
/*
* Executable code area - executable, not writable, not stack
*/
static inline bool is_exec_mapping(vm_flags_t flags)
{
return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}
/*
* Stack area (including shadow stacks)
*
* VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
* do_mmap() forbids all other combinations.
*/
static inline bool is_stack_mapping(vm_flags_t flags)
{
return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}
/*
* Data area - private, writable, not stack
*/
static inline bool is_data_mapping(vm_flags_t flags)
{
return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
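/*
 * For illustration, some common flag combinations and how the helpers above
 * classify them:
 *
 *	VM_READ | VM_EXEC		(file text)	-> is_exec_mapping()
 *	VM_READ | VM_WRITE		(private anon)	-> is_data_mapping()
 *	VM_READ | VM_WRITE | VM_STACK	(stack)		-> is_stack_mapping()
 *	VM_READ | VM_WRITE | VM_SHARED	(shared data)	-> none of the above
 */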
static inline void vma_iter_config(struct vma_iterator *vmi,
unsigned long index, unsigned long last)
{
__mas_set_range(&vmi->mas, index, last - 1);
}
static inline void vma_iter_reset(struct vma_iterator *vmi)
{
mas_reset(&vmi->mas);
}
static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
return mas_prev_range(&vmi->mas, min);
}
static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
return mas_next_range(&vmi->mas, max);
}
static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
unsigned long max, unsigned long size)
{
return mas_empty_area(&vmi->mas, min, max - 1, size);
}
static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
unsigned long max, unsigned long size)
{
return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}
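/*
 * Note on the conversions above: a VMA covers the half-open range
 * [vm_start, vm_end), while the maple tree uses inclusive last indices,
 * hence the "- 1" adjustments when translating end/max values.
 */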
/*
* VMA Iterator functions shared between nommu and mmap
*/
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
struct vm_area_struct *vma)
{
return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}
static inline void vma_iter_clear(struct vma_iterator *vmi)
{
mas_store_prealloc(&vmi->mas, NULL);
}
static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
return mas_walk(&vmi->mas);
}
/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
vmi->mas.index > vma->vm_start)) {
pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
vmi->mas.index, vma->vm_start, vma->vm_start,
vma->vm_end, vmi->mas.index, vmi->mas.last);
}
if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
vmi->mas.last < vma->vm_start)) {
pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
vmi->mas.index, vmi->mas.last);
}
#endif
if (vmi->mas.status != ma_start &&
((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
vma_iter_invalidate(vmi);
__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
mas_store_prealloc(&vmi->mas, vma);
}
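/*
 * Illustrative only: vma_iter_store() relies on node memory having been set
 * aside beforehand, so the store itself cannot fail; vma_iter_store_gfp()
 * allocates as it goes and may return -ENOMEM instead. A rough sketch of the
 * preallocated variant:
 *
 *	if (vma_iter_prealloc(vmi, vma))
 *		return -ENOMEM;
 *	// ... perform changes that must not fail midway ...
 *	vma_iter_store(vmi, vma);
 */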
static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
return vmi->mas.index;
}
static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
return vmi->mas.last + 1;
}
static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
unsigned long count)
{
return mas_expected_entries(&vmi->mas, count);
}
static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
return mas_prev_range(&vmi->mas, 0);
}
#ifdef CONFIG_64BIT
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
return (vma->vm_flags & VM_SEALED);
}
/*
 * Check if a VMA is sealed for modification.
 * Return true if modification is allowed.
*/
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
if (unlikely(vma_is_sealed(vma)))
return false;
return true;
}
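/*
 * A typical guard in paths that would modify a VMA (illustrative only; the
 * -EPERM convention follows existing mseal()-aware callers):
 *
 *	if (!can_modify_vma(vma))
 *		return -EPERM;
 */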
bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);
#else
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
return true;
}
static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
return true;
}
#endif
#endif /* __MM_VMA_H */