1// SPDX-License-Identifier: GPL-2.0
2#include <linux/pagewalk.h>
3#include <linux/mm_inline.h>
4#include <linux/hugetlb.h>
5#include <linux/huge_mm.h>
6#include <linux/mount.h>
7#include <linux/ksm.h>
8#include <linux/seq_file.h>
9#include <linux/highmem.h>
10#include <linux/ptrace.h>
11#include <linux/slab.h>
12#include <linux/pagemap.h>
13#include <linux/mempolicy.h>
14#include <linux/rmap.h>
15#include <linux/swap.h>
16#include <linux/sched/mm.h>
17#include <linux/swapops.h>
18#include <linux/mmu_notifier.h>
19#include <linux/page_idle.h>
20#include <linux/shmem_fs.h>
21#include <linux/uaccess.h>
22#include <linux/pkeys.h>
23#include <linux/minmax.h>
24#include <linux/overflow.h>
25
26#include <asm/elf.h>
27#include <asm/tlb.h>
28#include <asm/tlbflush.h>
29#include "internal.h"
30
31#define SEQ_PUT_DEC(str, val) \
32		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
33void task_mem(struct seq_file *m, struct mm_struct *mm)
34{
35	unsigned long text, lib, swap, anon, file, shmem;
36	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
37
38	anon = get_mm_counter(mm, MM_ANONPAGES);
39	file = get_mm_counter(mm, MM_FILEPAGES);
40	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
41
	/*
	 * Note: to minimize their overhead, mm only updates hiwater_vm and
	 * hiwater_rss when it is about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore also read total_vm
	 * and rss, which will usually be the higher value.  Barriers are not
	 * worth the effort; such snapshots can always be inconsistent.
	 */
49	hiwater_vm = total_vm = mm->total_vm;
50	if (hiwater_vm < mm->hiwater_vm)
51		hiwater_vm = mm->hiwater_vm;
52	hiwater_rss = total_rss = anon + file + shmem;
53	if (hiwater_rss < mm->hiwater_rss)
54		hiwater_rss = mm->hiwater_rss;
55
56	/* split executable areas between text and lib */
57	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
58	text = min(text, mm->exec_vm << PAGE_SHIFT);
59	lib = (mm->exec_vm << PAGE_SHIFT) - text;
60
61	swap = get_mm_counter(mm, MM_SWAPENTS);
62	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
63	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
64	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
65	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
66	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
67	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
68	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
69	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
70	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
71	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
72	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
73	seq_put_decimal_ull_width(m,
74		    " kB\nVmExe:\t", text >> 10, 8);
75	seq_put_decimal_ull_width(m,
76		    " kB\nVmLib:\t", lib >> 10, 8);
77	seq_put_decimal_ull_width(m,
78		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
79	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
80	seq_puts(m, " kB\n");
81	hugetlb_report_usage(m, mm);
82}
83#undef SEQ_PUT_DEC
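
/*
 * Illustration (made-up values): SEQ_PUT_DEC() above prints page counts as
 * kB by shifting left by (PAGE_SHIFT - 10); with 4 KiB pages that is a
 * shift of 2, so e.g. an RSS of 300 pages is printed as "1200 kB" in an
 * 8-character field:
 *
 *	VmPeak:	    8864 kB
 *	VmRSS:	    1200 kB
 */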
84
85unsigned long task_vsize(struct mm_struct *mm)
86{
87	return PAGE_SIZE * mm->total_vm;
88}
89
90unsigned long task_statm(struct mm_struct *mm,
91			 unsigned long *shared, unsigned long *text,
92			 unsigned long *data, unsigned long *resident)
93{
94	*shared = get_mm_counter(mm, MM_FILEPAGES) +
95			get_mm_counter(mm, MM_SHMEMPAGES);
96	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
97								>> PAGE_SHIFT;
98	*data = mm->data_vm + mm->stack_vm;
99	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
100	return mm->total_vm;
101}
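
/*
 * For reference, the caller (proc_pid_statm() in fs/proc/array.c) prints
 * these values as the /proc/<pid>/statm fields
 * "size resident shared text lib data dt", where lib and dt are always 0.
 */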
102
103#ifdef CONFIG_NUMA
104/*
105 * Save get_task_policy() for show_numa_map().
106 */
107static void hold_task_mempolicy(struct proc_maps_private *priv)
108{
109	struct task_struct *task = priv->task;
110
111	task_lock(task);
112	priv->task_mempolicy = get_task_policy(task);
113	mpol_get(priv->task_mempolicy);
114	task_unlock(task);
115}
116static void release_task_mempolicy(struct proc_maps_private *priv)
117{
118	mpol_put(priv->task_mempolicy);
119}
120#else
121static void hold_task_mempolicy(struct proc_maps_private *priv)
122{
123}
124static void release_task_mempolicy(struct proc_maps_private *priv)
125{
126}
127#endif
128
129static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
130						loff_t *ppos)
131{
132	struct vm_area_struct *vma = vma_next(&priv->iter);
133
134	if (vma) {
135		*ppos = vma->vm_start;
136	} else {
137		*ppos = -2UL;
138		vma = get_gate_vma(priv->mm);
139	}
140
141	return vma;
142}
143
144static void *m_start(struct seq_file *m, loff_t *ppos)
145{
146	struct proc_maps_private *priv = m->private;
147	unsigned long last_addr = *ppos;
148	struct mm_struct *mm;
149
150	/* See m_next(). Zero at the start or after lseek. */
151	if (last_addr == -1UL)
152		return NULL;
153
154	priv->task = get_proc_task(priv->inode);
155	if (!priv->task)
156		return ERR_PTR(-ESRCH);
157
158	mm = priv->mm;
159	if (!mm || !mmget_not_zero(mm)) {
160		put_task_struct(priv->task);
161		priv->task = NULL;
162		return NULL;
163	}
164
165	if (mmap_read_lock_killable(mm)) {
166		mmput(mm);
167		put_task_struct(priv->task);
168		priv->task = NULL;
169		return ERR_PTR(-EINTR);
170	}
171
172	vma_iter_init(&priv->iter, mm, last_addr);
173	hold_task_mempolicy(priv);
174	if (last_addr == -2UL)
175		return get_gate_vma(mm);
176
177	return proc_get_vma(priv, ppos);
178}
179
180static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
181{
182	if (*ppos == -2UL) {
183		*ppos = -1UL;
184		return NULL;
185	}
186	return proc_get_vma(m->private, ppos);
187}
188
189static void m_stop(struct seq_file *m, void *v)
190{
191	struct proc_maps_private *priv = m->private;
192	struct mm_struct *mm = priv->mm;
193
194	if (!priv->task)
195		return;
196
197	release_task_mempolicy(priv);
198	mmap_read_unlock(mm);
199	mmput(mm);
200	put_task_struct(priv->task);
201	priv->task = NULL;
202}
203
204static int proc_maps_open(struct inode *inode, struct file *file,
205			const struct seq_operations *ops, int psize)
206{
207	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
208
209	if (!priv)
210		return -ENOMEM;
211
212	priv->inode = inode;
213	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
214	if (IS_ERR(priv->mm)) {
215		int err = PTR_ERR(priv->mm);
216
217		seq_release_private(inode, file);
218		return err;
219	}
220
221	return 0;
222}
223
224static int proc_map_release(struct inode *inode, struct file *file)
225{
226	struct seq_file *seq = file->private_data;
227	struct proc_maps_private *priv = seq->private;
228
229	if (priv->mm)
230		mmdrop(priv->mm);
231
232	return seq_release_private(inode, file);
233}
234
235static int do_maps_open(struct inode *inode, struct file *file,
236			const struct seq_operations *ops)
237{
238	return proc_maps_open(inode, file, ops,
239				sizeof(struct proc_maps_private));
240}
241
242static void show_vma_header_prefix(struct seq_file *m,
243				   unsigned long start, unsigned long end,
244				   vm_flags_t flags, unsigned long long pgoff,
245				   dev_t dev, unsigned long ino)
246{
247	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
248	seq_put_hex_ll(m, NULL, start, 8);
249	seq_put_hex_ll(m, "-", end, 8);
250	seq_putc(m, ' ');
251	seq_putc(m, flags & VM_READ ? 'r' : '-');
252	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
253	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
254	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
255	seq_put_hex_ll(m, " ", pgoff, 8);
256	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
257	seq_put_hex_ll(m, ":", MINOR(dev), 2);
258	seq_put_decimal_ull(m, " ", ino);
259	seq_putc(m, ' ');
260}
261
262static void
263show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
264{
265	struct anon_vma_name *anon_name = NULL;
266	struct mm_struct *mm = vma->vm_mm;
267	struct file *file = vma->vm_file;
268	vm_flags_t flags = vma->vm_flags;
269	unsigned long ino = 0;
270	unsigned long long pgoff = 0;
271	unsigned long start, end;
272	dev_t dev = 0;
273	const char *name = NULL;
274
275	if (file) {
276		const struct inode *inode = file_user_inode(vma->vm_file);
277
278		dev = inode->i_sb->s_dev;
279		ino = inode->i_ino;
280		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
281	}
282
283	start = vma->vm_start;
284	end = vma->vm_end;
285	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
286	if (mm)
287		anon_name = anon_vma_name(vma);
288
289	/*
290	 * Print the dentry name for named mappings, and a
291	 * special [heap] marker for the heap:
292	 */
293	if (file) {
294		seq_pad(m, ' ');
		/*
		 * If the user named this anon shared memory via
		 * prctl(PR_SET_VMA, ...), use the provided name.
		 */
299		if (anon_name)
300			seq_printf(m, "[anon_shmem:%s]", anon_name->name);
301		else
302			seq_path(m, file_user_path(file), "\n");
303		goto done;
304	}
305
306	if (vma->vm_ops && vma->vm_ops->name) {
307		name = vma->vm_ops->name(vma);
308		if (name)
309			goto done;
310	}
311
312	name = arch_vma_name(vma);
313	if (!name) {
314		if (!mm) {
315			name = "[vdso]";
316			goto done;
317		}
318
319		if (vma_is_initial_heap(vma)) {
320			name = "[heap]";
321			goto done;
322		}
323
324		if (vma_is_initial_stack(vma)) {
325			name = "[stack]";
326			goto done;
327		}
328
329		if (anon_name) {
330			seq_pad(m, ' ');
331			seq_printf(m, "[anon:%s]", anon_name->name);
332		}
333	}
334
335done:
336	if (name) {
337		seq_pad(m, ' ');
338		seq_puts(m, name);
339	}
340	seq_putc(m, '\n');
341}
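
/*
 * Illustration (made-up addresses): a resulting /proc/<pid>/maps line is
 * the prefix from show_vma_header_prefix() followed by the name chosen
 * above, e.g.:
 *
 *	5653a0e1c000-5653a0e1d000 r-xp 00001000 08:02 1835299   /usr/bin/cat
 *	5653a2b8d000-5653a2bae000 rw-p 00000000 00:00 0          [heap]
 *	7ffd4f3c0000-7ffd4f3e1000 rw-p 00000000 00:00 0          [stack]
 */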
342
343static int show_map(struct seq_file *m, void *v)
344{
345	show_map_vma(m, v);
346	return 0;
347}
348
349static const struct seq_operations proc_pid_maps_op = {
350	.start	= m_start,
351	.next	= m_next,
352	.stop	= m_stop,
353	.show	= show_map
354};
355
356static int pid_maps_open(struct inode *inode, struct file *file)
357{
358	return do_maps_open(inode, file, &proc_pid_maps_op);
359}
360
361const struct file_operations proc_pid_maps_operations = {
362	.open		= pid_maps_open,
363	.read		= seq_read,
364	.llseek		= seq_lseek,
365	.release	= proc_map_release,
366};
367
/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep accumulated division errors low, we use a 64-bit fixed-point
 * pss counter, so (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming a 4K page size):
 * 	- 1M 3-user-pages add up to 8KB of error;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
385#define PSS_SHIFT 12
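
/*
 * Worked example of the fixed-point accounting (assuming 4 KiB pages):
 * a page mapped by 3 processes contributes
 *	(4096 << PSS_SHIFT) / 3 = 5592405
 * to pss, and (5592405 >> PSS_SHIFT) = 1365 bytes, so the per-page
 * rounding error stays well below a byte until the final shift.
 */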
386
387#ifdef CONFIG_PROC_PAGE_MONITOR
388struct mem_size_stats {
389	unsigned long resident;
390	unsigned long shared_clean;
391	unsigned long shared_dirty;
392	unsigned long private_clean;
393	unsigned long private_dirty;
394	unsigned long referenced;
395	unsigned long anonymous;
396	unsigned long lazyfree;
397	unsigned long anonymous_thp;
398	unsigned long shmem_thp;
399	unsigned long file_thp;
400	unsigned long swap;
401	unsigned long shared_hugetlb;
402	unsigned long private_hugetlb;
403	unsigned long ksm;
404	u64 pss;
405	u64 pss_anon;
406	u64 pss_file;
407	u64 pss_shmem;
408	u64 pss_dirty;
409	u64 pss_locked;
410	u64 swap_pss;
411};
412
413static void smaps_page_accumulate(struct mem_size_stats *mss,
414		struct folio *folio, unsigned long size, unsigned long pss,
415		bool dirty, bool locked, bool private)
416{
417	mss->pss += pss;
418
419	if (folio_test_anon(folio))
420		mss->pss_anon += pss;
421	else if (folio_test_swapbacked(folio))
422		mss->pss_shmem += pss;
423	else
424		mss->pss_file += pss;
425
426	if (locked)
427		mss->pss_locked += pss;
428
429	if (dirty || folio_test_dirty(folio)) {
430		mss->pss_dirty += pss;
431		if (private)
432			mss->private_dirty += size;
433		else
434			mss->shared_dirty += size;
435	} else {
436		if (private)
437			mss->private_clean += size;
438		else
439			mss->shared_clean += size;
440	}
441}
442
443static void smaps_account(struct mem_size_stats *mss, struct page *page,
444		bool compound, bool young, bool dirty, bool locked,
445		bool migration)
446{
447	struct folio *folio = page_folio(page);
448	int i, nr = compound ? compound_nr(page) : 1;
449	unsigned long size = nr * PAGE_SIZE;
450
451	/*
452	 * First accumulate quantities that depend only on |size| and the type
453	 * of the compound page.
454	 */
455	if (folio_test_anon(folio)) {
456		mss->anonymous += size;
457		if (!folio_test_swapbacked(folio) && !dirty &&
458		    !folio_test_dirty(folio))
459			mss->lazyfree += size;
460	}
461
462	if (folio_test_ksm(folio))
463		mss->ksm += size;
464
465	mss->resident += size;
466	/* Accumulate the size in pages that have been accessed. */
467	if (young || folio_test_young(folio) || folio_test_referenced(folio))
468		mss->referenced += size;
469
470	/*
471	 * Then accumulate quantities that may depend on sharing, or that may
472	 * differ page-by-page.
473	 *
	 * refcount == 1 guarantees the page is mapped exactly once.
	 * If any subpage of the compound page is mapped with a PTE, the
	 * refcount is elevated.
477	 *
478	 * The page_mapcount() is called to get a snapshot of the mapcount.
479	 * Without holding the page lock this snapshot can be slightly wrong as
480	 * we cannot always read the mapcount atomically.  It is not safe to
481	 * call page_mapcount() even with PTL held if the page is not mapped,
482	 * especially for migration entries.  Treat regular migration entries
483	 * as mapcount == 1.
484	 */
485	if ((folio_ref_count(folio) == 1) || migration) {
486		smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
487				dirty, locked, true);
488		return;
489	}
490	for (i = 0; i < nr; i++, page++) {
491		int mapcount = page_mapcount(page);
492		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
493		if (mapcount >= 2)
494			pss /= mapcount;
495		smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
496				dirty, locked, mapcount < 2);
497	}
498}
499
500#ifdef CONFIG_SHMEM
501static int smaps_pte_hole(unsigned long addr, unsigned long end,
502			  __always_unused int depth, struct mm_walk *walk)
503{
504	struct mem_size_stats *mss = walk->private;
505	struct vm_area_struct *vma = walk->vma;
506
507	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
508					      linear_page_index(vma, addr),
509					      linear_page_index(vma, end));
510
511	return 0;
512}
513#else
514#define smaps_pte_hole		NULL
515#endif /* CONFIG_SHMEM */
516
517static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
518{
519#ifdef CONFIG_SHMEM
520	if (walk->ops->pte_hole) {
521		/* depth is not used */
522		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
523	}
524#endif
525}
526
527static void smaps_pte_entry(pte_t *pte, unsigned long addr,
528		struct mm_walk *walk)
529{
530	struct mem_size_stats *mss = walk->private;
531	struct vm_area_struct *vma = walk->vma;
532	bool locked = !!(vma->vm_flags & VM_LOCKED);
533	struct page *page = NULL;
534	bool migration = false, young = false, dirty = false;
535	pte_t ptent = ptep_get(pte);
536
537	if (pte_present(ptent)) {
538		page = vm_normal_page(vma, addr, ptent);
539		young = pte_young(ptent);
540		dirty = pte_dirty(ptent);
541	} else if (is_swap_pte(ptent)) {
542		swp_entry_t swpent = pte_to_swp_entry(ptent);
543
544		if (!non_swap_entry(swpent)) {
545			int mapcount;
546
547			mss->swap += PAGE_SIZE;
548			mapcount = swp_swapcount(swpent);
549			if (mapcount >= 2) {
550				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
551
552				do_div(pss_delta, mapcount);
553				mss->swap_pss += pss_delta;
554			} else {
555				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
556			}
557		} else if (is_pfn_swap_entry(swpent)) {
558			if (is_migration_entry(swpent))
559				migration = true;
560			page = pfn_swap_entry_to_page(swpent);
561		}
562	} else {
563		smaps_pte_hole_lookup(addr, walk);
564		return;
565	}
566
567	if (!page)
568		return;
569
570	smaps_account(mss, page, false, young, dirty, locked, migration);
571}
572
573#ifdef CONFIG_TRANSPARENT_HUGEPAGE
574static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
575		struct mm_walk *walk)
576{
577	struct mem_size_stats *mss = walk->private;
578	struct vm_area_struct *vma = walk->vma;
579	bool locked = !!(vma->vm_flags & VM_LOCKED);
580	struct page *page = NULL;
581	struct folio *folio;
582	bool migration = false;
583
584	if (pmd_present(*pmd)) {
585		page = vm_normal_page_pmd(vma, addr, *pmd);
586	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
587		swp_entry_t entry = pmd_to_swp_entry(*pmd);
588
589		if (is_migration_entry(entry)) {
590			migration = true;
591			page = pfn_swap_entry_to_page(entry);
592		}
593	}
594	if (IS_ERR_OR_NULL(page))
595		return;
596	folio = page_folio(page);
597	if (folio_test_anon(folio))
598		mss->anonymous_thp += HPAGE_PMD_SIZE;
599	else if (folio_test_swapbacked(folio))
600		mss->shmem_thp += HPAGE_PMD_SIZE;
601	else if (folio_is_zone_device(folio))
602		/* pass */;
603	else
604		mss->file_thp += HPAGE_PMD_SIZE;
605
606	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
607		      locked, migration);
608}
609#else
610static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
611		struct mm_walk *walk)
612{
613}
614#endif
615
616static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
617			   struct mm_walk *walk)
618{
619	struct vm_area_struct *vma = walk->vma;
620	pte_t *pte;
621	spinlock_t *ptl;
622
623	ptl = pmd_trans_huge_lock(pmd, vma);
624	if (ptl) {
625		smaps_pmd_entry(pmd, addr, walk);
626		spin_unlock(ptl);
627		goto out;
628	}
629
630	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
631	if (!pte) {
632		walk->action = ACTION_AGAIN;
633		return 0;
634	}
635	for (; addr != end; pte++, addr += PAGE_SIZE)
636		smaps_pte_entry(pte, addr, walk);
637	pte_unmap_unlock(pte - 1, ptl);
638out:
639	cond_resched();
640	return 0;
641}
642
643static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
644{
645	/*
646	 * Don't forget to update Documentation/ on changes.
647	 */
648	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
652		[0 ... (BITS_PER_LONG-1)] = "??",
653
654		[ilog2(VM_READ)]	= "rd",
655		[ilog2(VM_WRITE)]	= "wr",
656		[ilog2(VM_EXEC)]	= "ex",
657		[ilog2(VM_SHARED)]	= "sh",
658		[ilog2(VM_MAYREAD)]	= "mr",
659		[ilog2(VM_MAYWRITE)]	= "mw",
660		[ilog2(VM_MAYEXEC)]	= "me",
661		[ilog2(VM_MAYSHARE)]	= "ms",
662		[ilog2(VM_GROWSDOWN)]	= "gd",
663		[ilog2(VM_PFNMAP)]	= "pf",
664		[ilog2(VM_LOCKED)]	= "lo",
665		[ilog2(VM_IO)]		= "io",
666		[ilog2(VM_SEQ_READ)]	= "sr",
667		[ilog2(VM_RAND_READ)]	= "rr",
668		[ilog2(VM_DONTCOPY)]	= "dc",
669		[ilog2(VM_DONTEXPAND)]	= "de",
670		[ilog2(VM_LOCKONFAULT)]	= "lf",
671		[ilog2(VM_ACCOUNT)]	= "ac",
672		[ilog2(VM_NORESERVE)]	= "nr",
673		[ilog2(VM_HUGETLB)]	= "ht",
674		[ilog2(VM_SYNC)]	= "sf",
675		[ilog2(VM_ARCH_1)]	= "ar",
676		[ilog2(VM_WIPEONFORK)]	= "wf",
677		[ilog2(VM_DONTDUMP)]	= "dd",
678#ifdef CONFIG_ARM64_BTI
679		[ilog2(VM_ARM64_BTI)]	= "bt",
680#endif
681#ifdef CONFIG_MEM_SOFT_DIRTY
682		[ilog2(VM_SOFTDIRTY)]	= "sd",
683#endif
684		[ilog2(VM_MIXEDMAP)]	= "mm",
685		[ilog2(VM_HUGEPAGE)]	= "hg",
686		[ilog2(VM_NOHUGEPAGE)]	= "nh",
687		[ilog2(VM_MERGEABLE)]	= "mg",
688		[ilog2(VM_UFFD_MISSING)]= "um",
689		[ilog2(VM_UFFD_WP)]	= "uw",
690#ifdef CONFIG_ARM64_MTE
691		[ilog2(VM_MTE)]		= "mt",
692		[ilog2(VM_MTE_ALLOWED)]	= "",
693#endif
694#ifdef CONFIG_ARCH_HAS_PKEYS
695		/* These come out via ProtectionKey: */
696		[ilog2(VM_PKEY_BIT0)]	= "",
697		[ilog2(VM_PKEY_BIT1)]	= "",
698		[ilog2(VM_PKEY_BIT2)]	= "",
699		[ilog2(VM_PKEY_BIT3)]	= "",
700#if VM_PKEY_BIT4
701		[ilog2(VM_PKEY_BIT4)]	= "",
702#endif
703#endif /* CONFIG_ARCH_HAS_PKEYS */
704#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
705		[ilog2(VM_UFFD_MINOR)]	= "ui",
706#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
707#ifdef CONFIG_X86_USER_SHADOW_STACK
708		[ilog2(VM_SHADOW_STACK)] = "ss",
709#endif
710	};
711	size_t i;
712
713	seq_puts(m, "VmFlags: ");
714	for (i = 0; i < BITS_PER_LONG; i++) {
715		if (!mnemonics[i][0])
716			continue;
717		if (vma->vm_flags & (1UL << i)) {
718			seq_putc(m, mnemonics[i][0]);
719			seq_putc(m, mnemonics[i][1]);
720			seq_putc(m, ' ');
721		}
722	}
723	seq_putc(m, '\n');
724}
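
/*
 * Illustration (made-up VMA): a writable anonymous mapping with
 * VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|VM_ACCOUNT set is
 * reported as:
 *
 *	VmFlags: rd wr mr mw me ac
 */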
725
726#ifdef CONFIG_HUGETLB_PAGE
727static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
728				 unsigned long addr, unsigned long end,
729				 struct mm_walk *walk)
730{
731	struct mem_size_stats *mss = walk->private;
732	struct vm_area_struct *vma = walk->vma;
733	pte_t ptent = huge_ptep_get(pte);
734	struct folio *folio = NULL;
735
736	if (pte_present(ptent)) {
737		folio = page_folio(pte_page(ptent));
738	} else if (is_swap_pte(ptent)) {
739		swp_entry_t swpent = pte_to_swp_entry(ptent);
740
741		if (is_pfn_swap_entry(swpent))
742			folio = pfn_swap_entry_folio(swpent);
743	}
744	if (folio) {
745		if (folio_likely_mapped_shared(folio) ||
746		    hugetlb_pmd_shared(pte))
747			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
748		else
749			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
750	}
751	return 0;
752}
753#else
754#define smaps_hugetlb_range	NULL
755#endif /* HUGETLB_PAGE */
756
757static const struct mm_walk_ops smaps_walk_ops = {
758	.pmd_entry		= smaps_pte_range,
759	.hugetlb_entry		= smaps_hugetlb_range,
760	.walk_lock		= PGWALK_RDLOCK,
761};
762
763static const struct mm_walk_ops smaps_shmem_walk_ops = {
764	.pmd_entry		= smaps_pte_range,
765	.hugetlb_entry		= smaps_hugetlb_range,
766	.pte_hole		= smaps_pte_hole,
767	.walk_lock		= PGWALK_RDLOCK,
768};
769
770/*
771 * Gather mem stats from @vma with the indicated beginning
772 * address @start, and keep them in @mss.
773 *
774 * Use vm_start of @vma as the beginning address if @start is 0.
775 */
776static void smap_gather_stats(struct vm_area_struct *vma,
777		struct mem_size_stats *mss, unsigned long start)
778{
779	const struct mm_walk_ops *ops = &smaps_walk_ops;
780
781	/* Invalid start */
782	if (start >= vma->vm_end)
783		return;
784
785	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
		/*
		 * For shared or readonly shmem mappings we know that all
		 * swapped out pages belong to the shmem object, and we can
		 * obtain the swap value much more efficiently. For private
		 * writable mappings, we might have COW pages that are
		 * not affected by the parent swapped out pages of the shmem
		 * object, so we have to distinguish them during the page
		 * walk, unless we know that the shmem object (or the part
		 * mapped by our VMA) has no swapped out pages at all.
		 */
796		unsigned long shmem_swapped = shmem_swap_usage(vma);
797
798		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
799					!(vma->vm_flags & VM_WRITE))) {
800			mss->swap += shmem_swapped;
801		} else {
802			ops = &smaps_shmem_walk_ops;
803		}
804	}
805
806	/* mmap_lock is held in m_start */
807	if (!start)
808		walk_page_vma(vma, ops, mss);
809	else
810		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
811}
812
813#define SEQ_PUT_DEC(str, val) \
814		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
815
816/* Show the contents common for smaps and smaps_rollup */
817static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
818	bool rollup_mode)
819{
820	SEQ_PUT_DEC("Rss:            ", mss->resident);
821	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
822	SEQ_PUT_DEC(" kB\nPss_Dirty:      ", mss->pss_dirty >> PSS_SHIFT);
823	if (rollup_mode) {
824		/*
825		 * These are meaningful only for smaps_rollup, otherwise two of
826		 * them are zero, and the other one is the same as Pss.
827		 */
828		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
829			mss->pss_anon >> PSS_SHIFT);
830		SEQ_PUT_DEC(" kB\nPss_File:       ",
831			mss->pss_file >> PSS_SHIFT);
832		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
833			mss->pss_shmem >> PSS_SHIFT);
834	}
835	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
836	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
837	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
838	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
839	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
840	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
841	SEQ_PUT_DEC(" kB\nKSM:            ", mss->ksm);
842	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
843	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
844	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
845	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
846	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
847	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
848				  mss->private_hugetlb >> 10, 7);
849	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
850	SEQ_PUT_DEC(" kB\nSwapPss:        ",
851					mss->swap_pss >> PSS_SHIFT);
852	SEQ_PUT_DEC(" kB\nLocked:         ",
853					mss->pss_locked >> PSS_SHIFT);
854	seq_puts(m, " kB\n");
855}
856
857static int show_smap(struct seq_file *m, void *v)
858{
859	struct vm_area_struct *vma = v;
860	struct mem_size_stats mss = {};
861
862	smap_gather_stats(vma, &mss, 0);
863
864	show_map_vma(m, vma);
865
866	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
867	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
868	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
869	seq_puts(m, " kB\n");
870
871	__show_smap(m, &mss, false);
872
873	seq_printf(m, "THPeligible:    %8u\n",
874		   !!thp_vma_allowable_orders(vma, vma->vm_flags,
875			   TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));
876
877	if (arch_pkeys_enabled())
878		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
879	show_smap_vma_flags(m, vma);
880
881	return 0;
882}
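
/*
 * Illustration (made-up values): each /proc/<pid>/smaps entry is the maps
 * line followed by the counters printed above, e.g.:
 *
 *	7f2c3a400000-7f2c3a600000 rw-p 00000000 00:00 0
 *	Size:               2048 kB
 *	KernelPageSize:        4 kB
 *	MMUPageSize:           4 kB
 *	Rss:                1024 kB
 *	Pss:                 512 kB
 *	...
 *	THPeligible:           0
 *	VmFlags: rd wr mr mw me ac
 */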
883
884static int show_smaps_rollup(struct seq_file *m, void *v)
885{
886	struct proc_maps_private *priv = m->private;
887	struct mem_size_stats mss = {};
888	struct mm_struct *mm = priv->mm;
889	struct vm_area_struct *vma;
890	unsigned long vma_start = 0, last_vma_end = 0;
891	int ret = 0;
892	VMA_ITERATOR(vmi, mm, 0);
893
894	priv->task = get_proc_task(priv->inode);
895	if (!priv->task)
896		return -ESRCH;
897
898	if (!mm || !mmget_not_zero(mm)) {
899		ret = -ESRCH;
900		goto out_put_task;
901	}
902
903	ret = mmap_read_lock_killable(mm);
904	if (ret)
905		goto out_put_mm;
906
907	hold_task_mempolicy(priv);
908	vma = vma_next(&vmi);
909
910	if (unlikely(!vma))
911		goto empty_set;
912
913	vma_start = vma->vm_start;
914	do {
915		smap_gather_stats(vma, &mss, 0);
916		last_vma_end = vma->vm_end;
917
918		/*
919		 * Release mmap_lock temporarily if someone wants to
920		 * access it for write request.
921		 */
922		if (mmap_lock_is_contended(mm)) {
923			vma_iter_invalidate(&vmi);
924			mmap_read_unlock(mm);
925			ret = mmap_read_lock_killable(mm);
926			if (ret) {
927				release_task_mempolicy(priv);
928				goto out_put_mm;
929			}
930
931			/*
932			 * After dropping the lock, there are four cases to
933			 * consider. See the following example for explanation.
934			 *
935			 *   +------+------+-----------+
936			 *   | VMA1 | VMA2 | VMA3      |
937			 *   +------+------+-----------+
938			 *   |      |      |           |
939			 *  4k     8k     16k         400k
940			 *
941			 * Suppose we drop the lock after reading VMA2 due to
942			 * contention, then we get:
943			 *
944			 *	last_vma_end = 16k
945			 *
946			 * 1) VMA2 is freed, but VMA3 exists:
947			 *
948			 *    vma_next(vmi) will return VMA3.
949			 *    In this case, just continue from VMA3.
950			 *
951			 * 2) VMA2 still exists:
952			 *
953			 *    vma_next(vmi) will return VMA3.
954			 *    In this case, just continue from VMA3.
955			 *
956			 * 3) No more VMAs can be found:
957			 *
958			 *    vma_next(vmi) will return NULL.
959			 *    No more things to do, just break.
960			 *
961			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
962			 *
963			 *    vma_next(vmi) will return VMA' whose range
964			 *    contains last_vma_end.
965			 *    Iterate VMA' from last_vma_end.
966			 */
967			vma = vma_next(&vmi);
968			/* Case 3 above */
969			if (!vma)
970				break;
971
972			/* Case 1 and 2 above */
973			if (vma->vm_start >= last_vma_end) {
974				smap_gather_stats(vma, &mss, 0);
975				last_vma_end = vma->vm_end;
976				continue;
977			}
978
979			/* Case 4 above */
980			if (vma->vm_end > last_vma_end) {
981				smap_gather_stats(vma, &mss, last_vma_end);
982				last_vma_end = vma->vm_end;
983			}
984		}
985	} for_each_vma(vmi, vma);
986
987empty_set:
988	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
989	seq_pad(m, ' ');
990	seq_puts(m, "[rollup]\n");
991
992	__show_smap(m, &mss, true);
993
994	release_task_mempolicy(priv);
995	mmap_read_unlock(mm);
996
997out_put_mm:
998	mmput(mm);
999out_put_task:
1000	put_task_struct(priv->task);
1001	priv->task = NULL;
1002
1003	return ret;
1004}
1005#undef SEQ_PUT_DEC
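
/*
 * Illustration (made-up addresses): smaps_rollup emits one pseudo-vma
 * header spanning from the first vma to the end of the last one, followed
 * by the summed counters, e.g.:
 *
 *	561a3f2e4000-7ffd4f3e1000 ---p 00000000 00:00 0          [rollup]
 *	Rss:               34000 kB
 *	Pss:               12500 kB
 *	...
 */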
1006
1007static const struct seq_operations proc_pid_smaps_op = {
1008	.start	= m_start,
1009	.next	= m_next,
1010	.stop	= m_stop,
1011	.show	= show_smap
1012};
1013
1014static int pid_smaps_open(struct inode *inode, struct file *file)
1015{
1016	return do_maps_open(inode, file, &proc_pid_smaps_op);
1017}
1018
1019static int smaps_rollup_open(struct inode *inode, struct file *file)
1020{
1021	int ret;
1022	struct proc_maps_private *priv;
1023
1024	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
1025	if (!priv)
1026		return -ENOMEM;
1027
1028	ret = single_open(file, show_smaps_rollup, priv);
1029	if (ret)
1030		goto out_free;
1031
1032	priv->inode = inode;
1033	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
1034	if (IS_ERR(priv->mm)) {
1035		ret = PTR_ERR(priv->mm);
1036
1037		single_release(inode, file);
1038		goto out_free;
1039	}
1040
1041	return 0;
1042
1043out_free:
1044	kfree(priv);
1045	return ret;
1046}
1047
1048static int smaps_rollup_release(struct inode *inode, struct file *file)
1049{
1050	struct seq_file *seq = file->private_data;
1051	struct proc_maps_private *priv = seq->private;
1052
1053	if (priv->mm)
1054		mmdrop(priv->mm);
1055
1056	kfree(priv);
1057	return single_release(inode, file);
1058}
1059
1060const struct file_operations proc_pid_smaps_operations = {
1061	.open		= pid_smaps_open,
1062	.read		= seq_read,
1063	.llseek		= seq_lseek,
1064	.release	= proc_map_release,
1065};
1066
1067const struct file_operations proc_pid_smaps_rollup_operations = {
1068	.open		= smaps_rollup_open,
1069	.read		= seq_read,
1070	.llseek		= seq_lseek,
1071	.release	= smaps_rollup_release,
1072};
1073
1074enum clear_refs_types {
1075	CLEAR_REFS_ALL = 1,
1076	CLEAR_REFS_ANON,
1077	CLEAR_REFS_MAPPED,
1078	CLEAR_REFS_SOFT_DIRTY,
1079	CLEAR_REFS_MM_HIWATER_RSS,
1080	CLEAR_REFS_LAST,
1081};
1082
1083struct clear_refs_private {
1084	enum clear_refs_types type;
1085};
1086
1087#ifdef CONFIG_MEM_SOFT_DIRTY
1088
1089static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1090{
1091	struct page *page;
1092
1093	if (!pte_write(pte))
1094		return false;
1095	if (!is_cow_mapping(vma->vm_flags))
1096		return false;
1097	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
1098		return false;
1099	page = vm_normal_page(vma, addr, pte);
1100	if (!page)
1101		return false;
1102	return page_maybe_dma_pinned(page);
1103}
1104
1105static inline void clear_soft_dirty(struct vm_area_struct *vma,
1106		unsigned long addr, pte_t *pte)
1107{
	/*
	 * The soft-dirty tracker uses page faults to catch writes
	 * to pages, so write-protect the pte as well. See
	 * Documentation/admin-guide/mm/soft-dirty.rst for a full
	 * description of how soft-dirty works.
	 */
1114	pte_t ptent = ptep_get(pte);
1115
1116	if (pte_present(ptent)) {
1117		pte_t old_pte;
1118
1119		if (pte_is_pinned(vma, addr, ptent))
1120			return;
1121		old_pte = ptep_modify_prot_start(vma, addr, pte);
1122		ptent = pte_wrprotect(old_pte);
1123		ptent = pte_clear_soft_dirty(ptent);
1124		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1125	} else if (is_swap_pte(ptent)) {
1126		ptent = pte_swp_clear_soft_dirty(ptent);
1127		set_pte_at(vma->vm_mm, addr, pte, ptent);
1128	}
1129}
1130#else
1131static inline void clear_soft_dirty(struct vm_area_struct *vma,
1132		unsigned long addr, pte_t *pte)
1133{
1134}
1135#endif
1136
1137#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1138static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1139		unsigned long addr, pmd_t *pmdp)
1140{
1141	pmd_t old, pmd = *pmdp;
1142
1143	if (pmd_present(pmd)) {
1144		/* See comment in change_huge_pmd() */
1145		old = pmdp_invalidate(vma, addr, pmdp);
1146		if (pmd_dirty(old))
1147			pmd = pmd_mkdirty(pmd);
1148		if (pmd_young(old))
1149			pmd = pmd_mkyoung(pmd);
1150
1151		pmd = pmd_wrprotect(pmd);
1152		pmd = pmd_clear_soft_dirty(pmd);
1153
1154		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1155	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1156		pmd = pmd_swp_clear_soft_dirty(pmd);
1157		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1158	}
1159}
1160#else
1161static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1162		unsigned long addr, pmd_t *pmdp)
1163{
1164}
1165#endif
1166
1167static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1168				unsigned long end, struct mm_walk *walk)
1169{
1170	struct clear_refs_private *cp = walk->private;
1171	struct vm_area_struct *vma = walk->vma;
1172	pte_t *pte, ptent;
1173	spinlock_t *ptl;
1174	struct folio *folio;
1175
1176	ptl = pmd_trans_huge_lock(pmd, vma);
1177	if (ptl) {
1178		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1179			clear_soft_dirty_pmd(vma, addr, pmd);
1180			goto out;
1181		}
1182
1183		if (!pmd_present(*pmd))
1184			goto out;
1185
1186		folio = pmd_folio(*pmd);
1187
1188		/* Clear accessed and referenced bits. */
1189		pmdp_test_and_clear_young(vma, addr, pmd);
1190		folio_test_clear_young(folio);
1191		folio_clear_referenced(folio);
1192out:
1193		spin_unlock(ptl);
1194		return 0;
1195	}
1196
1197	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1198	if (!pte) {
1199		walk->action = ACTION_AGAIN;
1200		return 0;
1201	}
1202	for (; addr != end; pte++, addr += PAGE_SIZE) {
1203		ptent = ptep_get(pte);
1204
1205		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1206			clear_soft_dirty(vma, addr, pte);
1207			continue;
1208		}
1209
1210		if (!pte_present(ptent))
1211			continue;
1212
1213		folio = vm_normal_folio(vma, addr, ptent);
1214		if (!folio)
1215			continue;
1216
1217		/* Clear accessed and referenced bits. */
1218		ptep_test_and_clear_young(vma, addr, pte);
1219		folio_test_clear_young(folio);
1220		folio_clear_referenced(folio);
1221	}
1222	pte_unmap_unlock(pte - 1, ptl);
1223	cond_resched();
1224	return 0;
1225}
1226
1227static int clear_refs_test_walk(unsigned long start, unsigned long end,
1228				struct mm_walk *walk)
1229{
1230	struct clear_refs_private *cp = walk->private;
1231	struct vm_area_struct *vma = walk->vma;
1232
1233	if (vma->vm_flags & VM_PFNMAP)
1234		return 1;
1235
1236	/*
1237	 * Writing 1 to /proc/pid/clear_refs affects all pages.
1238	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1239	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1240	 * Writing 4 to /proc/pid/clear_refs affects all pages.
1241	 */
1242	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1243		return 1;
1244	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1245		return 1;
1246	return 0;
1247}
1248
1249static const struct mm_walk_ops clear_refs_walk_ops = {
1250	.pmd_entry		= clear_refs_pte_range,
1251	.test_walk		= clear_refs_test_walk,
1252	.walk_lock		= PGWALK_WRLOCK,
1253};
1254
1255static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1256				size_t count, loff_t *ppos)
1257{
1258	struct task_struct *task;
1259	char buffer[PROC_NUMBUF] = {};
1260	struct mm_struct *mm;
1261	struct vm_area_struct *vma;
1262	enum clear_refs_types type;
1263	int itype;
1264	int rv;
1265
1266	if (count > sizeof(buffer) - 1)
1267		count = sizeof(buffer) - 1;
1268	if (copy_from_user(buffer, buf, count))
1269		return -EFAULT;
1270	rv = kstrtoint(strstrip(buffer), 10, &itype);
1271	if (rv < 0)
1272		return rv;
1273	type = (enum clear_refs_types)itype;
1274	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1275		return -EINVAL;
1276
1277	task = get_proc_task(file_inode(file));
1278	if (!task)
1279		return -ESRCH;
1280	mm = get_task_mm(task);
1281	if (mm) {
1282		VMA_ITERATOR(vmi, mm, 0);
1283		struct mmu_notifier_range range;
1284		struct clear_refs_private cp = {
1285			.type = type,
1286		};
1287
1288		if (mmap_write_lock_killable(mm)) {
1289			count = -EINTR;
1290			goto out_mm;
1291		}
1292		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1293			/*
1294			 * Writing 5 to /proc/pid/clear_refs resets the peak
1295			 * resident set size to this mm's current rss value.
1296			 */
1297			reset_mm_hiwater_rss(mm);
1298			goto out_unlock;
1299		}
1300
1301		if (type == CLEAR_REFS_SOFT_DIRTY) {
1302			for_each_vma(vmi, vma) {
1303				if (!(vma->vm_flags & VM_SOFTDIRTY))
1304					continue;
1305				vm_flags_clear(vma, VM_SOFTDIRTY);
1306				vma_set_page_prot(vma);
1307			}
1308
1309			inc_tlb_flush_pending(mm);
1310			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1311						0, mm, 0, -1UL);
1312			mmu_notifier_invalidate_range_start(&range);
1313		}
1314		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
1315		if (type == CLEAR_REFS_SOFT_DIRTY) {
1316			mmu_notifier_invalidate_range_end(&range);
1317			flush_tlb_mm(mm);
1318			dec_tlb_flush_pending(mm);
1319		}
1320out_unlock:
1321		mmap_write_unlock(mm);
1322out_mm:
1323		mmput(mm);
1324	}
1325	put_task_struct(task);
1326
1327	return count;
1328}
1329
1330const struct file_operations proc_clear_refs_operations = {
1331	.write		= clear_refs_write,
1332	.llseek		= noop_llseek,
1333};
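
/*
 * Illustrative usage from userspace (values as documented in
 * clear_refs_test_walk() and clear_refs_write() above):
 *
 *	echo 1 > /proc/$pid/clear_refs    # clear Accessed/Referenced bits
 *	# ... let the workload run ...
 *	grep Referenced /proc/$pid/smaps  # pages touched since the write
 *
 * Writing 4 clears the soft-dirty bits (starting a new soft-dirty tracking
 * interval) and writing 5 resets the peak RSS (VmHWM).
 */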
1334
1335typedef struct {
1336	u64 pme;
1337} pagemap_entry_t;
1338
1339struct pagemapread {
1340	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1341	pagemap_entry_t *buffer;
1342	bool show_pfn;
1343};
1344
1345#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1346#define PAGEMAP_WALK_MASK	(PMD_MASK)
1347
1348#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1349#define PM_PFRAME_BITS		55
1350#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1351#define PM_SOFT_DIRTY		BIT_ULL(55)
1352#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1353#define PM_UFFD_WP		BIT_ULL(57)
1354#define PM_FILE			BIT_ULL(61)
1355#define PM_SWAP			BIT_ULL(62)
1356#define PM_PRESENT		BIT_ULL(63)
1357
1358#define PM_END_OF_BUFFER    1
1359
1360static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1361{
1362	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1363}
1364
1365static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm)
1366{
1367	pm->buffer[pm->pos++] = *pme;
1368	if (pm->pos >= pm->len)
1369		return PM_END_OF_BUFFER;
1370	return 0;
1371}
1372
1373static int pagemap_pte_hole(unsigned long start, unsigned long end,
1374			    __always_unused int depth, struct mm_walk *walk)
1375{
1376	struct pagemapread *pm = walk->private;
1377	unsigned long addr = start;
1378	int err = 0;
1379
1380	while (addr < end) {
1381		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1382		pagemap_entry_t pme = make_pme(0, 0);
1383		/* End of address space hole, which we mark as non-present. */
1384		unsigned long hole_end;
1385
1386		if (vma)
1387			hole_end = min(end, vma->vm_start);
1388		else
1389			hole_end = end;
1390
1391		for (; addr < hole_end; addr += PAGE_SIZE) {
1392			err = add_to_pagemap(&pme, pm);
1393			if (err)
1394				goto out;
1395		}
1396
1397		if (!vma)
1398			break;
1399
1400		/* Addresses in the VMA. */
1401		if (vma->vm_flags & VM_SOFTDIRTY)
1402			pme = make_pme(0, PM_SOFT_DIRTY);
1403		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1404			err = add_to_pagemap(&pme, pm);
1405			if (err)
1406				goto out;
1407		}
1408	}
1409out:
1410	return err;
1411}
1412
1413static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1414		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1415{
1416	u64 frame = 0, flags = 0;
1417	struct page *page = NULL;
1418	bool migration = false;
1419
1420	if (pte_present(pte)) {
1421		if (pm->show_pfn)
1422			frame = pte_pfn(pte);
1423		flags |= PM_PRESENT;
1424		page = vm_normal_page(vma, addr, pte);
1425		if (pte_soft_dirty(pte))
1426			flags |= PM_SOFT_DIRTY;
1427		if (pte_uffd_wp(pte))
1428			flags |= PM_UFFD_WP;
1429	} else if (is_swap_pte(pte)) {
1430		swp_entry_t entry;
1431		if (pte_swp_soft_dirty(pte))
1432			flags |= PM_SOFT_DIRTY;
1433		if (pte_swp_uffd_wp(pte))
1434			flags |= PM_UFFD_WP;
1435		entry = pte_to_swp_entry(pte);
1436		if (pm->show_pfn) {
1437			pgoff_t offset;
			/*
			 * For PFN swap entries, keep the offset field as the
			 * PFN only, to stay compatible with old smaps.
			 */
1442			if (is_pfn_swap_entry(entry))
1443				offset = swp_offset_pfn(entry);
1444			else
1445				offset = swp_offset(entry);
1446			frame = swp_type(entry) |
1447			    (offset << MAX_SWAPFILES_SHIFT);
1448		}
1449		flags |= PM_SWAP;
1450		migration = is_migration_entry(entry);
1451		if (is_pfn_swap_entry(entry))
1452			page = pfn_swap_entry_to_page(entry);
1453		if (pte_marker_entry_uffd_wp(entry))
1454			flags |= PM_UFFD_WP;
1455	}
1456
1457	if (page && !PageAnon(page))
1458		flags |= PM_FILE;
1459	if (page && !migration && page_mapcount(page) == 1)
1460		flags |= PM_MMAP_EXCLUSIVE;
1461	if (vma->vm_flags & VM_SOFTDIRTY)
1462		flags |= PM_SOFT_DIRTY;
1463
1464	return make_pme(frame, flags);
1465}
1466
1467static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1468			     struct mm_walk *walk)
1469{
1470	struct vm_area_struct *vma = walk->vma;
1471	struct pagemapread *pm = walk->private;
1472	spinlock_t *ptl;
1473	pte_t *pte, *orig_pte;
1474	int err = 0;
1475#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1476	bool migration = false;
1477
1478	ptl = pmd_trans_huge_lock(pmdp, vma);
1479	if (ptl) {
1480		u64 flags = 0, frame = 0;
1481		pmd_t pmd = *pmdp;
1482		struct page *page = NULL;
1483
1484		if (vma->vm_flags & VM_SOFTDIRTY)
1485			flags |= PM_SOFT_DIRTY;
1486
1487		if (pmd_present(pmd)) {
1488			page = pmd_page(pmd);
1489
1490			flags |= PM_PRESENT;
1491			if (pmd_soft_dirty(pmd))
1492				flags |= PM_SOFT_DIRTY;
1493			if (pmd_uffd_wp(pmd))
1494				flags |= PM_UFFD_WP;
1495			if (pm->show_pfn)
1496				frame = pmd_pfn(pmd) +
1497					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1498		}
1499#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1500		else if (is_swap_pmd(pmd)) {
1501			swp_entry_t entry = pmd_to_swp_entry(pmd);
1502			unsigned long offset;
1503
1504			if (pm->show_pfn) {
1505				if (is_pfn_swap_entry(entry))
1506					offset = swp_offset_pfn(entry);
1507				else
1508					offset = swp_offset(entry);
1509				offset = offset +
1510					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1511				frame = swp_type(entry) |
1512					(offset << MAX_SWAPFILES_SHIFT);
1513			}
1514			flags |= PM_SWAP;
1515			if (pmd_swp_soft_dirty(pmd))
1516				flags |= PM_SOFT_DIRTY;
1517			if (pmd_swp_uffd_wp(pmd))
1518				flags |= PM_UFFD_WP;
1519			VM_BUG_ON(!is_pmd_migration_entry(pmd));
1520			migration = is_migration_entry(entry);
1521			page = pfn_swap_entry_to_page(entry);
1522		}
1523#endif
1524
1525		if (page && !migration && page_mapcount(page) == 1)
1526			flags |= PM_MMAP_EXCLUSIVE;
1527
1528		for (; addr != end; addr += PAGE_SIZE) {
1529			pagemap_entry_t pme = make_pme(frame, flags);
1530
1531			err = add_to_pagemap(&pme, pm);
1532			if (err)
1533				break;
1534			if (pm->show_pfn) {
1535				if (flags & PM_PRESENT)
1536					frame++;
1537				else if (flags & PM_SWAP)
1538					frame += (1 << MAX_SWAPFILES_SHIFT);
1539			}
1540		}
1541		spin_unlock(ptl);
1542		return err;
1543	}
1544#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1545
1546	/*
1547	 * We can assume that @vma always points to a valid one and @end never
1548	 * goes beyond vma->vm_end.
1549	 */
1550	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1551	if (!pte) {
1552		walk->action = ACTION_AGAIN;
1553		return err;
1554	}
1555	for (; addr < end; pte++, addr += PAGE_SIZE) {
1556		pagemap_entry_t pme;
1557
1558		pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte));
1559		err = add_to_pagemap(&pme, pm);
1560		if (err)
1561			break;
1562	}
1563	pte_unmap_unlock(orig_pte, ptl);
1564
1565	cond_resched();
1566
1567	return err;
1568}
1569
1570#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in a single call */
1572static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1573				 unsigned long addr, unsigned long end,
1574				 struct mm_walk *walk)
1575{
1576	struct pagemapread *pm = walk->private;
1577	struct vm_area_struct *vma = walk->vma;
1578	u64 flags = 0, frame = 0;
1579	int err = 0;
1580	pte_t pte;
1581
1582	if (vma->vm_flags & VM_SOFTDIRTY)
1583		flags |= PM_SOFT_DIRTY;
1584
1585	pte = huge_ptep_get(ptep);
1586	if (pte_present(pte)) {
1587		struct folio *folio = page_folio(pte_page(pte));
1588
1589		if (!folio_test_anon(folio))
1590			flags |= PM_FILE;
1591
1592		if (!folio_likely_mapped_shared(folio) &&
1593		    !hugetlb_pmd_shared(ptep))
1594			flags |= PM_MMAP_EXCLUSIVE;
1595
1596		if (huge_pte_uffd_wp(pte))
1597			flags |= PM_UFFD_WP;
1598
1599		flags |= PM_PRESENT;
1600		if (pm->show_pfn)
1601			frame = pte_pfn(pte) +
1602				((addr & ~hmask) >> PAGE_SHIFT);
1603	} else if (pte_swp_uffd_wp_any(pte)) {
1604		flags |= PM_UFFD_WP;
1605	}
1606
1607	for (; addr != end; addr += PAGE_SIZE) {
1608		pagemap_entry_t pme = make_pme(frame, flags);
1609
1610		err = add_to_pagemap(&pme, pm);
1611		if (err)
1612			return err;
1613		if (pm->show_pfn && (flags & PM_PRESENT))
1614			frame++;
1615	}
1616
1617	cond_resched();
1618
1619	return err;
1620}
1621#else
1622#define pagemap_hugetlb_range	NULL
1623#endif /* HUGETLB_PAGE */
1624
1625static const struct mm_walk_ops pagemap_ops = {
1626	.pmd_entry	= pagemap_pmd_range,
1627	.pte_hole	= pagemap_pte_hole,
1628	.hugetlb_entry	= pagemap_hugetlb_range,
1629	.walk_lock	= PGWALK_RDLOCK,
1630};
1631
1632/*
1633 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1634 *
1635 * For each page in the address space, this file contains one 64-bit entry
1636 * consisting of the following:
1637 *
1638 * Bits 0-54  page frame number (PFN) if present
1639 * Bits 0-4   swap type if swapped
1640 * Bits 5-54  swap offset if swapped
1641 * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1642 * Bit  56    page exclusively mapped
1643 * Bit  57    pte is uffd-wp write-protected
1644 * Bits 58-60 zero
1645 * Bit  61    page is file-page or shared-anon
1646 * Bit  62    page swapped
1647 * Bit  63    page present
1648 *
1649 * If the page is not present but in swap, then the PFN contains an
1650 * encoding of the swap file number and the page's offset into the
1651 * swap. Unmapped pages return a null PFN. This allows determining
1652 * precisely which pages are mapped (or in swap) and comparing mapped
1653 * pages between processes.
1654 *
1655 * Efficient users of this interface will use /proc/pid/maps to
1656 * determine which areas of memory are actually mapped and llseek to
1657 * skip over unmapped regions.
1658 */
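
/*
 * Minimal userspace sketch (illustrative only, not kernel code) that reads
 * one entry for a virtual address and decodes it with the bit layout
 * documented above; error handling is omitted:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		unsigned long vaddr = strtoul(argv[1], NULL, 0);
 *		long pagesize = sysconf(_SC_PAGESIZE);
 *		int fd = open("/proc/self/pagemap", O_RDONLY);
 *		uint64_t ent;
 *
 *		pread(fd, &ent, sizeof(ent), (vaddr / pagesize) * 8);
 *		printf("present=%d swapped=%d file=%d pfn=0x%llx\n",
 *		       (int)(ent >> 63 & 1), (int)(ent >> 62 & 1),
 *		       (int)(ent >> 61 & 1),
 *		       (unsigned long long)(ent & ((1ULL << 55) - 1)));
 *		close(fd);
 *		return 0;
 *	}
 *
 * The PFN field reads back as zero unless the caller has CAP_SYS_ADMIN,
 * see the show_pfn handling in pagemap_read() below.
 */
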
1659static ssize_t pagemap_read(struct file *file, char __user *buf,
1660			    size_t count, loff_t *ppos)
1661{
1662	struct mm_struct *mm = file->private_data;
1663	struct pagemapread pm;
1664	unsigned long src;
1665	unsigned long svpfn;
1666	unsigned long start_vaddr;
1667	unsigned long end_vaddr;
1668	int ret = 0, copied = 0;
1669
1670	if (!mm || !mmget_not_zero(mm))
1671		goto out;
1672
1673	ret = -EINVAL;
1674	/* file position must be aligned */
1675	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1676		goto out_mm;
1677
1678	ret = 0;
1679	if (!count)
1680		goto out_mm;
1681
1682	/* do not disclose physical addresses: attack vector */
1683	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1684
1685	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1686	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
1687	ret = -ENOMEM;
1688	if (!pm.buffer)
1689		goto out_mm;
1690
1691	src = *ppos;
1692	svpfn = src / PM_ENTRY_BYTES;
1693	end_vaddr = mm->task_size;
1694
1695	/* watch out for wraparound */
1696	start_vaddr = end_vaddr;
1697	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) {
1698		unsigned long end;
1699
1700		ret = mmap_read_lock_killable(mm);
1701		if (ret)
1702			goto out_free;
1703		start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT);
1704		mmap_read_unlock(mm);
1705
1706		end = start_vaddr + ((count / PM_ENTRY_BYTES) << PAGE_SHIFT);
1707		if (end >= start_vaddr && end < mm->task_size)
1708			end_vaddr = end;
1709	}
1710
1711	/* Ensure the address is inside the task */
1712	if (start_vaddr > mm->task_size)
1713		start_vaddr = end_vaddr;
1714
1715	ret = 0;
1716	while (count && (start_vaddr < end_vaddr)) {
1717		int len;
1718		unsigned long end;
1719
1720		pm.pos = 0;
1721		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1722		/* overflow ? */
1723		if (end < start_vaddr || end > end_vaddr)
1724			end = end_vaddr;
1725		ret = mmap_read_lock_killable(mm);
1726		if (ret)
1727			goto out_free;
1728		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
1729		mmap_read_unlock(mm);
1730		start_vaddr = end;
1731
1732		len = min(count, PM_ENTRY_BYTES * pm.pos);
1733		if (copy_to_user(buf, pm.buffer, len)) {
1734			ret = -EFAULT;
1735			goto out_free;
1736		}
1737		copied += len;
1738		buf += len;
1739		count -= len;
1740	}
1741	*ppos += copied;
1742	if (!ret || ret == PM_END_OF_BUFFER)
1743		ret = copied;
1744
1745out_free:
1746	kfree(pm.buffer);
1747out_mm:
1748	mmput(mm);
1749out:
1750	return ret;
1751}
1752
1753static int pagemap_open(struct inode *inode, struct file *file)
1754{
1755	struct mm_struct *mm;
1756
1757	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1758	if (IS_ERR(mm))
1759		return PTR_ERR(mm);
1760	file->private_data = mm;
1761	return 0;
1762}
1763
1764static int pagemap_release(struct inode *inode, struct file *file)
1765{
1766	struct mm_struct *mm = file->private_data;
1767
1768	if (mm)
1769		mmdrop(mm);
1770	return 0;
1771}
1772
1773#define PM_SCAN_CATEGORIES	(PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN |	\
1774				 PAGE_IS_FILE |	PAGE_IS_PRESENT |	\
1775				 PAGE_IS_SWAPPED | PAGE_IS_PFNZERO |	\
1776				 PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY)
1777#define PM_SCAN_FLAGS		(PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC)
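
/*
 * Userspace illustration (a hedged sketch, not kernel code): the
 * PAGEMAP_SCAN ioctl on /proc/<pid>/pagemap fills struct page_region
 * vectors according to the category masks handled below.  Field names not
 * used in this file are assumed to follow struct pm_scan_arg in the uAPI
 * header <linux/fs.h>:
 *
 *	struct page_region regions[32];
 *	struct pm_scan_arg arg = {
 *		.size = sizeof(arg),
 *		.start = start_vaddr,
 *		.end = end_vaddr,
 *		.vec = (unsigned long)regions,
 *		.vec_len = 32,
 *		.category_mask = PAGE_IS_WRITTEN,
 *		.return_mask = PAGE_IS_WRITTEN,
 *	};
 *	int nregions = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
 *
 * On success, nregions is the number of filled regions and arg.walk_end
 * reports how far the walk got.
 */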
1778
1779struct pagemap_scan_private {
1780	struct pm_scan_arg arg;
1781	unsigned long masks_of_interest, cur_vma_category;
1782	struct page_region *vec_buf;
1783	unsigned long vec_buf_len, vec_buf_index, found_pages;
1784	struct page_region __user *vec_out;
1785};
1786
1787static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
1788					   struct vm_area_struct *vma,
1789					   unsigned long addr, pte_t pte)
1790{
1791	unsigned long categories = 0;
1792
1793	if (pte_present(pte)) {
1794		struct page *page;
1795
1796		categories |= PAGE_IS_PRESENT;
1797		if (!pte_uffd_wp(pte))
1798			categories |= PAGE_IS_WRITTEN;
1799
1800		if (p->masks_of_interest & PAGE_IS_FILE) {
1801			page = vm_normal_page(vma, addr, pte);
1802			if (page && !PageAnon(page))
1803				categories |= PAGE_IS_FILE;
1804		}
1805
1806		if (is_zero_pfn(pte_pfn(pte)))
1807			categories |= PAGE_IS_PFNZERO;
1808		if (pte_soft_dirty(pte))
1809			categories |= PAGE_IS_SOFT_DIRTY;
1810	} else if (is_swap_pte(pte)) {
1811		swp_entry_t swp;
1812
1813		categories |= PAGE_IS_SWAPPED;
1814		if (!pte_swp_uffd_wp_any(pte))
1815			categories |= PAGE_IS_WRITTEN;
1816
1817		if (p->masks_of_interest & PAGE_IS_FILE) {
1818			swp = pte_to_swp_entry(pte);
1819			if (is_pfn_swap_entry(swp) &&
1820			    !folio_test_anon(pfn_swap_entry_folio(swp)))
1821				categories |= PAGE_IS_FILE;
1822		}
1823		if (pte_swp_soft_dirty(pte))
1824			categories |= PAGE_IS_SOFT_DIRTY;
1825	}
1826
1827	return categories;
1828}
1829
1830static void make_uffd_wp_pte(struct vm_area_struct *vma,
1831			     unsigned long addr, pte_t *pte, pte_t ptent)
1832{
1833	if (pte_present(ptent)) {
1834		pte_t old_pte;
1835
1836		old_pte = ptep_modify_prot_start(vma, addr, pte);
1837		ptent = pte_mkuffd_wp(old_pte);
1838		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1839	} else if (is_swap_pte(ptent)) {
1840		ptent = pte_swp_mkuffd_wp(ptent);
1841		set_pte_at(vma->vm_mm, addr, pte, ptent);
1842	} else {
1843		set_pte_at(vma->vm_mm, addr, pte,
1844			   make_pte_marker(PTE_MARKER_UFFD_WP));
1845	}
1846}
1847
1848#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1849static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
1850					  struct vm_area_struct *vma,
1851					  unsigned long addr, pmd_t pmd)
1852{
1853	unsigned long categories = PAGE_IS_HUGE;
1854
1855	if (pmd_present(pmd)) {
1856		struct page *page;
1857
1858		categories |= PAGE_IS_PRESENT;
1859		if (!pmd_uffd_wp(pmd))
1860			categories |= PAGE_IS_WRITTEN;
1861
1862		if (p->masks_of_interest & PAGE_IS_FILE) {
1863			page = vm_normal_page_pmd(vma, addr, pmd);
1864			if (page && !PageAnon(page))
1865				categories |= PAGE_IS_FILE;
1866		}
1867
1868		if (is_zero_pfn(pmd_pfn(pmd)))
1869			categories |= PAGE_IS_PFNZERO;
1870		if (pmd_soft_dirty(pmd))
1871			categories |= PAGE_IS_SOFT_DIRTY;
1872	} else if (is_swap_pmd(pmd)) {
1873		swp_entry_t swp;
1874
1875		categories |= PAGE_IS_SWAPPED;
1876		if (!pmd_swp_uffd_wp(pmd))
1877			categories |= PAGE_IS_WRITTEN;
1878		if (pmd_swp_soft_dirty(pmd))
1879			categories |= PAGE_IS_SOFT_DIRTY;
1880
1881		if (p->masks_of_interest & PAGE_IS_FILE) {
1882			swp = pmd_to_swp_entry(pmd);
1883			if (is_pfn_swap_entry(swp) &&
1884			    !folio_test_anon(pfn_swap_entry_folio(swp)))
1885				categories |= PAGE_IS_FILE;
1886		}
1887	}
1888
1889	return categories;
1890}
1891
1892static void make_uffd_wp_pmd(struct vm_area_struct *vma,
1893			     unsigned long addr, pmd_t *pmdp)
1894{
1895	pmd_t old, pmd = *pmdp;
1896
1897	if (pmd_present(pmd)) {
1898		old = pmdp_invalidate_ad(vma, addr, pmdp);
1899		pmd = pmd_mkuffd_wp(old);
1900		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1901	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1902		pmd = pmd_swp_mkuffd_wp(pmd);
1903		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1904	}
1905}
1906#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1907
1908#ifdef CONFIG_HUGETLB_PAGE
1909static unsigned long pagemap_hugetlb_category(pte_t pte)
1910{
1911	unsigned long categories = PAGE_IS_HUGE;
1912
	/*
	 * According to pagemap_hugetlb_range(), a file-backed HugeTLB
	 * page cannot be swapped, so PAGE_IS_FILE is not checked for
	 * swapped pages.
	 */
1918	if (pte_present(pte)) {
1919		categories |= PAGE_IS_PRESENT;
1920		if (!huge_pte_uffd_wp(pte))
1921			categories |= PAGE_IS_WRITTEN;
1922		if (!PageAnon(pte_page(pte)))
1923			categories |= PAGE_IS_FILE;
1924		if (is_zero_pfn(pte_pfn(pte)))
1925			categories |= PAGE_IS_PFNZERO;
1926		if (pte_soft_dirty(pte))
1927			categories |= PAGE_IS_SOFT_DIRTY;
1928	} else if (is_swap_pte(pte)) {
1929		categories |= PAGE_IS_SWAPPED;
1930		if (!pte_swp_uffd_wp_any(pte))
1931			categories |= PAGE_IS_WRITTEN;
1932		if (pte_swp_soft_dirty(pte))
1933			categories |= PAGE_IS_SOFT_DIRTY;
1934	}
1935
1936	return categories;
1937}
1938
1939static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
1940				  unsigned long addr, pte_t *ptep,
1941				  pte_t ptent)
1942{
1943	unsigned long psize;
1944
1945	if (is_hugetlb_entry_hwpoisoned(ptent) || is_pte_marker(ptent))
1946		return;
1947
1948	psize = huge_page_size(hstate_vma(vma));
1949
1950	if (is_hugetlb_entry_migration(ptent))
1951		set_huge_pte_at(vma->vm_mm, addr, ptep,
1952				pte_swp_mkuffd_wp(ptent), psize);
1953	else if (!huge_pte_none(ptent))
1954		huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
1955					     huge_pte_mkuffd_wp(ptent));
1956	else
1957		set_huge_pte_at(vma->vm_mm, addr, ptep,
1958				make_pte_marker(PTE_MARKER_UFFD_WP), psize);
1959}
1960#endif /* CONFIG_HUGETLB_PAGE */
1961
1962#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
1963static void pagemap_scan_backout_range(struct pagemap_scan_private *p,
1964				       unsigned long addr, unsigned long end)
1965{
1966	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
1967
1968	if (cur_buf->start != addr)
1969		cur_buf->end = addr;
1970	else
1971		cur_buf->start = cur_buf->end = 0;
1972
1973	p->found_pages -= (end - addr) / PAGE_SIZE;
1974}
1975#endif
1976
static bool pagemap_scan_is_interesting_page(unsigned long categories,
					     const struct pagemap_scan_private *p)
{
	categories ^= p->arg.category_inverted;
	if ((categories & p->arg.category_mask) != p->arg.category_mask)
		return false;
	if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask))
		return false;

	return true;
}

static bool pagemap_scan_is_interesting_vma(unsigned long categories,
					    const struct pagemap_scan_private *p)
{
	unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED;

	categories ^= p->arg.category_inverted;
	if ((categories & required) != required)
		return false;

	return true;
}

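/*
 * Per-VMA filter for the walk: decide whether async userfaultfd
 * write-protection is usable on this VMA, reject or skip it according
 * to the request flags, and precompute the VMA-wide categories
 * (PAGE_IS_WPALLOWED, PAGE_IS_SOFT_DIRTY) for the page-level checks.
 */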
static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
				  struct mm_walk *walk)
{
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned long vma_category = 0;
	bool wp_allowed = userfaultfd_wp_async(vma) &&
	    userfaultfd_wp_use_markers(vma);

	if (!wp_allowed) {
		/* The user asked for an explicit failure when wp-async is unavailable. */
		if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
			return -EPERM;
		/*
		 * The user wants to write-protect, but allows unsupported
		 * VMAs to be skipped silently.
		 */
		if (p->arg.flags & PM_SCAN_WP_MATCHING)
			return 1;
		/*
		 * Otherwise the request doesn't involve write-protection
		 * at all; fall through to the remaining checks and allow
		 * the VMA to be walked.
		 */
	}

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	if (wp_allowed)
		vma_category |= PAGE_IS_WPALLOWED;

	if (vma->vm_flags & VM_SOFTDIRTY)
		vma_category |= PAGE_IS_SOFT_DIRTY;

	if (!pagemap_scan_is_interesting_vma(vma_category, p))
		return 1;

	p->cur_vma_category = vma_category;

	return 0;
}

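/*
 * Append [addr, end) with the given categories to the bounce buffer,
 * extending the current entry when the new range is contiguous and has
 * identical categories.  Returns false when the buffer is full and the
 * range could not be recorded.
 */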
static bool pagemap_scan_push_range(unsigned long categories,
				    struct pagemap_scan_private *p,
				    unsigned long addr, unsigned long end)
{
	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];

	/*
	 * When no output buffer is provided at all, the sentinel values
	 * never match here: cur_buf->end can only be non-zero when the
	 * current entry is non-empty.
	 */
	if (addr == cur_buf->end && categories == cur_buf->categories) {
		cur_buf->end = end;
		return true;
	}

	if (cur_buf->end) {
		if (p->vec_buf_index >= p->vec_buf_len - 1)
			return false;

		cur_buf = &p->vec_buf[++p->vec_buf_index];
	}

	cur_buf->start = addr;
	cur_buf->end = end;
	cur_buf->categories = categories;

	return true;
}

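/*
 * Record one range of pages in the output.  The range is clamped to the
 * remaining max_pages budget and to the bounce-buffer capacity; when it
 * has to be cut short, *end is moved back accordingly and -ENOSPC is
 * returned so that the walk stops at the reported position.
 */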
static int pagemap_scan_output(unsigned long categories,
			       struct pagemap_scan_private *p,
			       unsigned long addr, unsigned long *end)
{
	unsigned long n_pages, total_pages;
	int ret = 0;

	if (!p->vec_buf)
		return 0;

	categories &= p->arg.return_mask;

	n_pages = (*end - addr) / PAGE_SIZE;
	if (check_add_overflow(p->found_pages, n_pages, &total_pages) ||
	    total_pages > p->arg.max_pages) {
		size_t n_too_much = total_pages - p->arg.max_pages;
		*end -= n_too_much * PAGE_SIZE;
		n_pages -= n_too_much;
		ret = -ENOSPC;
	}

	if (!pagemap_scan_push_range(categories, p, addr, *end)) {
		*end = addr;
		n_pages = 0;
		ret = -ENOSPC;
	}

	p->found_pages += n_pages;
	if (ret)
		p->arg.walk_end = *end;

	return ret;
}

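/*
 * Handle a PMD-mapped THP in one go.  Returns -ENOENT when no THP is
 * mapped here (or THP support is compiled out) so that the caller falls
 * back to the per-PTE loop.
 */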
static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
				  unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned long categories;
	spinlock_t *ptl;
	int ret = 0;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return -ENOENT;

	categories = p->cur_vma_category |
		     pagemap_thp_category(p, vma, start, *pmd);

	if (!pagemap_scan_is_interesting_page(categories, p))
		goto out_unlock;

	ret = pagemap_scan_output(categories, p, start, &end);
	if (start == end)
		goto out_unlock;

	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
		goto out_unlock;
	if (~categories & PAGE_IS_WRITTEN)
		goto out_unlock;

	/*
	 * Break huge page into small pages if the WP operation
	 * needs to be performed on a portion of the huge page.
	 */
	if (end != start + HPAGE_SIZE) {
		spin_unlock(ptl);
		split_huge_pmd(vma, pmd, start);
		pagemap_scan_backout_range(p, start, end);
		/* Report as if there was no THP */
		return -ENOENT;
	}

	make_uffd_wp_pmd(vma, start, pmd);
	flush_tlb_range(vma, start, end);
out_unlock:
	spin_unlock(ptl);
	return ret;
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
	return -ENOENT;
#endif
}

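/*
 * Scan (and optionally write-protect) one PMD range.  A THP is handled
 * wholesale by pagemap_scan_thp_entry(); otherwise the PTEs are walked
 * one by one, with dedicated fast paths for the WP-only case and for
 * the common "report written pages only" query.
 */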
static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
				  unsigned long end, struct mm_walk *walk)
{
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned long addr, flush_end = 0;
	pte_t *pte, *start_pte;
	spinlock_t *ptl;
	int ret;

	arch_enter_lazy_mmu_mode();

	ret = pagemap_scan_thp_entry(pmd, start, end, walk);
	if (ret != -ENOENT) {
		arch_leave_lazy_mmu_mode();
		return ret;
	}

	ret = 0;
	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
	if (!pte) {
		arch_leave_lazy_mmu_mode();
		walk->action = ACTION_AGAIN;
		return 0;
	}

	if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
		/* Fast path: only write-protecting, no output was requested. */
		for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
			pte_t ptent = ptep_get(pte);

			if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
			    pte_swp_uffd_wp_any(ptent))
				continue;
			make_uffd_wp_pte(vma, addr, pte, ptent);
			if (!flush_end)
				start = addr;
			flush_end = addr + PAGE_SIZE;
		}
		goto flush_and_return;
	}

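	/* Fast path: only PAGE_IS_WRITTEN is requested and reported. */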
	if (!p->arg.category_anyof_mask && !p->arg.category_inverted &&
	    p->arg.category_mask == PAGE_IS_WRITTEN &&
	    p->arg.return_mask == PAGE_IS_WRITTEN) {
		for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
			unsigned long next = addr + PAGE_SIZE;
			pte_t ptent = ptep_get(pte);

			if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
			    pte_swp_uffd_wp_any(ptent))
				continue;
			ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
						  p, addr, &next);
			if (next == addr)
				break;
			if (~p->arg.flags & PM_SCAN_WP_MATCHING)
				continue;
			make_uffd_wp_pte(vma, addr, pte, ptent);
			if (!flush_end)
				start = addr;
			flush_end = next;
		}
		goto flush_and_return;
	}

	for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
		pte_t ptent = ptep_get(pte);
		unsigned long categories = p->cur_vma_category |
					   pagemap_page_category(p, vma, addr, ptent);
		unsigned long next = addr + PAGE_SIZE;

		if (!pagemap_scan_is_interesting_page(categories, p))
			continue;

		ret = pagemap_scan_output(categories, p, addr, &next);
		if (next == addr)
			break;

		if (~p->arg.flags & PM_SCAN_WP_MATCHING)
			continue;
		if (~categories & PAGE_IS_WRITTEN)
			continue;

		make_uffd_wp_pte(vma, addr, pte, ptent);
		if (!flush_end)
			start = addr;
		flush_end = next;
	}

flush_and_return:
	if (flush_end)
		flush_tlb_range(vma, start, addr);

	pte_unmap_unlock(start_pte, ptl);
	arch_leave_lazy_mmu_mode();

	cond_resched();
	return ret;
}

#ifdef CONFIG_HUGETLB_PAGE
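/*
 * Scan (and optionally write-protect) one HugeTLB page.  Partial
 * write-protection of a HugeTLB page is not possible, so a request that
 * covers only part of it is backed out and the walk stops there.
 */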
static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned long categories;
	spinlock_t *ptl;
	int ret = 0;
	pte_t pte;

	if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
		/* Go the short route when not write-protecting pages. */

		pte = huge_ptep_get(ptep);
		categories = p->cur_vma_category | pagemap_hugetlb_category(pte);

		if (!pagemap_scan_is_interesting_page(categories, p))
			return 0;

		return pagemap_scan_output(categories, p, start, &end);
	}

	i_mmap_lock_write(vma->vm_file->f_mapping);
	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);

	pte = huge_ptep_get(ptep);
	categories = p->cur_vma_category | pagemap_hugetlb_category(pte);

	if (!pagemap_scan_is_interesting_page(categories, p))
		goto out_unlock;

	ret = pagemap_scan_output(categories, p, start, &end);
	if (start == end)
		goto out_unlock;

	if (~categories & PAGE_IS_WRITTEN)
		goto out_unlock;

	if (end != start + HPAGE_SIZE) {
		/* Partial HugeTLB page WP isn't possible. */
		pagemap_scan_backout_range(p, start, end);
		p->arg.walk_end = start;
		ret = 0;
		goto out_unlock;
	}

	make_uffd_wp_huge_pte(vma, start, ptep, pte);
	flush_hugetlb_tlb_range(vma, start, end);

out_unlock:
	spin_unlock(ptl);
	i_mmap_unlock_write(vma->vm_file->f_mapping);

	return ret;
}
#else
#define pagemap_scan_hugetlb_entry NULL
#endif

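/*
 * Report a hole (unmapped range) inside a VMA using the VMA-wide
 * categories only, and install uffd-wp markers over it when the caller
 * asked for write-protection.
 */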
static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end,
				 int depth, struct mm_walk *walk)
{
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	int ret, err;

	if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p))
		return 0;

	ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end);
	if (addr == end)
		return ret;

	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
		return ret;

	err = uffd_wp_range(vma, addr, end - addr, true);
	if (err < 0)
		ret = err;

	return ret;
}

static const struct mm_walk_ops pagemap_scan_ops = {
	.test_walk = pagemap_scan_test_walk,
	.pmd_entry = pagemap_scan_pmd_entry,
	.pte_hole = pagemap_scan_pte_hole,
	.hugetlb_entry = pagemap_scan_hugetlb_entry,
};

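/*
 * Copy in and sanity-check the ioctl argument: known flags and
 * categories only, page-aligned start, accessible user buffers, and a
 * page-aligned end.  max_pages == 0 means "no limit".
 */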
static int pagemap_scan_get_args(struct pm_scan_arg *arg,
				 unsigned long uarg)
{
	if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg)))
		return -EFAULT;

	if (arg->size != sizeof(struct pm_scan_arg))
		return -EINVAL;

	/* Validate requested features */
	if (arg->flags & ~PM_SCAN_FLAGS)
		return -EINVAL;
	if ((arg->category_inverted | arg->category_mask |
	     arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES)
		return -EINVAL;

	arg->start = untagged_addr((unsigned long)arg->start);
	arg->end = untagged_addr((unsigned long)arg->end);
	arg->vec = untagged_addr((unsigned long)arg->vec);

	/* Validate memory pointers */
	if (!IS_ALIGNED(arg->start, PAGE_SIZE))
		return -EINVAL;
	if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start))
		return -EFAULT;
	if (!arg->vec && arg->vec_len)
		return -EINVAL;
	if (arg->vec && !access_ok((void __user *)(long)arg->vec,
			      arg->vec_len * sizeof(struct page_region)))
		return -EFAULT;

	/* Fixup default values */
	arg->end = ALIGN(arg->end, PAGE_SIZE);
	arg->walk_end = 0;
	if (!arg->max_pages)
		arg->max_pages = ULONG_MAX;

	return 0;
}

static int pagemap_scan_writeback_args(struct pm_scan_arg *arg,
				       unsigned long uargl)
{
	struct pm_scan_arg __user *uarg = (void __user *)uargl;

	if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end)))
		return -EFAULT;

	return 0;
}

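/*
 * Allocate the kernel-side bounce buffer that collects page_region
 * entries before they are copied to the user-supplied vector; it is
 * capped at one walk window (PAGEMAP_WALK_SIZE) worth of single-page
 * entries.
 */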
static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p)
{
	if (!p->arg.vec_len)
		return 0;

	p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT,
			       p->arg.vec_len);
	p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf),
				   GFP_KERNEL);
	if (!p->vec_buf)
		return -ENOMEM;

	p->vec_buf->start = p->vec_buf->end = 0;
	p->vec_out = (struct page_region __user *)(long)p->arg.vec;

	return 0;
}

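/*
 * Copy the ranges collected so far to userspace and reset the bounce
 * buffer.  Returns the number of ranges written, or -EFAULT.
 */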
static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)
{
	const struct page_region *buf = p->vec_buf;
	long n = p->vec_buf_index;

	if (!p->vec_buf)
		return 0;

	if (buf[n].end != buf[n].start)
		n++;

	if (!n)
		return 0;

	if (copy_to_user(p->vec_out, buf, n * sizeof(*buf)))
		return -EFAULT;

	p->arg.vec_len -= n;
	p->vec_out += n;

	p->vec_buf_index = 0;
	p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len);
	p->vec_buf->start = p->vec_buf->end = 0;

	return n;
}

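/*
 * Top-level PAGEMAP_SCAN handler: walk [start, end) in windows, flush
 * the bounce buffer to userspace after each window, and stop when the
 * output vector or the max_pages budget is exhausted.  On success the
 * number of ranges written is returned and arg.walk_end tells the
 * caller where the scan stopped.
 *
 * Illustrative userspace sketch (not compiled here; error handling and
 * opening the /proc/<pid>/pagemap fd are omitted, and only fields
 * defined in uapi/linux/fs.h are used):
 *
 *	struct page_region regions[64];
 *	struct pm_scan_arg arg = {
 *		.size = sizeof(arg),
 *		.start = start_vaddr,
 *		.end = end_vaddr,
 *		.vec = (__u64)(uintptr_t)regions,
 *		.vec_len = 64,
 *		.category_mask = PAGE_IS_WRITTEN,
 *		.return_mask = PAGE_IS_WRITTEN,
 *	};
 *	int n = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
 *	// n ranges of written pages are now in regions[0..n-1]
 */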
static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
{
	struct pagemap_scan_private p = {0};
	unsigned long walk_start;
	size_t n_ranges_out = 0;
	int ret;

	ret = pagemap_scan_get_args(&p.arg, uarg);
	if (ret)
		return ret;

	p.masks_of_interest = p.arg.category_mask | p.arg.category_anyof_mask |
			      p.arg.return_mask;
	ret = pagemap_scan_init_bounce_buffer(&p);
	if (ret)
		return ret;

	for (walk_start = p.arg.start; walk_start < p.arg.end;
			walk_start = p.arg.walk_end) {
		struct mmu_notifier_range range;
		long n_out;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret)
			break;

		/* Protection change for the range is going to happen. */
		if (p.arg.flags & PM_SCAN_WP_MATCHING) {
			mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
						mm, walk_start, p.arg.end);
			mmu_notifier_invalidate_range_start(&range);
		}

		ret = walk_page_range(mm, walk_start, p.arg.end,
				      &pagemap_scan_ops, &p);

		if (p.arg.flags & PM_SCAN_WP_MATCHING)
			mmu_notifier_invalidate_range_end(&range);

		mmap_read_unlock(mm);

		n_out = pagemap_scan_flush_buffer(&p);
		if (n_out < 0)
			ret = n_out;
		else
			n_ranges_out += n_out;

		if (ret != -ENOSPC)
			break;

		if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages)
			break;
	}

	/* ENOSPC signifies early stop (buffer full) from the walk. */
	if (!ret || ret == -ENOSPC)
		ret = n_ranges_out;

	/* walk_end is not set when the walk completed without stopping early. */
	if (!p.arg.walk_end)
		p.arg.walk_end = p.arg.end;
	if (pagemap_scan_writeback_args(&p.arg, uarg))
		ret = -EFAULT;

	kfree(p.vec_buf);
	return ret;
}

static long do_pagemap_cmd(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	struct mm_struct *mm = file->private_data;

	switch (cmd) {
	case PAGEMAP_SCAN:
		return do_pagemap_scan(mm, arg);

	default:
		return -EINVAL;
	}
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
	.release	= pagemap_release,
	.unlocked_ioctl = do_pagemap_cmd,
	.compat_ioctl	= do_pagemap_cmd,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

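/*
 * Accumulate per-node and per-state statistics for nr_pages pages of a
 * folio into the numa_maps accumulator.
 */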
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	struct folio *folio = page_folio(page);
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || folio_test_dirty(folio))
		md->dirty += nr_pages;

	if (folio_test_swapcache(folio))
		md->swapcache += nr_pages;

	if (folio_test_active(folio) || folio_test_unevictable(folio))
		md->active += nr_pages;

	if (folio_test_writeback(folio))
		md->writeback += nr_pages;

	if (folio_test_anon(folio))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[folio_nid(folio)] += nr_pages;
}

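/*
 * Return the page backing a PTE if it should be counted in the NUMA
 * statistics: present, normal (not zone-device or reserved), and on a
 * node that has memory.
 */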
static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page || is_zone_device_page(page))
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
					      struct vm_area_struct *vma,
					      unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pmd_present(pmd))
		return NULL;

	page = vm_normal_page_pmd(vma, addr, pmd);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}
#endif

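/*
 * Walk one PMD range for /proc/<pid>/numa_maps: a mapped THP is counted
 * as a whole, otherwise the PTEs are scanned individually.
 */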
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md = walk->private;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		struct page *page;

		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
		if (page)
			gather_stats(page, md, pmd_dirty(*pmd),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}
#endif
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	do {
		pte_t ptent = ptep_get(pte);
		struct page *page = can_gather_numa_stats(ptent, vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(ptent), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	pte_t huge_pte = huge_ptep_get(pte);
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(huge_pte))
		return 0;

	page = pte_page(huge_pte);

	md = walk->private;
	gather_stats(page, md, pte_dirty(huge_pte), 1);
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

static const struct mm_walk_ops show_numa_ops = {
	.hugetlb_entry = gather_hugetlb_stats,
	.pmd_entry = gather_pte_stats,
	.walk_lock = PGWALK_RDLOCK,
};

/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	char buffer[64];
	struct mempolicy *pol;
	pgoff_t ilx;
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	pol = __get_vma_policy(vma, vma->vm_start, &ilx);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_path(m, file_user_path(file), "\n\t= ");
	} else if (vma_is_initial_heap(vma)) {
		seq_puts(m, " heap");
	} else if (vma_is_initial_stack(vma)) {
		seq_puts(m, " stack");
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	/* mmap_lock is held by m_start */
	walk_page_vma(vma, &show_numa_ops, md);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);

	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_numa_map,
};

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
				sizeof(struct numa_maps_private));
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

#endif /* CONFIG_NUMA */
