// SPDX-License-Identifier: GPL-2.0-only
/*
 * ppc64 code to implement the kexec_file_load syscall
 *
 * Copyright (C) 2004  Adam Litke (agl@us.ibm.com)
 * Copyright (C) 2004  IBM Corp.
 * Copyright (C) 2004,2005  Milton D Miller II, IBM Corporation
 * Copyright (C) 2005  R Sharada (sharada@in.ibm.com)
 * Copyright (C) 2006  Mohan Kumar M (mohan@in.ibm.com)
 * Copyright (C) 2020  IBM Corporation
 *
 * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.
 * Heavily modified for the kernel by
 * Hari Bathini, IBM Corporation.
 */

#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/drmem.h>
#include <asm/firmware.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>
#include <asm/mmzone.h>
#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/plpks.h>
#include <asm/cputhreads.h>

struct umem_info {
	__be64 *buf;		/* data buffer for usable-memory property */
	u32 size;		/* size allocated for the data buffer */
	u32 max_entries;	/* maximum no. of entries */
	u32 idx;		/* index of current entry */

	/* usable memory ranges to look up */
	unsigned int nr_ranges;
	const struct range *ranges;
};
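
/*
 * umem_info::buf is filled with __be64 cells for the usable memory
 * properties: linux,usable-memory holds a flat list of (base, size)
 * pairs, while linux,drconf-usable-memory holds, for each LMB, a range
 * count followed by that many (base, size) pairs. For example, an LMB
 * with two usable ranges contributes <2 base0 size0 base1 size1>.
 */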

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_elf64_ops,
	NULL
};

/**
 * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
 *                              in the memory regions between buf_min & buf_max
 *                              for the buffer. If found, sets kbuf->mem.
 * @kbuf:                       Buffer contents and memory parameters.
 * @buf_min:                    Minimum address for the buffer.
 * @buf_max:                    Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
				      u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range_rev(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (start > buf_max)
			continue;

		/* Memory hole not found */
		if (end < buf_min)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
					       kbuf->buf_align);
			ret = 0;
			break;
		}
	}

	return ret;
}

/**
 * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
 *                                  suitable buffer using a top-down approach.
 * @kbuf:                           Buffer contents and memory parameters.
 * @buf_min:                        Minimum address for the buffer.
 * @buf_max:                        Maximum address for the buffer.
 * @emem:                           Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf,
					  u64 buf_min, u64 buf_max,
					  const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	tmax = buf_max;
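	/*
	 * Exclude ranges are kept sorted in ascending order, so walk them
	 * from the last (highest) one, probing the hole between each exclude
	 * range and the current upper limit (tmax), then lowering tmax below
	 * that range before moving on.
	 */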
	for (i = (emem->nr_ranges - 1); i >= 0; i--) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (start > tmax)
			continue;

		if (end < tmax) {
			tmin = (end < buf_min ? buf_min : end + 1);
			ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		tmax = start - 1;

		if (tmax < buf_min) {
			ret = err;
			break;
		}
		ret = 0;
	}

	if (!ret) {
		tmin = buf_min;
		ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
	}
	return ret;
}

/**
 * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
 *                               in the memory regions between buf_min & buf_max
 *                               for the buffer. If found, sets kbuf->mem.
 * @kbuf:                        Buffer contents and memory parameters.
 * @buf_min:                     Minimum address for the buffer.
 * @buf_max:                     Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
				       u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (end < buf_min)
			continue;

		/* Memory hole not found */
		if (start > buf_max)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = start;
			ret = 0;
			break;
		}
	}

	return ret;
}

/**
 * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
 *                                   suitable buffer using a bottom-up approach.
 * @kbuf:                            Buffer contents and memory parameters.
 * @buf_min:                         Minimum address for the buffer.
 * @buf_max:                         Maximum address for the buffer.
 * @emem:                            Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
					   u64 buf_min, u64 buf_max,
					   const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	tmin = buf_min;
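	/*
	 * Mirror image of the top-down variant: walk the sorted exclude
	 * ranges from the lowest one, probing the hole below each range and
	 * raising tmin above it afterwards.
	 */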
	for (i = 0; i < emem->nr_ranges; i++) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (end < tmin)
			continue;

		if (start > tmin) {
			tmax = (start > buf_max ? buf_max : start - 1);
			ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		tmin = end + 1;

		if (tmin > buf_max) {
			ret = err;
			break;
		}
		ret = 0;
	}

	if (!ret) {
		tmax = buf_max;
		ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
	}
	return ret;
}

#ifdef CONFIG_CRASH_DUMP
/**
 * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
 * @um_info:                  Usable memory buffer and ranges info.
 * @cnt:                      No. of entries to accommodate.
 *
 * On reallocation failure, the old buffer is left untouched; it is the
 * caller's responsibility to free it.
 *
 * Returns buffer on success, NULL on error.
 */
static __be64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
{
	u32 new_size;
	__be64 *tbuf;

	if ((um_info->idx + cnt) <= um_info->max_entries)
		return um_info->buf;

	new_size = um_info->size + MEM_RANGE_CHUNK_SZ;
	tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL);
	if (tbuf) {
		um_info->buf = tbuf;
		um_info->size = new_size;
		um_info->max_entries = (um_info->size / sizeof(u64));
	}

	return tbuf;
}

/**
 * add_usable_mem - Add the usable memory ranges within the given memory range
 *                  to the buffer
 * @um_info:        Usable memory buffer and ranges info.
 * @base:           Base address of memory range to look for.
 * @end:            End address of memory range to look for.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem(struct umem_info *um_info, u64 base, u64 end)
{
	u64 loc_base, loc_end;
	bool add;
	int i;

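	/*
	 * For every usable range that intersects [base, end], emit the
	 * intersection as a (base, size) pair into the buffer.
	 */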
	for (i = 0; i < um_info->nr_ranges; i++) {
		add = false;
		loc_base = um_info->ranges[i].start;
		loc_end = um_info->ranges[i].end;
		if (loc_base >= base && loc_end <= end)
			add = true;
		else if (base < loc_end && end > loc_base) {
			if (loc_base < base)
				loc_base = base;
			if (loc_end > end)
				loc_end = end;
			add = true;
		}

		if (add) {
			if (!check_realloc_usable_mem(um_info, 2))
				return -ENOMEM;

			um_info->buf[um_info->idx++] = cpu_to_be64(loc_base);
			um_info->buf[um_info->idx++] =
					cpu_to_be64(loc_end - loc_base + 1);
		}
	}

	return 0;
}

/**
 * kdump_setup_usable_lmb - walk_drmem_lmbs() callback, invoked for every LMB
 *                          to set up its usable memory ranges.
 * @lmb:                    LMB info.
 * @usm:                    linux,drconf-usable-memory property value.
 * @data:                   Pointer to usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,
				  void *data)
{
	struct umem_info *um_info;
	int tmp_idx, ret;
	u64 base, end;

	/*
	 * kdump load isn't supported on kernels already booted with
	 * linux,drconf-usable-memory property.
	 */
	if (*usm) {
		pr_err("linux,drconf-usable-memory property already exists!\n");
		return -EINVAL;
	}

	um_info = data;
	tmp_idx = um_info->idx;
	if (!check_realloc_usable_mem(um_info, 1))
		return -ENOMEM;

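	/* Reserve one cell (at tmp_idx) for the range count, filled in below. */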
	um_info->idx++;
	base = lmb->base_addr;
	end = base + drmem_lmb_size() - 1;
	ret = add_usable_mem(um_info, base, end);
	if (!ret) {
		/*
		 * Update the no. of ranges added. Two entries (base & size)
		 * for every range added.
		 */
		um_info->buf[tmp_idx] =
				cpu_to_be64((um_info->idx - tmp_idx - 1) / 2);
	}

	return ret;
}

#define NODE_PATH_LEN		256
/**
 * add_usable_mem_property - Add usable memory property for the given
 *                           memory node.
 * @fdt:                     Flattened device tree for the kdump kernel.
 * @dn:                      Memory node.
 * @um_info:                 Usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem_property(void *fdt, struct device_node *dn,
				   struct umem_info *um_info)
{
	int n_mem_addr_cells, n_mem_size_cells, node;
	char path[NODE_PATH_LEN];
	int i, len, ranges, ret;
	const __be32 *prop;
	u64 base, end;

	of_node_get(dn);

	if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) {
		pr_err("Buffer (%d) too small for memory node: %pOF\n",
		       NODE_PATH_LEN, dn);
		ret = -EOVERFLOW;
		goto out;
	}
	kexec_dprintk("Memory node path: %s\n", path);

	/* Now that we know the path, find its offset in kdump kernel's fdt */
	node = fdt_path_offset(fdt, path);
	if (node < 0) {
		pr_err("Malformed device tree: error reading %s\n", path);
		ret = -EINVAL;
		goto out;
	}

	/* Get the address & size cells */
	n_mem_addr_cells = of_n_addr_cells(dn);
	n_mem_size_cells = of_n_size_cells(dn);
	kexec_dprintk("address cells: %d, size cells: %d\n", n_mem_addr_cells,
		      n_mem_size_cells);

	um_info->idx  = 0;
	if (!check_realloc_usable_mem(um_info, 2)) {
		ret = -ENOMEM;
		goto out;
	}

	prop = of_get_property(dn, "reg", &len);
	if (!prop || len <= 0) {
		ret = 0;
		goto out;
	}

	/*
	 * "reg" property represents a sequence of (addr, size) tuples, each
	 * representing a memory range. With each FDT cell being 4 bytes,
	 * len >> 2 converts the property length from bytes to cells.
	 */
	ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

	for (i = 0; i < ranges; i++) {
		base = of_read_number(prop, n_mem_addr_cells);
		prop += n_mem_addr_cells;
		end = base + of_read_number(prop, n_mem_size_cells) - 1;
		prop += n_mem_size_cells;

		ret = add_usable_mem(um_info, base, end);
		if (ret)
			goto out;
	}

	/*
	 * No kdump kernel usable memory found in this memory node.
	 * Write (0,0) tuple in linux,usable-memory property for
	 * this region to be ignored.
	 */
	if (um_info->idx == 0) {
		um_info->buf[0] = 0;
		um_info->buf[1] = 0;
		um_info->idx = 2;
	}

	ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
			  (um_info->idx * sizeof(u64)));

out:
	of_node_put(dn);
	return ret;
}

/**
 * update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory
 *                         and linux,drconf-usable-memory DT properties as
 *                         appropriate to restrict its memory usage.
 * @fdt:                   Flattened device tree for the kdump kernel.
 * @usable_mem:            Usable memory ranges for kdump kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
{
	struct umem_info um_info;
	struct device_node *dn;
	int node, ret = 0;

	if (!usable_mem) {
		pr_err("Usable memory ranges for kdump kernel not found\n");
		return -ENOENT;
	}

	node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
	if (node == -FDT_ERR_NOTFOUND)
		kexec_dprintk("No dynamic reconfiguration memory found\n");
	else if (node < 0) {
		pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");
		return -EINVAL;
	}

	um_info.buf  = NULL;
	um_info.size = 0;
	um_info.max_entries = 0;
	um_info.idx  = 0;
	/* Memory ranges to look up */
	um_info.ranges = &(usable_mem->ranges[0]);
	um_info.nr_ranges = usable_mem->nr_ranges;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (dn) {
		ret = walk_drmem_lmbs(dn, &um_info, kdump_setup_usable_lmb);
		of_node_put(dn);

		if (ret) {
			pr_err("Could not setup linux,drconf-usable-memory property for kdump\n");
			goto out;
		}

		ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
				  um_info.buf, (um_info.idx * sizeof(u64)));
		if (ret) {
			pr_err("Failed to update fdt with linux,drconf-usable-memory property: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

	/*
	 * Walk through each memory node and set linux,usable-memory property
	 * for the corresponding node in kdump kernel's fdt.
	 */
	for_each_node_by_type(dn, "memory") {
		ret = add_usable_mem_property(fdt, dn, &um_info);
		if (ret) {
			pr_err("Failed to set linux,usable-memory property for %s node\n",
			       dn->full_name);
			of_node_put(dn);
			goto out;
		}
	}

out:
	kfree(um_info.buf);
	return ret;
}

/**
 * load_backup_segment - Locate a memory hole to place the backup region.
 * @image:               Kexec image.
 * @kbuf:                Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	void *buf;
	int ret;

	/*
	 * Set up a source buffer for the backup segment.
	 *
	 * A source buffer has no meaning for the backup region as data will
	 * be copied from the backup source, after crash, in the purgatory.
	 * But as the load segment code doesn't recognize such segments,
	 * set up a dummy source buffer here to keep it happy for now.
	 */
	buf = vzalloc(BACKUP_SRC_SIZE);
	if (!buf)
		return -ENOMEM;

	kbuf->buffer = buf;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(buf);
		return ret;
	}

	image->arch.backup_buf = buf;
	image->arch.backup_start = kbuf->mem;
	return 0;
}

/**
 * update_backup_region_phdr - Update backup region's offset for the core to
 *                             export the region appropriately.
 * @image:                     Kexec image.
 * @ehdr:                      ELF core header.
 *
 * Assumes an exclusive program header is set up for the backup region
 * in the ELF headers.
 *
 * Returns nothing.
 */
static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
{
	Elf64_Phdr *phdr;
	unsigned int i;

	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_paddr == BACKUP_SRC_START) {
			phdr->p_offset = image->arch.backup_start;
			kexec_dprintk("Backup region offset updated to 0x%lx\n",
				      image->arch.backup_start);
			return;
		}
	}
}

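/*
 * Extra space, beyond the exact elfcorehdr size, kept in the elfcorehdr
 * segment so that program headers for memory ranges hot-added after the
 * kdump kernel is loaded can be added in place, without regenerating the
 * whole segment (see CONFIG_CRASH_HOTPLUG).
 */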
static unsigned int kdump_extra_elfcorehdr_size(struct crash_mem *cmem)
{
#if defined(CONFIG_CRASH_HOTPLUG) && defined(CONFIG_MEMORY_HOTPLUG)
	unsigned int extra_sz = 0;

	if (CONFIG_CRASH_MAX_MEMORY_RANGES > (unsigned int)PN_XNUM)
		pr_warn("Number of Phdrs %u exceeds max\n", CONFIG_CRASH_MAX_MEMORY_RANGES);
	else if (cmem->nr_ranges >= CONFIG_CRASH_MAX_MEMORY_RANGES)
		pr_warn("Configured crash mem ranges may not be enough\n");
	else
		extra_sz = (CONFIG_CRASH_MAX_MEMORY_RANGES - cmem->nr_ranges) * sizeof(Elf64_Phdr);

	return extra_sz;
#endif
	return 0;
}

/**
 * load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr
 *                           segment needed to load kdump kernel.
 * @image:                   Kexec image.
 * @kbuf:                    Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	struct crash_mem *cmem = NULL;
	unsigned long headers_sz;
	void *headers = NULL;
	int ret;

	ret = get_crash_memory_ranges(&cmem);
	if (ret)
		goto out;

	/* Setup elfcorehdr segment */
	ret = crash_prepare_elf64_headers(cmem, false, &headers, &headers_sz);
	if (ret) {
		pr_err("Failed to prepare elf headers for the core\n");
		goto out;
	}

	/* Fix the offset for backup region in the ELF header */
	update_backup_region_phdr(image, headers);

	kbuf->buffer = headers;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = headers_sz;
	kbuf->memsz = headers_sz + kdump_extra_elfcorehdr_size(cmem);
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(headers);
		goto out;
	}

	image->elf_load_addr = kbuf->mem;
	image->elf_headers_sz = headers_sz;
	image->elf_headers = headers;
out:
	kfree(cmem);
	return ret;
}

/**
 * load_crashdump_segments_ppc64 - Initialize the additional segments needed
 *                                 to load the kdump kernel.
 * @image:                         Kexec image.
 * @kbuf:                          Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
int load_crashdump_segments_ppc64(struct kimage *image,
				  struct kexec_buf *kbuf)
{
	int ret;

	/* Load backup segment - first 64K bytes of the crashing kernel */
	ret = load_backup_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load backup segment\n");
		return ret;
	}
	kexec_dprintk("Loaded the backup region at 0x%lx\n", kbuf->mem);

	/* Load elfcorehdr segment - to export crashing kernel's vmcore */
	ret = load_elfcorehdr_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load elfcorehdr segment\n");
		return ret;
	}
	kexec_dprintk("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
		      image->elf_load_addr, kbuf->bufsz, kbuf->memsz);

	return 0;
}
#endif /* CONFIG_CRASH_DUMP */

/**
 * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
 *                         variables and call setup_purgatory() to initialize
 *                         common global variables.
 * @image:                 kexec image.
 * @slave_code:            Slave code for the purgatory.
 * @fdt:                   Flattened device tree for the next kernel.
 * @kernel_load_addr:      Address where the kernel is loaded.
 * @fdt_load_addr:         Address where the flattened device tree is loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
			  const void *fdt, unsigned long kernel_load_addr,
			  unsigned long fdt_load_addr)
{
	struct device_node *dn = NULL;
	int ret;

	ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
			      fdt_load_addr);
	if (ret)
		goto out;

	if (image->type == KEXEC_TYPE_CRASH) {
		u32 my_run_at_load = 1;

		/*
		 * Tell relocatable kernel to run at load address
		 * via the word meant for that at 0x5c.
		 */
		ret = kexec_purgatory_get_set_symbol(image, "run_at_load",
						     &my_run_at_load,
						     sizeof(my_run_at_load),
						     false);
		if (ret)
			goto out;
	}

	/* Tell purgatory where to look for backup region */
	ret = kexec_purgatory_get_set_symbol(image, "backup_start",
					     &image->arch.backup_start,
					     sizeof(image->arch.backup_start),
					     false);
	if (ret)
		goto out;

	/* Setup OPAL base & entry values */
	dn = of_find_node_by_path("/ibm,opal");
	if (dn) {
		u64 val;

		of_property_read_u64(dn, "opal-base-address", &val);
		ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
						     sizeof(val), false);
		if (ret)
			goto out;

		of_property_read_u64(dn, "opal-entry-address", &val);
		ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,
						     sizeof(val), false);
	}
out:
	if (ret)
		pr_err("Failed to setup purgatory symbols\n");
	of_node_put(dn);
	return ret;
}

/**
 * cpu_node_size - Compute the size of a CPU node in the FDT.
 *                 This should be done only once and the value is stored in
 *                 a static variable.
 * Returns the max size of a CPU node in the FDT.
 */
static unsigned int cpu_node_size(void)
{
	static unsigned int size;
	struct device_node *dn;
	struct property *pp;

	/*
	 * Don't compute it twice, we are assuming that the per CPU node size
	 * doesn't change during the system's life.
	 */
	if (size)
		return size;

	dn = of_find_node_by_type(NULL, "cpu");
	if (WARN_ON_ONCE(!dn)) {
		// Unlikely to happen
		return 0;
	}

	/*
	 * We compute the sub node size for a CPU node, assuming it
	 * will be the same for all.
	 */
	size += strlen(dn->name) + 5;
	for_each_property_of_node(dn, pp) {
		size += strlen(pp->name);
		size += pp->length;
	}

	of_node_put(dn);
	return size;
}

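/*
 * Extra FDT space a kdump kernel needs over a regular kexec kernel: room
 * for the usable memory properties, plus room for CPU nodes added since
 * boot (and, with CONFIG_CRASH_HOTPLUG, for possible CPUs not yet present).
 */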
static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image)
{
	unsigned int cpu_nodes, extra_size = 0;
	struct device_node *dn;
	u64 usm_entries;
#ifdef CONFIG_CRASH_HOTPLUG
	unsigned int possible_cpu_nodes;
#endif

	if (!IS_ENABLED(CONFIG_CRASH_DUMP) || image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump kernel, account for linux,usable-memory and
	 * linux,drconf-usable-memory properties. Get an approximation of the
	 * number of usable memory entries and use that for FDT size
	 * estimation: one cell per LMB for the range count, plus a
	 * (base, size) pair for every LMB that falls within the crashkernel
	 * region.
	 */
	if (drmem_lmb_size()) {
		usm_entries = ((memory_hotplug_max() / drmem_lmb_size()) +
			       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
		extra_size += (unsigned int)(usm_entries * sizeof(u64));
	}

	/*
	 * Get the number of CPU nodes in the current DT. This allows us to
	 * reserve space for CPU nodes added since boot.
	 */
	cpu_nodes = 0;
	for_each_node_by_type(dn, "cpu") {
		cpu_nodes++;
	}

	if (cpu_nodes > boot_cpu_node_count)
		extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size();

#ifdef CONFIG_CRASH_HOTPLUG
	/*
	 * Make sure enough space is reserved to accommodate possible CPU nodes
	 * in the crash FDT. This allows packing possible CPU nodes which are
	 * not yet present in the system without regenerating the entire FDT.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		possible_cpu_nodes = num_possible_cpus() / threads_per_core;
		if (possible_cpu_nodes > cpu_nodes)
			extra_size += (possible_cpu_nodes - cpu_nodes) * cpu_node_size();
	}
#endif

	return extra_size;
}

/**
 * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
 *                              setup FDT for kexec/kdump kernel.
 * @image:                      kexec image being loaded.
 *
 * Returns the estimated extra size needed for kexec/kdump kernel FDT.
 */
unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
{
	unsigned int extra_size = 0;

	// Budget some space for the password blob. There's already extra space
	// for the key name
	if (plpks_is_available())
		extra_size += (unsigned int)plpks_get_passwordlen();

	return extra_size + kdump_extra_fdt_size_ppc64(image);
}

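/*
 * Sync @propname on live device tree node @dn into the new FDT at
 * @node_offset: update it when present, delete it from the FDT when it has
 * been removed from the live tree.
 */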
static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
			 const char *propname)
{
	const void *prop, *fdtprop;
	int len = 0, fdtlen = 0;

	prop = of_get_property(dn, propname, &len);
	fdtprop = fdt_getprop(fdt, node_offset, propname, &fdtlen);

	if (fdtprop && !prop)
		return fdt_delprop(fdt, node_offset, propname);
	else if (prop)
		return fdt_setprop(fdt, node_offset, propname, prop, len);
	else
		return -FDT_ERR_NOTFOUND;
}

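/*
 * On LPAR, copy the DMA window properties ("ibm,dma-window" and the given
 * @dmapropname) for each PCI node from the live device tree into the new
 * FDT, since these can change at runtime (e.g. with dynamic DMA windows).
 */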
static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
{
	struct device_node *dn;
	int pci_offset, root_offset, ret = 0;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	root_offset = fdt_path_offset(fdt, "/");
	for_each_node_with_property(dn, dmapropname) {
		pci_offset = fdt_subnode_offset(fdt, root_offset, of_node_full_name(dn));
		if (pci_offset < 0)
			continue;

		ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window");
		if (ret < 0) {
			of_node_put(dn);
			break;
		}
		ret = copy_property(fdt, pci_offset, dn, dmapropname);
		if (ret < 0) {
			of_node_put(dn);
			break;
		}
	}

	return ret;
}

/**
 * setup_new_fdt_ppc64 - Update the flattened device tree of the kernel
 *                       being loaded.
 * @image:               kexec image being loaded.
 * @fdt:                 Flattened device tree for the next kernel.
 * @initrd_load_addr:    Address where the next initrd will be loaded.
 * @initrd_len:          Size of the next initrd, or 0 if there will be none.
 * @cmdline:             Command line for the next kernel, or NULL if there will
 *                       be none.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
			unsigned long initrd_load_addr,
			unsigned long initrd_len, const char *cmdline)
{
	struct crash_mem *umem = NULL, *rmem = NULL;
	int i, nr_ranges, ret;

#ifdef CONFIG_CRASH_DUMP
	/*
	 * Restrict memory usage for kdump kernel by setting up
	 * usable memory ranges and memory reserve map.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = get_usable_memory_ranges(&umem);
		if (ret)
			goto out;

		ret = update_usable_mem_fdt(fdt, umem);
		if (ret) {
			pr_err("Error setting up usable-memory property for kdump kernel\n");
			goto out;
		}

		/*
		 * Ensure we don't touch crashed kernel's memory except the
		 * first 64K of RAM, which will be backed up.
		 */
		ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
				      crashk_res.start - BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving crash memory: %s\n",
			       fdt_strerror(ret));
			goto out;
		}

		/* Ensure backup region is not used by kdump/capture kernel */
		ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
				      BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving memory for backup: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}
#endif /* CONFIG_CRASH_DUMP */

	/* Update cpus nodes information to account for hotplugged CPUs. */
	ret = update_cpus_node(fdt);
	if (ret < 0)
		goto out;

	ret = update_pci_dma_nodes(fdt, DIRECT64_PROPNAME);
	if (ret < 0)
		goto out;

	ret = update_pci_dma_nodes(fdt, DMA64_PROPNAME);
	if (ret < 0)
		goto out;

	/* Update memory reserve map */
	ret = get_reserved_memory_ranges(&rmem);
	if (ret)
		goto out;

	nr_ranges = rmem ? rmem->nr_ranges : 0;
	for (i = 0; i < nr_ranges; i++) {
		u64 base, size;

		base = rmem->ranges[i].start;
		size = rmem->ranges[i].end - base + 1;
		ret = fdt_add_mem_rsv(fdt, base, size);
		if (ret) {
			pr_err("Error updating memory reserve map: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

	// If we have PLPKS active, we need to provide the password to the new kernel
	if (plpks_is_available())
		ret = plpks_populate_fdt(fdt);

out:
	kfree(rmem);
	kfree(umem);
	return ret;
}

/**
 * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
 *                              tce-table, reserved-ranges & such (exclude
 *                              memory ranges) as they can't be used for kexec
 *                              segment buffer. Sets kbuf->mem when a suitable
 *                              memory hole is found.
 * @kbuf:                       Buffer contents and memory parameters.
 *
 * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	struct crash_mem **emem;
	u64 buf_min, buf_max;
	int ret;

	/* Look up the exclude ranges list while locating the memory hole */
	emem = &(kbuf->image->arch.exclude_ranges);
	if (!(*emem) || ((*emem)->nr_ranges == 0)) {
		pr_warn("No exclude range list. Using the default locate mem hole method\n");
		return kexec_locate_mem_hole(kbuf);
	}

	buf_min = kbuf->buf_min;
	buf_max = kbuf->buf_max;
	/* Segments for kdump kernel should be within crashkernel region */
	if (IS_ENABLED(CONFIG_CRASH_DUMP) && kbuf->image->type == KEXEC_TYPE_CRASH) {
		buf_min = (buf_min < crashk_res.start ?
			   crashk_res.start : buf_min);
		buf_max = (buf_max > crashk_res.end ?
			   crashk_res.end : buf_max);
	}

	if (buf_min > buf_max) {
		pr_err("Invalid buffer min and/or max values\n");
		return -EINVAL;
	}

	if (kbuf->top_down)
		ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max,
						     *emem);
	else
		ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max,
						      *emem);

	/* Add the buffer allocated to the exclude list for the next lookup */
	if (!ret) {
		add_mem_range(emem, kbuf->mem, kbuf->memsz);
		sort_memory_ranges(*emem, true);
	} else {
		pr_err("Failed to locate memory buffer of size %lu\n",
		       kbuf->memsz);
	}
	return ret;
}

/**
 * arch_kexec_kernel_image_probe - Does additional handling needed to setup
 *                                 kexec segments.
 * @image:                         kexec image being loaded.
 * @buf:                           Buffer pointing to elf data.
 * @buf_len:                       Length of the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	int ret;

	/* Get exclude memory ranges needed for setting up kexec segments */
	ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges));
	if (ret) {
		pr_err("Failed to setup exclude memory ranges for buffer lookup\n");
		return ret;
	}

	return kexec_image_probe_default(image, buf, buf_len);
}

/**
 * arch_kimage_file_post_load_cleanup - Frees up all the allocations done
 *                                      while loading the image.
 * @image:                              kexec image being loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kfree(image->arch.exclude_ranges);
	image->arch.exclude_ranges = NULL;

	vfree(image->arch.backup_buf);
	image->arch.backup_buf = NULL;

	vfree(image->elf_headers);
	image->elf_headers = NULL;
	image->elf_headers_sz = 0;

	kvfree(image->arch.fdt);
	image->arch.fdt = NULL;

	return kexec_image_post_load_cleanup_default(image);
}