/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <mach-o/loader.h>
#include <sys/types.h>

#if KERNEL
    #ifdef MACH_ASSERT
        #undef MACH_ASSERT
    #endif
    #define MACH_ASSERT 1
    #include <kern/assert.h>
#else
    #include <assert.h>
#endif

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_demangle.h"
#include "kxld_dict.h"
#include "kxld_object.h"
#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_sym.h"
#include "kxld_symtab.h"
#include "kxld_util.h"
#include "kxld_vtable.h"

#define VTABLE_ENTRY_SIZE_32 4
#define VTABLE_HEADER_LEN_32 2
#define VTABLE_HEADER_SIZE_32 (VTABLE_HEADER_LEN_32 * VTABLE_ENTRY_SIZE_32)

#define VTABLE_ENTRY_SIZE_64 8
#define VTABLE_HEADER_LEN_64 2
#define VTABLE_HEADER_SIZE_64 (VTABLE_HEADER_LEN_64 * VTABLE_ENTRY_SIZE_64)

static void  get_vtable_base_sizes(boolean_t is_32_bit, u_int *vtable_entry_size,
    u_int *vtable_header_size);

static kern_return_t init_by_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDSect *sect, const KXLDRelocator *relocator);

static kern_return_t init_by_entries_and_relocs(KXLDVTable *vtable,
    const KXLDSym *vtable_sym, const KXLDRelocator *relocator,
    const KXLDArray *relocs, const KXLDDict *defined_cxx_symbols);

static kern_return_t init_by_entries(KXLDVTable *vtable,
    const KXLDRelocator *relocator, const KXLDDict *defined_cxx_symbols);

/*******************************************************************************
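* Initializes a vtable object from a vtable symbol and its enclosing object.
* Depending on the object's link state, the entries are read directly from the
* already-linked vtable data, from the object's relocation entries, or from a
* combination of both.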
*******************************************************************************/
kern_return_t
kxld_vtable_init(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDObject *object, const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    const KXLDArray *extrelocs = NULL;
    const KXLDRelocator *relocator = NULL;
    const KXLDSect *vtable_sect = NULL;
    char *demangled_name = NULL;
    size_t demangled_length = 0;

    check(vtable);
    check(vtable_sym);
    check(object);

    relocator = kxld_object_get_relocator(object);

    vtable_sect = kxld_object_get_section_by_index(object,
        vtable_sym->sectnum);
    require_action(vtable_sect, finish, rval=KERN_FAILURE);

    vtable->name = vtable_sym->name;
    vtable->vtable = vtable_sect->data +
        kxld_sym_get_section_offset(vtable_sym, vtable_sect);

    if (kxld_object_is_linked(object)) {
        rval = init_by_entries(vtable, relocator, defined_cxx_symbols);
        require_noerr(rval, finish);

        vtable->is_patched = TRUE;
    } else {
        if (kxld_object_is_final_image(object)) {
            extrelocs = kxld_object_get_extrelocs(object);
            require_action(extrelocs, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    kKxldLogMalformedVTable,
                    kxld_demangle(vtable->name,
                        &demangled_name, &demangled_length)));

            rval = init_by_entries_and_relocs(vtable, vtable_sym,
                relocator, extrelocs, defined_cxx_symbols);
            require_noerr(rval, finish);
        } else {
            require_action(kxld_sect_get_num_relocs(vtable_sect) > 0, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    kKxldLogMalformedVTable,
                    kxld_demangle(vtable->name,
                        &demangled_name, &demangled_length)));

            rval = init_by_relocs(vtable, vtable_sym, vtable_sect, relocator);
            require_noerr(rval, finish);
        }

        vtable->is_patched = FALSE;
    }

    rval = KERN_SUCCESS;
finish:
    if (demangled_name) kxld_free(demangled_name, demangled_length);

    return rval;
}

/*******************************************************************************
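* Returns the vtable entry size and header size for the target word size
* (32-bit or 64-bit).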
*******************************************************************************/
static void
get_vtable_base_sizes(boolean_t is_32_bit, u_int *vtable_entry_size,
    u_int *vtable_header_size)
{
    check(vtable_entry_size);
    check(vtable_header_size);

    if (is_32_bit) {
        *vtable_entry_size = VTABLE_ENTRY_SIZE_32;
        *vtable_header_size = VTABLE_HEADER_SIZE_32;
    } else {
        *vtable_entry_size = VTABLE_ENTRY_SIZE_64;
        *vtable_header_size = VTABLE_HEADER_SIZE_64;
    }
}

/*******************************************************************************
* Initializes a vtable object by matching up relocation entries to the vtable's
* entries and finding the corresponding symbols.
*******************************************************************************/
static kern_return_t
init_by_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDSect *sect, const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *entry = NULL;
    KXLDSym *sym = NULL;
    kxld_addr_t vtable_base_offset = 0;
    kxld_addr_t entry_offset = 0;
    u_int i = 0;
    u_int nentries = 0;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int base_reloc_index = 0;
    u_int reloc_index = 0;

    check(vtable);
    check(vtable_sym);
    check(sect);
    check(relocator);

    /* Find the first entry past the vtable padding */

    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    vtable_base_offset = kxld_sym_get_section_offset(vtable_sym, sect) +
        vtable_header_size;

    /* Find the relocation entry at the start of the vtable */

    rval = kxld_reloc_get_reloc_index_by_offset(&sect->relocs,
        vtable_base_offset, &base_reloc_index);
    require_noerr(rval, finish);

    /* Count the number of consecutive relocation entries to find the number of
     * vtable entries.  For some reason, the __TEXT,__const relocations are
     * sorted in descending order, so we have to walk backwards.  Also, make
     * sure we don't run off the end of the section's relocs.
     */

    reloc_index = base_reloc_index;
    entry_offset = vtable_base_offset;
    reloc = kxld_array_get_item(&sect->relocs, reloc_index);
    while (reloc->address == entry_offset) {
        ++nentries;
        if (!reloc_index) break;

        --reloc_index;

        reloc = kxld_array_get_item(&sect->relocs, reloc_index);
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */

    for (i = 0; i < vtable->entries.nitems; ++i) {
        reloc = kxld_array_get_item(&sect->relocs, base_reloc_index - i);
        entry = kxld_array_get_item(&vtable->entries, i);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped.  We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it.  We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        sym = kxld_reloc_get_symbol(relocator, reloc, sect->data);

        entry->unpatched.sym = sym;
        entry->unpatched.reloc = reloc;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
* Initializes a vtable object by reading the symbol values out of the vtable
* entries and performing reverse symbol lookups on those values.
*******************************************************************************/
static kern_return_t
init_by_entries(KXLDVTable *vtable, const KXLDRelocator *relocator,
    const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *sym = NULL;
    kxld_addr_t entry_value = 0;
    u_long entry_offset;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int nentries = 0;
    u_int i = 0;

    check(vtable);
    check(relocator);

    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    /* Count the number of entries (the vtable is null-terminated) */

    entry_offset = vtable_header_size;
    while (1) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);
        if (!entry_value) break;

        entry_offset += vtable_entry_size;
        ++nentries;
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Look up the symbols for each entry */

    for (i = 0, entry_offset = vtable_header_size;
         i < vtable->entries.nitems;
         ++i, entry_offset += vtable_entry_size)
    {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);

        /* If we can't find the symbol, it means that the virtual function was
         * defined inline.  There's not much we can do about this; it just means
         * we can't patch this function.
         */
        tmpentry = kxld_array_get_item(&vtable->entries, i);
        sym = kxld_dict_find(defined_cxx_symbols, &entry_value);

        if (sym) {
            tmpentry->patched.name = sym->name;
            tmpentry->patched.addr = sym->link_addr;
        } else {
            tmpentry->patched.name = NULL;
            tmpentry->patched.addr = 0;
        }
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
* Initializes vtables by performing a reverse lookup on symbol values when
* they exist in the vtable entry, and by looking through a matching relocation
* entry when the vtable entry is NULL.
*
* Final linked images require this hybrid vtable initialization approach
* because they are already internally resolved.  This means that the vtables
* contain valid entries to local symbols, but still have relocation entries for
* external symbols.
*******************************************************************************/
static kern_return_t
init_by_entries_and_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDRelocator *relocator, const KXLDArray *relocs,
    const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *sym = NULL;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    kxld_addr_t entry_value = 0;
    u_long entry_offset = 0;
    u_int nentries = 0;
    u_int i = 0;
    char *demangled_name1 = NULL;
    size_t demangled_length1 = 0;

    check(vtable);
    check(vtable_sym);
    check(relocator);
    check(relocs);

    /* Find the first entry and its offset past the vtable padding */

    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    /* In a final linked image, a vtable slot is valid if it is nonzero
     * (meaning the userspace linker has already resolved it) or if it has
     * a relocation entry.  We'll know the end of the vtable when we find a
     * slot that meets neither of these conditions.
     */
    entry_offset = vtable_header_size;
    while (1) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);
        if (!entry_value) {
            reloc = kxld_reloc_get_reloc_by_offset(relocs,
                vtable_sym->base_addr + entry_offset);
            if (!reloc) break;
        }

        ++nentries;
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */

    for (i = 0, entry_offset = vtable_header_size;
         i < vtable->entries.nitems;
         ++i, entry_offset += vtable_entry_size)
    {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped.  We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it.  We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        if (entry_value) {
            reloc = NULL;
            sym = kxld_dict_find(defined_cxx_symbols, &entry_value);
        } else {
            reloc = kxld_reloc_get_reloc_by_offset(relocs,
                vtable_sym->base_addr + entry_offset);
            require_action(reloc, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    kKxldLogMalformedVTable,
                    kxld_demangle(vtable->name, &demangled_name1,
                        &demangled_length1)));

            sym = kxld_reloc_get_symbol(relocator, reloc, /* data */ NULL);
        }

        tmpentry = kxld_array_get_item(&vtable->entries, i);
        tmpentry->unpatched.reloc = reloc;
        tmpentry->unpatched.sym = sym;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
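* Resets a vtable to its empty state, clearing all fields and the entries
* array so the object can be reinitialized.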
*******************************************************************************/
void
kxld_vtable_clear(KXLDVTable *vtable)
{
    check(vtable);

    vtable->vtable = NULL;
    vtable->name = NULL;
    vtable->is_patched = FALSE;
    kxld_array_clear(&vtable->entries);
}

/*******************************************************************************
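* Tears down a vtable, deinitializing the entries array and zeroing the
* structure.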
*******************************************************************************/
void
kxld_vtable_deinit(KXLDVTable *vtable)
{
    check(vtable);

    kxld_array_deinit(&vtable->entries);
    bzero(vtable, sizeof(*vtable));
}

/*******************************************************************************
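* Returns the vtable entry that corresponds to a byte offset into the vtable,
* or NULL if the offset does not fall on an entry boundary.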
*******************************************************************************/
KXLDVTableEntry *
kxld_vtable_get_entry_for_offset(const KXLDVTable *vtable, u_long offset,
    boolean_t is_32_bit)
{
    KXLDVTableEntry *rval = NULL;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int vtable_entry_idx = 0;

    (void) get_vtable_base_sizes(is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    if (offset % vtable_entry_size) {
        goto finish;
    }

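    /* Offsets are measured from the start of the vtable, so skip past the
     * header before computing the entry index.
     */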
    vtable_entry_idx = (u_int) ((offset - vtable_header_size) / vtable_entry_size);
    rval = kxld_array_get_item(&vtable->entries, vtable_entry_idx);
finish:
    return rval;
}

/*******************************************************************************
* Patching vtables allows us to preserve binary compatibility across releases.
*******************************************************************************/
kern_return_t
kxld_vtable_patch(KXLDVTable *vtable, const KXLDVTable *super_vtable,
    KXLDObject *object)
{
    kern_return_t rval = KERN_FAILURE;
    const KXLDSymtab *symtab = NULL;
    const KXLDSym *sym = NULL;
    KXLDVTableEntry *child_entry = NULL;
    KXLDVTableEntry *parent_entry = NULL;
    u_int symindex = 0;
    u_int i = 0;
    char *demangled_name1 = NULL;
    char *demangled_name2 = NULL;
    char *demangled_name3 = NULL;
    size_t demangled_length1 = 0;
    size_t demangled_length2 = 0;
    size_t demangled_length3 = 0;
    boolean_t failure = FALSE;

    check(vtable);
    check(super_vtable);

    symtab = kxld_object_get_symtab(object);

    require_action(!vtable->is_patched, finish, rval=KERN_SUCCESS);
    require_action(super_vtable->is_patched, finish, rval=KERN_FAILURE);
    require_action(vtable->entries.nitems >= super_vtable->entries.nitems, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMalformedVTable,
            kxld_demangle(vtable->name, &demangled_name1, &demangled_length1)));

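    /* For each entry in the superclass's vtable, decide whether the
     * corresponding child entry must be patched to the parent's symbol; the
     * cases that must not be patched are skipped below.
     */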
    for (i = 0; i < super_vtable->entries.nitems; ++i) {
        child_entry = kxld_array_get_item(&vtable->entries, i);
        parent_entry = kxld_array_get_item(&super_vtable->entries, i);

        /* The child entry can be NULL when a locally-defined, non-external
         * symbol is stripped.  We wouldn't patch this entry anyway, so we
         * just skip it.
         */

        if (!child_entry->unpatched.sym) continue;

        /* It's possible for the patched parent entry not to have a symbol
         * (e.g. when the definition is inlined).  We can't patch this entry no
         * matter what, so we'll just skip it and die later if it's a problem
         * (which is not likely).
         */

        if (!parent_entry->patched.name) continue;

        /* 1) If the symbol is defined locally, do not patch */

        if (kxld_sym_is_defined_locally(child_entry->unpatched.sym)) continue;

        /* 2) If the child is a pure virtual function, do not patch.
         * In general, we want to proceed with patching when the symbol is
         * externally defined because pad slots fall into this category.
         * The pure virtual function symbol is a special case, as the pure
         * virtual property itself overrides the parent's implementation.
         */

        if (kxld_sym_is_pure_virtual(child_entry->unpatched.sym)) continue;

        /* 3) If the symbols are the same, do not patch */

        if (streq(child_entry->unpatched.sym->name,
                  parent_entry->patched.name))
        {
            continue;
        }

        /* 4) If the parent vtable entry is a pad slot, and the child does not
         * match it, then the child was built against a newer version of the
         * libraries, so it is binary-incompatible.
         */

        require_action(!kxld_sym_name_is_padslot(parent_entry->patched.name),
            finish, rval=KERN_FAILURE;
            kxld_log(kKxldLogPatching, kKxldLogErr,
                kKxldLogParentOutOfDate,
                kxld_demangle(super_vtable->name, &demangled_name1,
                    &demangled_length1),
                kxld_demangle(vtable->name, &demangled_name2,
                    &demangled_length2)));

#if KXLD_USER_OR_STRICT_PATCHING
        /* 5) If we are doing strict patching, we prevent kexts from declaring
         * virtual functions and not implementing them.  We can tell if a
         * virtual function is declared but not implemented because we resolve
         * symbols before patching; an unimplemented function will still be
         * undefined at this point.  We then look at whether the symbol has
         * the same class prefix as the vtable.  If it does, the symbol was
         * declared as part of the class and not inherited, which means we
         * should not patch it.
         */

        if (kxld_object_target_supports_strict_patching(object) &&
            !kxld_sym_is_defined(child_entry->unpatched.sym))
        {
            char class_name[KXLD_MAX_NAME_LEN];
            char function_prefix[KXLD_MAX_NAME_LEN];
            u_long function_prefix_len = 0;

            rval = kxld_sym_get_class_name_from_vtable_name(vtable->name,
                class_name, sizeof(class_name));
            require_noerr(rval, finish);

            function_prefix_len =
                kxld_sym_get_function_prefix_from_class_name(class_name,
                    function_prefix, sizeof(function_prefix));
            require(function_prefix_len, finish);

            if (!strncmp(child_entry->unpatched.sym->name,
                    function_prefix, function_prefix_len))
            {
                failure = TRUE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    "The %s is unpatchable because its class declares the "
                    "method '%s' without providing an implementation.",
                    kxld_demangle(vtable->name,
                        &demangled_name1, &demangled_length1),
                    kxld_demangle(child_entry->unpatched.sym->name,
                        &demangled_name2, &demangled_length2));
                continue;
            }
        }
#endif /* KXLD_USER_OR_STRICT_PATCHING */

        /* 6) The child symbol is unresolved and different from its parent, so
         * we need to patch it up.  We do this by modifying the relocation
         * entry of the vtable entry to point to the symbol of the parent
         * vtable entry.  If that symbol does not exist (i.e. we got the data
         * from a link state object's vtable representation), then we create a
         * new symbol in the symbol table and point the relocation entry to
         * that.
         */

        sym = kxld_symtab_get_locally_defined_symbol_by_name(symtab,
            parent_entry->patched.name);
        if (!sym) {
            rval = kxld_object_add_symbol(object, parent_entry->patched.name,
                parent_entry->patched.addr, &sym);
            require_noerr(rval, finish);
        }
        require_action(sym, finish, rval=KERN_FAILURE);

        rval = kxld_symtab_get_sym_index(symtab, sym, &symindex);
        require_noerr(rval, finish);

        rval = kxld_reloc_update_symindex(child_entry->unpatched.reloc, symindex);
        require_noerr(rval, finish);
        kxld_log(kKxldLogPatching, kKxldLogDetail,
            "In vtable '%s', patching '%s' with '%s'.",
            kxld_demangle(vtable->name, &demangled_name1, &demangled_length1),
            kxld_demangle(child_entry->unpatched.sym->name,
                &demangled_name2, &demangled_length2),
            kxld_demangle(sym->name, &demangled_name3, &demangled_length3));

        rval = kxld_object_patch_symbol(object, child_entry->unpatched.sym);
        require_noerr(rval, finish);

        child_entry->unpatched.sym = sym;

        /*
         * The C++ ABI requires that functions be aligned on a 2-byte boundary:
         * http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
         * If the LSB of any virtual function's link address is 1, then the
         * compiler has violated that part of the ABI, and we're going to panic
         * in _ptmf2ptf() (in OSMetaClass.h). Better to panic here with some
         * context.
         */
        assert(kxld_sym_is_pure_virtual(sym) || !(sym->link_addr & 1));
    }

    require_action(!failure, finish, rval=KERN_FAILURE);

    /* Change the vtable representation from the unpatched layout to the
     * patched layout.
     */

    for (i = 0; i < vtable->entries.nitems; ++i) {
        char *name;
        kxld_addr_t addr;

        child_entry = kxld_array_get_item(&vtable->entries, i);
        if (child_entry->unpatched.sym) {
            name = child_entry->unpatched.sym->name;
            addr = child_entry->unpatched.sym->link_addr;
        } else {
            name = NULL;
            addr = 0;
        }

        child_entry->patched.name = name;
        child_entry->patched.addr = addr;
    }

    vtable->is_patched = TRUE;
    rval = KERN_SUCCESS;

finish:
    if (demangled_name1) kxld_free(demangled_name1, demangled_length1);
    if (demangled_name2) kxld_free(demangled_name2, demangled_length2);
    if (demangled_name3) kxld_free(demangled_name3, demangled_length3);

    return rval;
}
