/**
 * \file
 * \brief Kernel capability deletion-related operations
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <stdio.h>
#include <string.h>
#include <kernel.h>
#include <barrelfish_kpi/syscalls.h>
#include <barrelfish_kpi/paging_arch.h>
#include <barrelfish_kpi/lmp.h>
#include <offsets.h>
#include <capabilities.h>
#include <cap_predicates.h>
#include <distcaps.h>
#include <dispatch.h>
#include <paging_kernel_arch.h>
#include <mdb/mdb.h>
#include <mdb/mdb_tree.h>
#include <trace/trace.h>
#include <wakeup.h>

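/*
 * Deletion uses two kernel-global work lists, linked through
 * cte->delete_node.next:
 *  - the delete list (delete_head/delete_tail): a FIFO of ctes that have
 *    been marked in_delete and are processed one at a time by
 *    caps_delete_step();
 *  - the clear list (clear_head/clear_tail): CNodes and dispatchers whose
 *    backing memory is reclaimed by caps_clear_step() once the delete list
 *    has been drained.
 */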
struct cte *clear_head, *clear_tail;
struct cte *delete_head, *delete_tail;

static errval_t caps_try_delete(struct cte *cte);
static errval_t cleanup_copy(struct cte *cte);
static errval_t cleanup_last(struct cte *cte, struct cte *ret_ram_cap);
static void caps_mark_revoke_copy(struct cte *cte);
static void caps_mark_revoke_generic(struct cte *cte);
static void clear_list_prepend(struct cte *cte);
static errval_t caps_copyout_last(struct cte *target, struct cte *ret_cte);

/**
 * \brief Try a "simple" delete of a cap. If this fails, the monitor needs to
 * negotiate a delete across the system.
 */
static errval_t caps_try_delete(struct cte *cte)
{
    TRACE_CAP_MSG("trying simple delete", cte);
    if (distcap_is_in_delete(cte) || cte->mdbnode.locked) {
        // locked or already in process of being deleted
        return SYS_ERR_CAP_LOCKED;
    }
    if (distcap_is_foreign(cte) || has_copies(cte)) {
        return cleanup_copy(cte);
    }
    else if (cte->mdbnode.remote_copies
             || cte->cap.type == ObjType_L1CNode
             || cte->cap.type == ObjType_L2CNode
             || cte->cap.type == ObjType_Dispatcher)
    {
        return SYS_ERR_DELETE_LAST_OWNED;
    }
    else {
        return cleanup_last(cte, NULL);
    }
}

/**
 * \brief Delete the last copy of a cap in the entire system.
 * \bug Somewhere in the delete process, the remote_ancs property should be
 *      propagated to (remote) immediate descendants.
 */
errval_t caps_delete_last(struct cte *cte, struct cte *ret_ram_cap)
{
    errval_t err;
    assert(!has_copies(cte));

    if (cte->mdbnode.remote_copies) {
        printk(LOG_WARN, "delete_last but remote_copies is set\n");
    }

    TRACE_CAP_MSG("deleting last", cte);

    // try simple delete
    // XXX: this really should always fail, enforce that? -MN
    // XXX: this is probably not the way we should enforce/check this -SG
    err = caps_try_delete(cte);
    if (err_no(err) != SYS_ERR_DELETE_LAST_OWNED &&
        err_no(err) != SYS_ERR_CAP_LOCKED) {
        return err;
    }

    // CNodes and dcbs contain further CTEs, so they cannot simply be deleted.
    // Instead, we place them on the clear list, which is progressively worked
    // through until each list element contains only ctes that point to
    // other CNodes or dcbs, at which point they are scheduled for final
    // deletion, which only happens once the delete list is empty.
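    //
    // Rough example: when the last copy of an L2CNode is deleted, each
    // occupied slot is handed to caps_mark_revoke_generic() below, which
    // either cleans the slot up immediately or queues it on the delete list;
    // the CNode itself goes onto the clear list. Once the delete list has
    // been drained via caps_delete_step(), caps_clear_step() reclaims the
    // CNode's own memory through cleanup_last().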
    if (cte->cap.type == ObjType_L1CNode ||
        cte->cap.type == ObjType_L2CNode)
    {
        debug(SUBSYS_CAPS, "deleting last copy of cnode: %p\n", cte);
        // Mark all non-Null slots for deletion
        for (cslot_t i = 0; i < cnode_get_slots(&cte->cap); i++) {
            struct cte *slot = caps_locate_slot(get_address(&cte->cap), i);
            caps_mark_revoke_generic(slot);
        }

        assert(cte->delete_node.next == NULL || delete_head == cte);
        cte->delete_node.next = NULL;
        clear_list_prepend(cte);

        return SYS_ERR_OK;
    }
    else if (cte->cap.type == ObjType_Dispatcher)
    {
        debug(SUBSYS_CAPS, "deleting last copy of dispatcher: %p\n", cte);
        struct capability *cap = &cte->cap;
        struct dcb *dcb = cap->u.dispatcher.dcb;

        // Remove from queue
        scheduler_remove(dcb);
        // Reset current if it was deleted
        if (dcb_current == dcb) {
            dcb_current = NULL;
        }

        // Remove from wakeup queue
        wakeup_remove(dcb);

        // Notify monitor
        if (monitor_ep.u.endpoint.listener == dcb) {
            printk(LOG_ERR, "monitor terminated; expect badness!\n");
            monitor_ep.u.endpoint.listener = NULL;
        } else if (monitor_ep.u.endpoint.listener != NULL) {
            uintptr_t payload = dcb->domain_id;
            err = lmp_deliver_payload(&monitor_ep, NULL, &payload, 1, false, false);
            if (err_is_fail(err)) {
                printk(LOG_NOTE, "while notifying monitor about domain exit: %"PRIuERRV".\n", err);
                printk(LOG_NOTE, "please add the console output to the following bug report: https://code.systems.ethz.ch/T78\n");
            }
            assert(err_is_ok(err));
        }

        caps_mark_revoke_generic(&dcb->cspace);
        caps_mark_revoke_generic(&dcb->disp_cte);
        assert(cte->delete_node.next == NULL || delete_head == cte);
        cte->delete_node.next = NULL;
        clear_list_prepend(cte);

        return SYS_ERR_OK;
    }
    else
    {
        // last copy, perform object cleanup
        return cleanup_last(cte, ret_ram_cap);
    }
}

/**
 * \brief Cleanup a cap copy but not the object represented by the cap
 */
static errval_t
cleanup_copy(struct cte *cte)
{
    errval_t err;

    TRACE_CAP_MSG("cleaning up copy", cte);

    struct capability *cap = &cte->cap;

    if (type_is_vnode(cap->type) ||
        cap->type == ObjType_Frame ||
        cap->type == ObjType_DevFrame)
    {
        unmap_capability(cte);
    }

    if (distcap_is_foreign(cte)) {
        TRACE_CAP_MSG("cleaning up non-owned copy", cte);
        if (cte->mdbnode.remote_copies || cte->mdbnode.remote_descs) {
            struct cte *ancestor = mdb_find_ancestor(cte);
            if (ancestor) {
                mdb_set_relations(ancestor, RRELS_DESC_BIT, RRELS_DESC_BIT);
            }
        }
    }

    err = mdb_remove(cte);
    if (err_is_fail(err)) {
        return err;
    }
    TRACE_CAP_MSG("cleaned up copy", cte);
    assert(!mdb_reachable(cte));
    memset(cte, 0, sizeof(*cte));

    return SYS_ERR_OK;
}

/**
 * \brief Cleanup the last cap copy for an object and the object itself
 */
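// If a new capability type is added, ObjType_Num changes and the assert
// below fires, forcing the list of RAM-backed types in cleanup_last()'s
// switch statement to be re-checked.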
STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all RAM-backed cap types");
static errval_t
cleanup_last(struct cte *cte, struct cte *ret_ram_cap)
{
    errval_t err;

    TRACE_CAP_MSG("cleaning up last copy", cte);
    struct capability *cap = &cte->cap;

    assert(!has_copies(cte));
    if (cte->mdbnode.remote_copies) {
        printk(LOG_WARN, "cleanup_last but remote_copies is set\n");
    }

    // When deleting the last copy of a mapping cap, destroy the mapping
    if (type_is_mapping(cte->cap.type)) {
        struct Frame_Mapping *mapping = &cte->cap.u.frame_mapping;
        // Only if the ptable the mapping is pointing to is a vnode type
        if (type_is_vnode(mapping->ptable->cap.type)) {
            err = page_mappings_unmap(&mapping->ptable->cap, cte);
            if (err_is_fail(err)) {
                char buf[256];
                sprint_cap(buf, 256, &cte->cap);
                printk(LOG_WARN, "page_mappings_unmap failed for %s\n", buf);
                return err;
            }
        }
    }

    if (ret_ram_cap && ret_ram_cap->cap.type != ObjType_Null) {
        return SYS_ERR_SLOT_IN_USE;
    }

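    // If this turns out to be the last cap covering a RAM-backed object, the
    // freed region is described by `ram` and handed back to the monitor,
    // either through ret_ram_cap or as an LMP payload of `len` words.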
    struct RAM ram = { .bytes = 0 };
    size_t len = sizeof(struct RAM) / sizeof(uintptr_t) + 1;

    if (!has_descendants(cte) && !has_ancestors(cte)) {
        // List all RAM-backed capabilities here
        // NB: ObjType_PhysAddr and ObjType_DevFrame caps are *not* RAM-backed!
        switch(cap->type) {
        case ObjType_RAM:
        case ObjType_Frame:
        case ObjType_L1CNode:
        case ObjType_L2CNode:
            ram.base = get_address(cap);
            ram.bytes = get_size(cap);
            break;

        case ObjType_Dispatcher:
            // Convert to genpaddr
            ram.base = local_phys_to_gen_phys(mem_to_local_phys((lvaddr_t)cap->u.dispatcher.dcb));
            ram.bytes = OBJSIZE_DISPATCHER;
            break;

        default:
            // Handle VNodes here
            if(type_is_vnode(cap->type)) {
                ram.base = get_address(cap);
                ram.bytes = vnode_objsize(cap->type);
            }
            break;
        }
    }

    // If we have a cap to return to the monitor, but no allocated slot and
    // no room in the monitor channel, have the user retry over the monitor
    // RPC interface.
    if (ram.bytes > 0 &&
        !ret_ram_cap &&
        monitor_ep.type == ObjType_EndPoint &&
        err_is_fail(lmp_can_deliver_payload(&monitor_ep, len)))
    {
        return SYS_ERR_RETRY_THROUGH_MONITOR;
    }


    err = cleanup_copy(cte);
    if (err_is_fail(err)) {
        return err;
    }

    if(ram.bytes > 0) {
        // Send back as RAM cap to monitor
        if (ret_ram_cap) {
            if (dcb_current != monitor_ep.u.endpoint.listener) {
                printk(LOG_WARN, "sending fresh ram cap to non-monitor?\n");
            }
            assert(ret_ram_cap->cap.type == ObjType_Null);
            ret_ram_cap->cap.u.ram = ram;
            ret_ram_cap->cap.type = ObjType_RAM;
            err = mdb_insert(ret_ram_cap);
            TRACE_CAP_MSG("reclaimed", ret_ram_cap);
            assert(err_is_ok(err));
            // note: this is a "success" code!
            err = SYS_ERR_RAM_CAP_CREATED;
        }
        else if (monitor_ep.type && monitor_ep.u.endpoint.listener != 0) {
#ifdef TRACE_PMEM_CAPS
            struct cte ramcte;
            memset(&ramcte, 0, sizeof(ramcte));
            ramcte.cap.u.ram = ram;
            ramcte.cap.type = ObjType_RAM;
            TRACE_CAP_MSG("reclaimed", &ramcte);
#endif
            // XXX: This looks pretty ugly. We need an interface.
            err = lmp_deliver_payload(&monitor_ep, NULL,
                                      (uintptr_t *)&ram,
                                      len, false, false);
        }
        else {
            printk(LOG_WARN, "dropping ram cap base %08"PRIxGENPADDR" bytes 0x%"PRIxGENSIZE"\n", ram.base, ram.bytes);
        }
        if (err_no(err) == SYS_ERR_LMP_BUF_OVERFLOW) {
            printk(LOG_WARN, "dropped ram cap base %08"PRIxGENPADDR" bytes 0x%"PRIxGENSIZE"\n", ram.base, ram.bytes);
            err = SYS_ERR_OK;

        } else {
            assert(err_is_ok(err));
        }
    }

    return err;
}

/*
 * Mark phase of revoke mark & sweep
 */

static void caps_mark_revoke_copy(struct cte *cte)
{
    errval_t err;
    err = caps_try_delete(cte);
    if (err_is_fail(err)) {
        // this should not happen as there is a copy of the cap
        panic("error while marking/deleting cap copy for revoke:"
              " %"PRIuERRV"\n", err);
    }
}

static void caps_mark_revoke_generic(struct cte *cte)
{
    errval_t err;

    if (cte->cap.type == ObjType_Null) {
        return;
    }
    if (distcap_is_in_delete(cte)) {
        return;
    }

    TRACE_CAP_MSG("marking for revoke", cte);

    err = caps_try_delete(cte);
    // If we get RETRY_THROUGH_MONITOR, we're trying to delete a RAM-derived
    // cap that is the last one covering the region, and we need to delete it
    // in a proper delete step.
    if (err_no(err) == SYS_ERR_DELETE_LAST_OWNED ||
        err_no(err) == SYS_ERR_RETRY_THROUGH_MONITOR)
    {
        cte->mdbnode.in_delete = true;
        //cte->delete_node.next_slot = 0;

        // insert into delete list
        if (!delete_tail) {
            assert(!delete_head);
            delete_head = delete_tail = cte;
            cte->delete_node.next = NULL;
        }
        else {
            assert(delete_head);
            assert(!delete_tail->delete_node.next);
            delete_tail->delete_node.next = cte;
            delete_tail = cte;
            cte->delete_node.next = NULL;
        }
        TRACE_CAP_MSG("inserted into delete list", cte);

        // Because the monitors will perform a 2PC that deletes all foreign
        // copies before starting the delete steps, and because the in_delete
        // bit marks this cap as "busy" (see distcap_get_state), we can clear
        // the remote copies bit.
        cte->mdbnode.remote_copies = 0;
    }
    else if (err_is_fail(err)) {
        // some serious mojo went down in the cleanup voodoo
        panic("error while marking/deleting descendant cap for revoke:"
              " %"PRIuERRV"\n", err);
    }
}

/**
 * \brief Delete all copies of a foreign cap.
 */
errval_t caps_delete_foreigns(struct cte *cte)
{
    errval_t err;
    struct cte *next;
    if (cte->mdbnode.owner == my_core_id) {
        debug(SUBSYS_CAPS, "%s called on %d for %p, owner=%d\n",
                __FUNCTION__, my_core_id, cte, cte->mdbnode.owner);
        return SYS_ERR_DELETE_REMOTE_LOCAL;
    }
    assert(cte->mdbnode.owner != my_core_id);
    if (cte->mdbnode.in_delete) {
        printk(LOG_WARN,
               "foreign caps with in_delete set,"
               " this should not happen");
    }

    TRACE_CAP_MSG("del copies of", cte);

    // Cleanup copies that are > cte in MDB
    next = mdb_successor(cte);
    while (next && is_copy(&cte->cap, &next->cap))
    {
        assert(next->mdbnode.owner != my_core_id);
        if (next->mdbnode.in_delete) {
            printk(LOG_WARN,
                    "foreign caps with in_delete set,"
                    " this should not happen");
        }
        err = cleanup_copy(next);
        if (err_is_fail(err)) {
            panic("error while deleting extra foreign copy for remote_delete:"
                    " %"PRIuERRV"\n", err);
        }
        next = mdb_successor(next);
    }

    // Cleanup copies that are < cte in MDB
    next = mdb_predecessor(cte);
    while (next && is_copy(&cte->cap, &next->cap))
    {
        assert(next->mdbnode.owner != my_core_id);
        if (next->mdbnode.in_delete) {
            printk(LOG_WARN,
                    "foreign caps with in_delete set,"
                    " this should not happen");
        }
        err = cleanup_copy(next);
        if (err_is_fail(err)) {
            panic("error while deleting extra foreign copy for remote_delete:"
                    " %"PRIuERRV"\n", err);
        }
        next = mdb_predecessor(next);
    }

    // The capabilities should all be foreign, by nature of the request.
    // Foreign capabilities are rarely locked, since they can be deleted
    // immediately. The only time a foreign capability is locked is during
    // move and retrieve operations. In either case, the lock on the same
    // capability must also be acquired on the owner for the operation to
    // succeed. Thus, we can safely unlock any capability here iff the
    // monitor guarantees that this operation is only executed when the
    // capability is locked on the owner.
    cte->mdbnode.locked = false;
    err = caps_try_delete(cte);
    if (err_is_fail(err)) {
        panic("error while deleting foreign copy for remote_delete:"
              " %"PRIuERRV"\n", err);
    }

    return SYS_ERR_OK;
}

/**
 * \brief Mark capabilities for a revoke operation.
 * \param base The data for the capability being revoked
 * \param revoked The revoke target if it is on this core. This specific
 *        capability copy will not be marked. If supplied, is_copy(base,
 *        &revoked->cap) must hold.
 * \returns
 *        - SYS_ERR_CAP_NOT_FOUND if no copies or descendants are present on
 *          this core.
 *        - SYS_ERR_OK otherwise.
 */
errval_t caps_mark_revoke(struct capability *base, struct cte *revoked)
{
    assert(base);
    assert(!revoked || revoked->mdbnode.owner == my_core_id);

    // SG: In the following code, 'prev' is kind of a misnomer; this is all
    // just contortions to iterate through all copies and descendants of a
    // given capability. We update prev to be able to iterate through the tree
    // even when we're going up and down the tree structure to find the next
    // predecessor/successor. -2017-08-29.

    // to avoid multiple mdb_find_greater, we store the predecessor of the
    // current position.
    // prev can already be a descendant if there are only descendants of base
    // on this core.
    struct cte *prev = mdb_find_greater(base, true), *next = NULL;
    if (!prev || !(is_copy(base, &prev->cap)
              || is_ancestor(&prev->cap, base)))
    {
        return SYS_ERR_CAP_NOT_FOUND;
    }

    // Mark copies (backwards): we will never find descendants earlier in the
    // ordering. However, we might find copies!
    for (next = mdb_predecessor(prev);
         next && is_copy(base, &next->cap);
         next = mdb_predecessor(prev))
    {
        if (next == revoked) {
            // do not delete the revoked capability, use it as the new prev
            // instead, and delete the old prev.
            next = prev;
            prev = revoked;
        }
        assert(revoked || next->mdbnode.owner != my_core_id);
        caps_mark_revoke_copy(next);
    }
    // Mark copies (forward), use updated "prev". When we're done with this
    // step next should be == revoked, if revoked != NULL, and succ(next)
    // should be the first descendant.
    for (next = mdb_successor(prev);
         next && is_copy(base, &next->cap);
         next = mdb_successor(prev))
    {
        // note: if next is a copy of base, prev will also be a copy
        if (next == revoked) {
            // do not delete the revoked capability, use it as the new prev
            // instead, and delete the old prev.
            next = prev;
            prev = revoked;
        }
        assert(revoked || next->mdbnode.owner != my_core_id);
        caps_mark_revoke_copy(next);
    }

    assert(!revoked || prev == revoked);
    assert(is_copy(&prev->cap, base) || is_ancestor(&prev->cap, base));

    // mdb_find_greater() will always find the first descendant if there are
    // no copies on this core, so we can just mark descendants forwards.
    // XXX: check that this is true! -SG, 2017-09-08.
    // Mark descendants forwards
    for (next = mdb_successor(prev);
         next && is_ancestor(&next->cap, base);
         next = mdb_successor(prev))
    {
        caps_mark_revoke_generic(next);
        if (next->cap.type) {
            // the cap has not been deleted, so we must use it as the new prev
            prev = next;
        }
    }

    if (prev != revoked && !prev->mdbnode.in_delete) {
        if (is_copy(base, &prev->cap)) {
            caps_mark_revoke_copy(prev);
        }
        else {
            // due to early termination of the loop condition, prev must be a
            // descendant
            assert(is_ancestor(&prev->cap, base));
            caps_mark_revoke_generic(prev);
        }
    }

    return SYS_ERR_OK;
}

/*
 * Sweep phase
 */
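
/*
 * The sweep is driven one step at a time from outside this file: the caller
 * (in practice the monitor; cf. SYS_ERR_RETRY_THROUGH_MONITOR above) invokes
 * caps_delete_step() repeatedly until the delete list is drained, and then
 * caps_clear_step() until the clear list is drained. Both return
 * SYS_ERR_CAP_NOT_FOUND once their respective list is empty; caps_clear_step()
 * additionally asserts that the delete list has been emptied first.
 */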

static void clear_list_prepend(struct cte *cte)
{
    // Make sure we don't break the delete list by inserting a cte into the
    // clear list that hasn't yet been removed from the delete list.
    assert(cte->delete_node.next == NULL);

    if (!clear_tail) {
        assert(!clear_head);
        clear_head = clear_tail = cte;
        cte->delete_node.next = NULL;
    }
    else {
        assert(clear_head);
        cte->delete_node.next = clear_head;
        clear_head = cte;
    }
    TRACE_CAP_MSG("inserted into clear list", cte);
}

errval_t caps_delete_step(struct cte *ret_next)
{
    errval_t err = SYS_ERR_OK;

    assert(ret_next);
    assert(ret_next->cap.type == ObjType_Null);

    if (!delete_head) {
        assert(!delete_tail);
        return SYS_ERR_CAP_NOT_FOUND;
    }
    assert(delete_head->mdbnode.in_delete == true);

    TRACE_CAP_MSG("performing delete step", delete_head);
    struct cte *cte = delete_head, *next = cte->delete_node.next;
    if (cte->mdbnode.locked) {
        err = SYS_ERR_CAP_LOCKED;
    }
    else if (distcap_is_foreign(cte) || has_copies(cte)) {
        err = cleanup_copy(cte);
    }
    else if (cte->mdbnode.remote_copies) {
        err = caps_copyout_last(cte, ret_next);
        if (err_is_ok(err)) {
            if (next) {
                delete_head = next;
            } else {
                delete_head = delete_tail = NULL;
            }
            err = SYS_ERR_DELETE_LAST_OWNED;
        }
    }
    else {
        // XXX: need to clear delete_list flag because it's reused for
        // clear_list? -SG
        cte->delete_node.next = NULL;
        err = caps_delete_last(cte, ret_next);
        if (err_is_fail(err)) {
            TRACE_CAP_MSG("delete last failed", cte);
            // if delete_last fails, reinsert in delete list
            cte->delete_node.next = next;
        }
    }

    if (err_is_ok(err)) {
        if (next) {
            delete_head = next;
        } else {
            delete_head = delete_tail = NULL;
        }
    }
    return err;
}

errval_t caps_clear_step(struct cte *ret_ram_cap)
{
    errval_t err;
    assert(!delete_head);
    assert(!delete_tail);

    if (!clear_head) {
        assert(!clear_tail);
        return SYS_ERR_CAP_NOT_FOUND;
    }
    assert((clear_head == clear_tail) == (!clear_head->delete_node.next));

    struct cte *cte = clear_head;

#ifndef NDEBUG
    // some sanity checks
#define CHECK_SLOT(slot) do { \
    assert((slot)->cap.type == ObjType_Null \
           || (slot)->cap.type == ObjType_L1CNode \
           || (slot)->cap.type == ObjType_L2CNode \
           || (slot)->cap.type == ObjType_Dispatcher); \
    if ((slot)->cap.type != ObjType_Null) { \
        assert((slot)->mdbnode.in_delete); \
    } \
} while (0)

    if (cte->cap.type == ObjType_L1CNode ||
        cte->cap.type == ObjType_L2CNode)
    {
        for (cslot_t i = 0; i < cnode_get_slots(&cte->cap); i++) {
            struct cte *slot = caps_locate_slot(get_address(&cte->cap), i);
            CHECK_SLOT(slot);
        }
    }
    else if (cte->cap.type == ObjType_Dispatcher) {
        struct dcb *dcb = cte->cap.u.dispatcher.dcb;
        CHECK_SLOT(&dcb->cspace);
        CHECK_SLOT(&dcb->disp_cte);
    }
    else {
        panic("Non-CNode/Dispatcher cap type in clear list!");
    }

#undef CHECK_SLOT
#endif

    TRACE_CAP_MSG("caps_clear_step for", cte);
    struct cte *after = cte->delete_node.next;
    err = cleanup_last(cte, ret_ram_cap);
    if (err_is_ok(err)) {
        if (after) {
            clear_head = after;
        }
        else {
            clear_head = clear_tail = NULL;
        }
    }
    return err;
}

static errval_t caps_copyout_last(struct cte *target, struct cte *ret_cte)
{
    errval_t err;

    // Create a copy in the slot specified by the caller, then delete the
    // original so the new copy is still the last copy.
    err = caps_copy_to_cte(ret_cte, target, false, 0, 0);
    if (err_is_fail(err)) {
        return err;
    }

    err = cleanup_copy(target);
    if (err_is_fail(err)) {
        return err;
    }

    return SYS_ERR_OK;
}

/*
 * CNode invocations
 */
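
/*
 * Entry points backing the CNode delete/revoke invocations. Deletes that
 * cannot complete locally (locked caps, last owned copies) and revokes of
 * unlocked caps are handed back to user space with
 * SYS_ERR_RETRY_THROUGH_MONITOR, so that the monitor can drive the mark and
 * sweep machinery above.
 */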

errval_t caps_delete(struct cte *cte)
{
    errval_t err;

    TRACE_CAP_MSG("deleting", cte);

    if (cte->mdbnode.locked) {
        return err_push(SYS_ERR_CAP_LOCKED, SYS_ERR_RETRY_THROUGH_MONITOR);
    }

    err = caps_try_delete(cte);
    if (err_no(err) == SYS_ERR_DELETE_LAST_OWNED) {
        err = err_push(err, SYS_ERR_RETRY_THROUGH_MONITOR);
    }

    return err;
}

errval_t caps_revoke(struct cte *cte)
{
    TRACE_CAP_MSG("revoking", cte);

    if (cte->mdbnode.locked) {
        return SYS_ERR_CAP_LOCKED;
    }

    return SYS_ERR_RETRY_THROUGH_MONITOR;
}