/**
 * \file
 * \brief Arch-generic system calls implementation.
 */

/*
 * Copyright (c) 2007-2010,2012, ETH Zurich.
 * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <kernel.h>
#include <stdio.h>
#include <string.h>
#include <syscall.h>
#include <barrelfish_kpi/syscalls.h>
#include <capabilities.h>
#include <cap_predicates.h>
#include <coreboot.h>
#include <mdb/mdb.h>
#include <mdb/mdb_tree.h>
#include <dispatch.h>
#include <distcaps.h>
#include <wakeup.h>
#include <paging_kernel_helper.h>
#include <paging_kernel_arch.h>
#include <exec.h>
#include <irq.h>
#include <trace/trace.h>
#include <trace_definitions/trace_defs.h>
#include <kcb.h>
#include <useraccess.h>
#include <systime.h>

errval_t sys_print(const char *str, size_t length)
{
    /* FIXME: check that string is mapped and accessible to caller! */
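    /*
     * A minimal sketch of such a check, assuming the access_ok() helper used
     * below in sys_handle_kcb_identify also supports read-access queries via
     * an ACCESS_READ flag:
     *
     *   if (!access_ok(ACCESS_READ, (lvaddr_t)str, length)) {
     *       return SYS_ERR_INVALID_USER_BUFFER;
     *   }
     */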
    printf("%.*s", (int)length, str);
    return SYS_ERR_OK;
}

/* FIXME: lots of missing argument checks in this function */
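/**
 * \brief Set up a dispatcher control block prior to running it.
 *
 * Installs the CSpace root given by cptr (looked up at the given level in the
 * caller's CSpace), the VSpace root given by vptr and the dispatcher frame
 * given by dptr, and copies the domain ID from the dispatcher named by odptr.
 * If run is set, the new dispatcher is made runnable; it starts disabled.
 * A cptr of 0x0 selects the VM-guest path, which only honours the run flag.
 */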
struct sysret
sys_dispatcher_setup(struct capability *to, capaddr_t cptr, uint8_t level,
                     capaddr_t vptr, capaddr_t dptr, bool run, capaddr_t odptr)
{
    errval_t err = SYS_ERR_OK;
    assert(to->type == ObjType_Dispatcher);
    struct dcb *dcb = to->u.dispatcher.dcb;
    assert(dcb != dcb_current);

    lpaddr_t lpaddr;

    /* 0. Handle sys_dispatcher_setup for guest domains */
    if (cptr == 0x0) {
        assert(dcb->is_vm_guest);
        assert(vptr == 0x0);
        assert(dptr == 0x0);
        assert(odptr == 0x0);
        if (!dcb->is_vm_guest || vptr != 0x0 || dptr != 0x0 || odptr != 0x0) {
            return SYSRET(SYS_ERR_DISP_NOT_RUNNABLE);
        }
        if (run) {
            // Dispatchers run disabled the first time
            dcb->disabled = 1;
            make_runnable(dcb);
        }
        return SYSRET(SYS_ERR_OK);
    }

    assert(!dcb->is_vm_guest);
    assert(cptr != 0x0);
    assert(vptr != 0x0);
    assert(dptr != 0x0);
    assert(odptr != 0x0);

    if (cptr == 0x0 || vptr == 0x0 || dptr == 0x0 || odptr == 0x0) {
        return SYSRET(SYS_ERR_DISP_NOT_RUNNABLE);
    }

    /* 1. set cspace root */
    struct cte *root;
    err = caps_lookup_slot(&dcb_current->cspace.cap, cptr, level,
                           &root, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        debug(SUBSYS_CAPS, "caps_lookup_slot for croot=%"PRIxCADDR", level=%d: %"PRIuERRV"\n",
              cptr, level, err);
        return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_ROOT));
    }
    if (root->cap.type != ObjType_L1CNode) {
        return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_INVALID));
    }
    err = caps_copy_to_cte(&dcb->cspace, root, false, 0, 0);
    if (err_is_fail(err)) {
        debug(SUBSYS_CAPS, "caps_copy_to_cte for croot: %"PRIuERRV"\n", err);
        return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_ROOT));
    }

    /* 2. set vspace root */
    struct capability *vroot;
    err = caps_lookup_cap(&root->cap, vptr, CNODE_TYPE_COUNT, &vroot, CAPRIGHTS_WRITE);
    if (err_is_fail(err)) {
        debug(SUBSYS_CAPS, "caps_lookup_cap for vroot=%"PRIxCADDR": %"PRIuERRV"\n", vptr, err);
        return SYSRET(err_push(err, SYS_ERR_DISP_VSPACE_ROOT));
    }

    // Insert as dispatcher's VSpace root
    if (!type_is_vroot(vroot->type)) {
        return SYSRET(SYS_ERR_DISP_VSPACE_INVALID);
    }
    dcb->vspace = gen_phys_to_local_phys(get_address(vroot));

    /* 3. set dispatcher frame pointer */
    struct cte *dispcte;
    err = caps_lookup_slot(&root->cap, dptr, CNODE_TYPE_COUNT, &dispcte,
                           CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DISP_FRAME));
    }
    struct capability *dispcap = &dispcte->cap;
    if (dispcap->type != ObjType_Frame) {
        return SYSRET(SYS_ERR_DISP_FRAME_INVALID);
    }
    if (get_size(dispcap) < (1UL << DISPATCHER_FRAME_BITS)) {
        return SYSRET(SYS_ERR_DISP_FRAME_SIZE);
    }
    /* FIXME: check rights? */

    lpaddr = gen_phys_to_local_phys(get_address(dispcap));
    dcb->disp = local_phys_to_mem(lpaddr);
    // Copy the cap to dcb also
    err = caps_copy_to_cte(&dcb->disp_cte, dispcte, false, 0, 0);
    // If copy fails, something wrong in kernel
    assert(err_is_ok(err));

    /* 5. Make runnable if desired */
    if (run) {
        if (dcb->vspace == 0 || dcb->disp == 0 || dcb->cspace.cap.type != ObjType_L1CNode) {
            return SYSRET(err_push(err, SYS_ERR_DISP_NOT_RUNNABLE));
        }

        // XXX: dispatchers run disabled the first time they start
        dcb->disabled = 1;
        //printf("DCB: %p %.*s\n", dcb, DISP_NAME_LEN, dcb->disp->name);
        make_runnable(dcb);
    }

    /* 6. Copy domain ID off given dispatcher */
    // XXX: We generally pass the current dispatcher as odisp, see e.g.
    // lib/spawndomain/spawn.c:spawn_run().  In that case the new domain gets
    // the same domain id as the domain doing the spawning. cf. T271
    // -SG, 2016-07-21.
    struct capability *odisp;
    err = caps_lookup_cap(&dcb_current->cspace.cap, odptr, CNODE_TYPE_COUNT,
                          &odisp, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DISP_OCAP_LOOKUP));
    }
    if (odisp->type != ObjType_Dispatcher) {
        return SYSRET(SYS_ERR_DISP_OCAP_TYPE);
    }
    dcb->domain_id = odisp->u.dispatcher.dcb->domain_id;

    /* 7. (HACK) Set current core id */
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(dcb->disp);
    disp->curr_core_id = my_core_id;

    /* 8. Enable tracing for new domain */
    err = trace_new_application(disp->name, (uintptr_t) dcb);

    if (err == TRACE_ERR_NO_BUFFER) {
        // Try to use the boot buffer.
        trace_new_boot_application(disp->name, (uintptr_t) dcb);
    }

    // Setup systime frequency
    disp->systime_frequency = systime_frequency;

    return SYSRET(SYS_ERR_OK);
}

struct sysret
sys_dispatcher_properties(struct capability *to,
                          enum task_type type, unsigned long deadline,
                          unsigned long wcet, unsigned long period,
                          unsigned long release, unsigned short weight)
{
    assert(to->type == ObjType_Dispatcher);

#ifdef CONFIG_SCHEDULER_RBED
    struct dcb *dcb = to->u.dispatcher.dcb;

    assert(type >= TASK_TYPE_BEST_EFFORT && type <= TASK_TYPE_HARD_REALTIME);
    assert(wcet <= deadline);
    assert(wcet <= period);
    assert(type != TASK_TYPE_BEST_EFFORT || weight > 0);

    trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_SCHED_REMOVE,
                152);
    scheduler_remove(dcb);

    /* Set task properties */
    dcb->type = type;
    dcb->deadline = deadline;
    dcb->wcet = wcet;
    dcb->period = period;
    dcb->release_time = (release == 0) ? systime_now() : release;
    dcb->weight = weight;

    make_runnable(dcb);
#endif

    return SYSRET(SYS_ERR_OK);
}

/**
 * \param root                  Source CSpace root cnode to invoke
 * \param source_croot          Source capability cspace root
 * \param source_cptr           Source capability cptr
 * \param offset                Offset into source capability from which to retype
 * \param type                  Type to retype to
 * \param objsize               Object size for variable-sized types
 * \param count                 Number of objects to create
 * \param dest_cspace_cptr      Destination CSpace root cnode cptr relative to
 *                              source cspace root
 * \param dest_cnode_cptr       Destination cnode cptr
 * \param dest_cnode_level      Level/depth of destination cnode
 * \param dest_slot             Destination slot number
 * \param from_monitor          Invocation was forwarded from the monitor
 */
struct sysret
sys_retype(struct capability *root, capaddr_t source_croot, capaddr_t source_cptr,
           gensize_t offset, enum objtype type, gensize_t objsize, size_t count,
           capaddr_t dest_cspace_cptr, capaddr_t dest_cnode_cptr,
           uint8_t dest_cnode_level, cslot_t dest_slot, bool from_monitor)
{
    errval_t err;

    /* Parameter checking */
    if (type == ObjType_Null || type >= ObjType_Num) {
        return SYSRET(SYS_ERR_ILLEGAL_DEST_TYPE);
    }

    /* Lookup source cspace root cnode */
    struct capability *source_root;
    err = caps_lookup_cap(root, source_croot, 2, &source_root, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_ROOTCN_LOOKUP));
    }
    /* Source capability */
    struct cte *source_cte;
    // XXX: level from where
    err = caps_lookup_slot(source_root, source_cptr, 2, &source_cte,
                           CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_CAP_LOOKUP));
    }
    assert(source_cte != NULL);

    /* Destination cspace root cnode in source cspace */
    struct capability *dest_cspace_root;
    // XXX: level from where?
    err = caps_lookup_cap(root, dest_cspace_cptr, 2,
                          &dest_cspace_root, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_ROOTCN_LOOKUP));
    }
    /* dest_cspace_root must be L1 CNode */
    if (dest_cspace_root->type != ObjType_L1CNode) {
        return SYSRET(SYS_ERR_CNODE_TYPE);
    }

    /* Destination cnode in destination cspace */
    struct capability *dest_cnode_cap;
    err = caps_lookup_cap(dest_cspace_root, dest_cnode_cptr, dest_cnode_level,
                          &dest_cnode_cap, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
    }

    /* check that destination cnode is actually a cnode */
    if (dest_cnode_cap->type != ObjType_L1CNode &&
        dest_cnode_cap->type != ObjType_L2CNode) {
        debug(SUBSYS_CAPS, "destcn type: %d\n", dest_cnode_cap->type);
        return SYSRET(SYS_ERR_DEST_CNODE_INVALID);
    }

    return SYSRET(caps_retype(type, objsize, count, dest_cnode_cap, dest_slot,
                              source_cte, offset, from_monitor));
}
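
/*
 * For illustration (a sketch, not taken from actual call sites): retyping a
 * 1 MiB RAM capability with offset == 0, objsize == 4096 and count == 256
 * would carve the whole region into 256 page-sized Frame objects placed in
 * consecutive slots starting at dest_slot.  caps_retype() is expected to
 * reject requests where [offset, offset + count * objsize) does not fit
 * inside the source capability.
 */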

struct sysret sys_create(struct capability *root, enum objtype type,
                         size_t objsize, capaddr_t dest_cnode_cptr,
                         uint8_t dest_level, cslot_t dest_slot)
{
    errval_t err;
    uint8_t size = 0;
    genpaddr_t base = 0;

    /* Parameter checking */
    if (type == ObjType_Null || type >= ObjType_Num) {
        return SYSRET(SYS_ERR_ILLEGAL_DEST_TYPE);
    }

    /* Destination CNode */
    struct capability *dest_cnode_cap;
    err = caps_lookup_cap(root, dest_cnode_cptr, dest_level,
                          &dest_cnode_cap, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
    }

    /* Destination slot */
    struct cte *dest_cte;
    dest_cte = caps_locate_slot(get_address(dest_cnode_cap), dest_slot);
    if (dest_cte->cap.type != ObjType_Null) {
        return SYSRET(SYS_ERR_SLOTS_IN_USE);
    }

    /* List capabilities allowed to be created at runtime. */
    switch(type) {

    case ObjType_ID:
        break;

    // only certain types of capabilities can be created at runtime
    default:
        return SYSRET(SYS_ERR_TYPE_NOT_CREATABLE);
    }

    return SYSRET(caps_create_new(type, base, size, objsize, my_core_id, dest_cte));
}

/**
 * Common code for copying and minting except the mint flag and param passing
 *
 * \param root              Source cspace root cnode
 * \param dest_cspace_cptr  Destination cspace root cnode cptr in source cspace
 * \param destcn_cptr       Destination cnode cptr relative to destination cspace
 * \param dest_slot         Destination slot
 * \param source_croot_ptr  Source cspace root cnode cptr in our cspace
 * \param source_cptr       Source capability cptr relative to source cspace
 * \param destcn_level      Level/depth of destination cnode
 * \param source_level      Level/depth of source cap
 * \param param1            First parameter for mint
 * \param param2            Second parameter for mint
 * \param mint              Call is a minting operation
 */
struct sysret
sys_copy_or_mint(struct capability *root, capaddr_t dest_cspace_cptr,
                 capaddr_t destcn_cptr, cslot_t dest_slot, capaddr_t
                 source_croot_ptr, capaddr_t source_cptr,
                 uint8_t destcn_level, uint8_t source_level,
                 uintptr_t param1, uintptr_t param2, bool mint)
{
    errval_t err;

    if (!mint) {
        param1 = param2 = 0;
    }

    if (root->type != ObjType_L1CNode) {
        debug(SUBSYS_CAPS, "%s: root->type = %d\n", __FUNCTION__, root->type);
        return SYSRET(SYS_ERR_CNODE_NOT_ROOT);
    }
    assert(root->type == ObjType_L1CNode);

    /* Lookup source cspace in our cspace */
    struct capability *src_croot;
    err = caps_lookup_cap(root, source_croot_ptr, 2, &src_croot,
                          CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_ROOTCN_LOOKUP));
    }
    if (src_croot->type != ObjType_L1CNode) {
        debug(SUBSYS_CAPS, "%s: src rootcn type = %d\n", __FUNCTION__, src_croot->type);
        return SYSRET(SYS_ERR_CNODE_NOT_ROOT);
    }
    /* Lookup source cap in source cspace */
    struct cte *src_cap;
    err = caps_lookup_slot(src_croot, source_cptr, source_level, &src_cap,
                           CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_CAP_LOOKUP));
    }

    /* Destination cspace root cnode in source cspace */
    struct capability *dest_cspace_root;
    // XXX: level from where?
    err = caps_lookup_cap(root, dest_cspace_cptr, 2, &dest_cspace_root, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_ROOTCN_LOOKUP));
    }
    /* dest_cspace_root must be L1 CNode */
    if (dest_cspace_root->type != ObjType_L1CNode) {
        debug(SUBSYS_CAPS, "%s: dest rootcn type = %d\n", __FUNCTION__, dest_cspace_root->type);
        return SYSRET(SYS_ERR_CNODE_TYPE);
    }

    /* Destination cnode in destination cspace */
    struct cte *dest_cnode_cap;
    err = caps_lookup_slot(dest_cspace_root, destcn_cptr, destcn_level,
                           &dest_cnode_cap, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
    }

    /* Perform copy */
    if (dest_cnode_cap->cap.type == ObjType_L1CNode ||
        dest_cnode_cap->cap.type == ObjType_L2CNode)
    {
        return SYSRET(caps_copy_to_cnode(dest_cnode_cap, dest_slot, src_cap,
                                         mint, param1, param2));
    } else {
        return SYSRET(SYS_ERR_DEST_TYPE_INVALID);
    }
}
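
/*
 * For a plain copy the two mint parameters are forced to zero above, so
 * caps_copy_to_cnode() sees the same argument shape in both cases; when mint
 * is set, param1/param2 are interpreted by the copy routine in a
 * type-specific way (for LMP endpoint capabilities they presumably carry the
 * endpoint offset and buffer size).
 */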

struct sysret
sys_map(struct capability *ptable, cslot_t slot, capaddr_t source_root_cptr,
        capaddr_t source_cptr, uint8_t source_level, uintptr_t flags,
        uintptr_t offset, uintptr_t pte_count, capaddr_t mapping_crootptr,
        capaddr_t mapping_cnptr, uint8_t mapping_cn_level, cslot_t mapping_slot)
{
    assert (type_is_vnode(ptable->type));

    errval_t err;

    /* XXX: TODO: make root explicit argument for sys_map() */
    struct capability *root = &dcb_current->cspace.cap;

    /* Lookup source root cn cap in own cspace */
    struct capability *src_root;
    err = caps_lookup_cap(root, source_root_cptr, source_level, &src_root,
                          CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_ROOTCN_LOOKUP));
    }
    if (src_root->type != ObjType_L1CNode) {
        return SYSRET(SYS_ERR_CNODE_NOT_ROOT);
    }
    /* Lookup source cap in source cspace */
    struct cte *src_cte;
    err = caps_lookup_slot(src_root, source_cptr, source_level, &src_cte,
                           CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_SOURCE_CAP_LOOKUP));
    }

    /* Lookup mapping cspace root in our cspace */
    struct capability *mapping_croot;
    err = caps_lookup_cap(root, mapping_crootptr, 2, &mapping_croot,
                          CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_ROOTCN_LOOKUP));
    }

    /* Lookup mapping slot in dest cspace */
    struct cte *mapping_cnode_cte;
    err = caps_lookup_slot(mapping_croot, mapping_cnptr, mapping_cn_level,
                           &mapping_cnode_cte, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
    }

    if (mapping_cnode_cte->cap.type != ObjType_L2CNode) {
        return SYSRET(SYS_ERR_DEST_TYPE_INVALID);
    }

    struct cte *mapping_cte = caps_locate_slot(get_address(&mapping_cnode_cte->cap),
                                               mapping_slot);
    if (mapping_cte->cap.type != ObjType_Null) {
        return SYSRET(SYS_ERR_SLOT_IN_USE);
    }

    /* Perform map */
    // XXX: this does not check if we do have CAPRIGHTS_READ_WRITE on
    // the destination cap (the page table we're inserting into)
    return SYSRET(caps_copy_to_vnode(cte_for_cap(ptable), slot, src_cte, flags,
                                     offset, pte_count, mapping_cte));
}
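
/*
 * For illustration (a sketch, not taken from actual call sites): mapping a
 * 2 MiB Frame capability into a leaf page table at base-page granularity
 * would use offset == 0 and pte_count == 512 (512 entries of 4 KiB each),
 * filling the page table from the given slot onwards.
 */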

struct sysret sys_delete(struct capability *root, capaddr_t cptr, uint8_t level)
{
    errval_t err;
    struct cte *slot;
    err = caps_lookup_slot(root, cptr, level, &slot, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    err = caps_delete(slot);
    return SYSRET(err);
}

struct sysret sys_revoke(struct capability *root, capaddr_t cptr, uint8_t level)
{
    errval_t err;
    struct cte *slot;
    err = caps_lookup_slot(root, cptr, level, &slot, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    err = caps_revoke(slot);
    return SYSRET(err);
}

struct sysret sys_get_state(struct capability *root, capaddr_t cptr, uint8_t level)
{
    errval_t err;
    struct cte *slot;
    err = caps_lookup_slot(root, cptr, level, &slot, CAPRIGHTS_READ);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    distcap_state_t state = distcap_get_state(slot);
    return (struct sysret) { .error = SYS_ERR_OK, .value = state };
}

struct sysret sys_get_size_l1cnode(struct capability *root)
{
    assert(root->type == ObjType_L1CNode);

    return (struct sysret) { .error = SYS_ERR_OK,
        .value = root->u.l1cnode.allocated_bytes};
}

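/**
 * \brief Replace the caller's L1 CNode with the one given by newroot_cptr.
 *
 * All occupied slots of the old root CNode are copied into the new one, a
 * copy of the old root capability is left in the slot named by
 * retcn_cptr/retslot (so the caller can reclaim its memory), and the new
 * root is then installed both in the caller's DCB and in the task CNode's
 * ROOTCN slot.
 */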
struct sysret sys_resize_l1cnode(struct capability *root, capaddr_t newroot_cptr,
                                 capaddr_t retcn_cptr, cslot_t retslot)
{
    errval_t err;

    if (root->type != ObjType_L1CNode) {
        return SYSRET(SYS_ERR_RESIZE_NOT_L1);
    }
    assert(root->type == ObjType_L1CNode);

    // Lookup new L1 CNode cap
    struct cte *newroot;
    err = caps_lookup_slot(root, newroot_cptr, 2, &newroot, CAPRIGHTS_ALLRIGHTS);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }
    if (newroot->cap.type != ObjType_L1CNode) {
        return SYSRET(SYS_ERR_INVALID_SOURCE_TYPE);
    }
    // TODO: check size of new CNode

    // Lookup slot for returning RAM of old CNode
    struct capability *retcn;
    err = caps_lookup_cap(root, retcn_cptr, 1, &retcn, CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }
    struct cte *ret = caps_locate_slot(get_address(retcn), retslot);
    if (ret->cap.type != ObjType_Null) {
        return SYSRET(SYS_ERR_SLOT_IN_USE);
    }

    // Copy over caps from old root cnode to new root cnode
    cslot_t root_slots = cnode_get_slots(root);
    cslot_t newroot_slots = cnode_get_slots(&newroot->cap);
    for (cslot_t i = 0; i < min(root_slots, newroot_slots); i++) {
        struct cte *src = caps_locate_slot(get_address(root), i);
        if (src->cap.type == ObjType_Null) {
            // skip empty slots in old root cnode
            continue;
        }
        struct cte *dest = caps_locate_slot(get_address(&newroot->cap), i);
        if (dest->cap.type != ObjType_Null) {
            // fail if slot in destination cnode occupied
            return SYSRET(SYS_ERR_SLOT_IN_USE);
        }
        // do proper cap copy
        err = caps_copy_to_cte(dest, src, false, 0, 0);
        if (err_is_fail(err)) {
            return SYSRET(err);
        }
    }

    // Copy the old root cnode into the return slot so that its RAM can be
    // reclaimed once we have deleted the copies in the dispatcher and the
    // task cnode below.
    err = caps_copy_to_cte(ret, cte_for_cap(root), false, 0, 0);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    // Set new root cnode in dispatcher
    err = caps_delete(&dcb_current->cspace);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }
    err = caps_copy_to_cte(&dcb_current->cspace, newroot, false, 0, 0);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    // Set new root cnode in task cnode
    struct cte *taskcn = caps_locate_slot(get_address(&newroot->cap),
                                          ROOTCN_SLOT_TASKCN);
    struct cte *rootcn_cap = caps_locate_slot(get_address(&taskcn->cap),
                                              TASKCN_SLOT_ROOTCN);
    assert(rootcn_cap == cte_for_cap(root));
    err = caps_delete(rootcn_cap);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }
    err = caps_copy_to_cte(rootcn_cap, newroot, false, 0, 0);
    if (err_is_fail(err)) {
        return SYSRET(err);
    }

    return SYSRET(SYS_ERR_OK);
}

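/**
 * \brief Yield the CPU, optionally to a specific dispatcher or endpoint.
 *
 * With target == CPTR_NULL this is an undirected yield: the caller gives up
 * its remaining timeslice (and leaves the run queue entirely if it has no
 * work, no unseen LMP messages and no pending wakeup).  With a dispatcher or
 * endpoint capability as target, the corresponding DCB is made runnable and
 * dispatched directly.
 */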
struct sysret sys_yield(capaddr_t target)
{
    dispatcher_handle_t handle = dcb_current->disp;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);

    debug(SUBSYS_DISPATCH, "%.*s yields%s\n", DISP_NAME_LEN, disp->name,
          !disp->haswork && disp->lmp_delivered == disp->lmp_seen
           ? " and is removed from the runq" : "");

    if (dcb_current->disabled == false) {
        printk(LOG_ERR, "SYSCALL_YIELD while enabled\n");
        dump_dispatcher(disp);
        return SYSRET(SYS_ERR_CALLER_ENABLED);
    }

    struct capability *yield_to = NULL;
    if (target != CPTR_NULL) {
        errval_t err;

        /* directed yield */
        err = caps_lookup_cap(&dcb_current->cspace.cap, target, 2,
                              &yield_to, CAPRIGHTS_READ);
        if (err_is_fail(err)) {
            return SYSRET(err);
        } else if (yield_to == NULL ||
                   (yield_to->type != ObjType_EndPoint
                    && yield_to->type != ObjType_Dispatcher)) {
            return SYSRET(SYS_ERR_INVALID_YIELD_TARGET);
        }
        /* FIXME: check rights? */
    }

    // Since we've done a yield, we explicitly ensure that the
    // dispatcher is upcalled the next time (on the understanding that
    // this is what the dispatcher wants), otherwise why call yield?
    dcb_current->disabled = false;

    // Remove from queue when no work and no more messages and no missed wakeup
    systime_t wakeup = disp->wakeup;
    if (!disp->haswork && disp->lmp_delivered == disp->lmp_seen
        && (wakeup == 0 || wakeup > (systime_now() + kcb_current->kernel_off))) {

        trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_SCHED_REMOVE,
            (uint32_t)(lvaddr_t)dcb_current & 0xFFFFFFFF);
        trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_SCHED_REMOVE,
                151);

        scheduler_remove(dcb_current);
        if (wakeup != 0) {
            wakeup_set(dcb_current, wakeup);
        }
    } else {
        // Otherwise yield for the timeslice
        scheduler_yield(dcb_current);
    }

    if (yield_to != NULL) {
        struct dcb *target_dcb = NULL;
        if (yield_to->type == ObjType_EndPoint) {
            target_dcb = yield_to->u.endpoint.listener;
        } else if (yield_to->type == ObjType_Dispatcher) {
            target_dcb = yield_to->u.dispatcher.dcb;
        } else {
            panic("invalid type in yield cap");
        }

        trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_YIELD,
            (uint32_t)(lvaddr_t)target_dcb & 0xFFFFFFFF);
        make_runnable(target_dcb);
        dispatch(target_dcb);
    } else {
//        trace_event(TRACE_SUBSYS_BNET, TRACE_EVENT_BNET_YIELD,
//            0);

        /* undirected yield */
        dispatch(schedule());
    }

    panic("Yield returned!");
}

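/**
 * \brief Suspend the calling core's current KCB.
 *
 * With do_halt set, the core is halted outright; otherwise the kernel
 * switches to the next KCB in the ring, re-enables the KCB scheduler and
 * dispatches from that KCB.  Like sys_yield, this must be called with the
 * dispatcher disabled and does not return to the caller.
 */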
struct sysret sys_suspend(bool do_halt)
{
    dispatcher_handle_t handle = dcb_current->disp;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);

    debug(SUBSYS_DISPATCH, "%.*s suspends (halt: %d)\n", DISP_NAME_LEN, disp->name, do_halt);

    if (dcb_current->disabled == false) {
        printk(LOG_ERR, "SYSCALL_SUSPEND while enabled\n");
        return SYSRET(SYS_ERR_CALLER_ENABLED);
    }

    dcb_current->disabled = false;

    if (do_halt) {
        //printf("%s:%s:%d: before halt of core (%"PRIuCOREID")\n",
        //       __FILE__, __FUNCTION__, __LINE__, my_core_id);
        halt();
    } else {
        // Note this only works if we're calling this inside
        // the kcb we're currently running
        printk(LOG_NOTE, "in sys_suspend(<no_halt>)!\n");
        printk(LOG_NOTE, "calling switch_kcb!\n");
        struct kcb *next = kcb_current->next;
        kcb_current->next = NULL;
        switch_kcb(next);
        // enable kcb scheduler
        printk(LOG_NOTE, "enabling kcb scheduler!\n");
        kcb_sched_suspended = false;
        // schedule something in the other kcb
        dispatch(schedule());
    }

    panic("Suspend returned!");
}

/**
 * The format of the returned ID is:
 *
 * --------------------------------------------------------------------
 * |             0 (unused) | coreid |         core_local_id          |
 * --------------------------------------------------------------------
 * 63                        39       31                              0 Bit
 *
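 * For example, an ID capability with coreid 2 and core_local_id 7 yields the
 * identifier (2ULL << 32) | 7 == 0x200000007.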
 */
struct sysret sys_idcap_identify(struct capability *cap, idcap_id_t *id)
{
    STATIC_ASSERT_SIZEOF(coreid_t, 1);

    idcap_id_t coreid = (idcap_id_t) cap->u.id.coreid;
    *id = coreid << 32 | cap->u.id.core_local_id;

    return SYSRET(SYS_ERR_OK);
}

/**
 * Calls correct handler function to spawn an app core.
 *
 * At the moment spawn_core_handlers is set-up per
 * architecture inside text_init() usually found in init.c.
 *
 * \note Generally the x86 terms of BSP and APP core are used
 * throughout Barrelfish to distinguish between bootstrap core (BSP)
 * and application cores (APP).
 *
 * \param  target   Identifier of the core which we want to boot
 * \param  cpu_type Architecture of the core.
 * \param  entry    Entry point for code to start execution.
 * \param  context  Architecture-specific boot context passed to the spawn handler.
 *
 * \retval SYS_ERR_OK Core successfully booted.
 * \retval SYS_ERR_ARCHITECTURE_NOT_SUPPORTED No handler registered for
 *     the specified cpu_type.
 * \retval SYS_ERR_CORE_NOT_FOUND Core failed to boot.
 */

struct sysret sys_monitor_spawn_core(hwid_t target, enum cpu_type cpu_type,
                                     genvaddr_t entry, genpaddr_t context)
{
    errval_t err;

    assert(cpu_type < CPU_TYPE_NUM);
    // TODO(gz): assert core_id valid
    // TODO(gz): assert entry range?

    if (cpu_type >= CPU_TYPE_NUM) {
        return SYSRET(SYS_ERR_ARCHITECTURE_NOT_SUPPORTED);
    }

    coreboot_start_fn_t start_fn = coreboot_get_spawn_handler(cpu_type);

    if (start_fn == NULL) {
        return SYSRET(SYS_ERR_ARCHITECTURE_NOT_SUPPORTED);
    }

    err = start_fn(target, entry, context);
    if (err_is_fail(err)) {
        err = err_push(err, SYS_ERR_CORE_NOT_FOUND);
    }
    return SYSRET(err);
}

struct sysret sys_kernel_add_kcb(struct kcb *new_kcb)
{
    kcb_add(new_kcb);

    // update the KCB's kernel time offset
    new_kcb->kernel_off -= systime_now();
    // reset scheduler statistics
    scheduler_reset_time();
    // update current core id of all domains
    kcb_update_core_id(new_kcb);
    // upcall domains with registered interrupts to tell them to re-register
    irq_table_notify_domains(new_kcb);

    return SYSRET(SYS_ERR_OK);
}

struct sysret sys_kernel_remove_kcb(struct kcb *to_remove)
{
    return SYSRET(kcb_remove(to_remove));
}

struct sysret sys_kernel_suspend_kcb_sched(bool suspend)
{
    printk(LOG_NOTE, "in kernel_suspend_kcb_sched invocation!\n");
    kcb_sched_suspended = suspend;
    return SYSRET(SYS_ERR_OK);
}

struct sysret sys_handle_kcb_identify(struct capability *to, struct frame_identity *fi)
{
    // Return the physical base address (and size) of the KCB frame
    // XXX: pack size into bottom bits of base address
    assert(to->type == ObjType_KernelControlBlock);
    lvaddr_t vkcb = (lvaddr_t) to->u.kernelcontrolblock.kcb;
    assert((vkcb & BASE_PAGE_MASK) == 0);

    if (!access_ok(ACCESS_WRITE, (lvaddr_t)fi, sizeof(struct frame_identity))) {
        return SYSRET(SYS_ERR_INVALID_USER_BUFFER);
    }

    fi->base = get_address(to);
    fi->bytes = get_size(to);

    return SYSRET(SYS_ERR_OK);
}