/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <types.h>
#include <benchmark/benchmark.h>
#include <arch/benchmark.h>
#include <benchmark/benchmark_track.h>
#include <benchmark/benchmark_utilisation.h>
#include <api/syscall.h>
#include <api/failures.h>
#include <api/faults.h>
#include <kernel/cspace.h>
#include <kernel/faulthandler.h>
#include <kernel/thread.h>
#include <kernel/vspace.h>
#include <machine/io.h>
#include <plat/machine/hardware.h>
#include <object/interrupt.h>
#include <model/statedata.h>
#include <string.h>
#include <kernel/traps.h>
#include <arch/machine.h>

#ifdef CONFIG_DEBUG_BUILD
#include <arch/machine/capdl.h>
#endif

/* The Haskell function 'handleEvent' is split into 'handleXXX' variants,
 * one for each event that causes a kernel entry */

exception_t handleInterruptEntry(void)
{
    irq_t irq;

    irq = getActiveIRQ();
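    /* With MCS, timestamp and budget updates touch scheduler state, so on SMP
     * they are only performed when this node is in the kernel lock queue
     * (always the case on unicore). */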
#ifdef CONFIG_KERNEL_MCS
    if (SMP_TERNARY(clh_is_self_in_queue(), 1)) {
        updateTimestamp();
        checkBudget();
    }
#endif

    if (IRQT_TO_IRQ(irq) != IRQT_TO_IRQ(irqInvalid)) {
        handleInterrupt(irq);
        Arch_finaliseInterrupt();
    } else {
#ifdef CONFIG_IRQ_REPORTING
        userError("Spurious interrupt!");
#endif
        handleSpuriousIRQ();
    }

#ifdef CONFIG_KERNEL_MCS
    if (SMP_TERNARY(clh_is_self_in_queue(), 1)) {
#endif
        schedule();
        activateThread();
#ifdef CONFIG_KERNEL_MCS
    }
#endif

    return EXCEPTION_NONE;
}

exception_t handleUnknownSyscall(word_t w)
{
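    /* Debug, benchmark and other config-dependent syscalls are dispatched
     * inline below; anything left unhandled falls through to an
     * UnknownSyscall fault delivered to the thread's fault handler. */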
#ifdef CONFIG_PRINTING
    if (w == SysDebugPutChar) {
        kernel_putchar(getRegister(NODE_STATE(ksCurThread), capRegister));
        return EXCEPTION_NONE;
    }
    if (w == SysDebugDumpScheduler) {
#ifdef CONFIG_DEBUG_BUILD
        debug_dumpScheduler();
#endif
        return EXCEPTION_NONE;
    }
#endif
#ifdef CONFIG_DEBUG_BUILD
    if (w == SysDebugHalt) {
        tcb_t *UNUSED tptr = NODE_STATE(ksCurThread);
        printf("Debug halt syscall from user thread %p \"%s\"\n", tptr, TCB_PTR_DEBUG_PTR(tptr)->tcbName);
        halt();
    }
    if (w == SysDebugSnapshot) {
#if defined CONFIG_ARCH_ARM || defined CONFIG_ARCH_X86_64 || defined CONFIG_ARCH_RISCV32
        tcb_t *UNUSED tptr = NODE_STATE(ksCurThread);
        printf("Debug snapshot syscall from user thread %p \"%s\"\n", tptr, TCB_PTR_DEBUG_PTR(tptr)->tcbName);
        capDL();
#else
        printf("Debug snapshot syscall not supported for the current arch\n");
#endif
        return EXCEPTION_NONE;
    }
    if (w == SysDebugCapIdentify) {
        word_t cptr = getRegister(NODE_STATE(ksCurThread), capRegister);
        lookupCapAndSlot_ret_t lu_ret = lookupCapAndSlot(NODE_STATE(ksCurThread), cptr);
        word_t cap_type = cap_get_capType(lu_ret.cap);
        setRegister(NODE_STATE(ksCurThread), capRegister, cap_type);
        return EXCEPTION_NONE;
    }

    if (w == SysDebugNameThread) {
        /* This is a syscall meant to aid debugging, so if anything goes wrong
         * then assume the system is completely misconfigured and halt */
        const char *name;
        word_t len;
        word_t cptr = getRegister(NODE_STATE(ksCurThread), capRegister);
        lookupCapAndSlot_ret_t lu_ret = lookupCapAndSlot(NODE_STATE(ksCurThread), cptr);
        /* ensure we got a TCB cap */
        word_t cap_type = cap_get_capType(lu_ret.cap);
        if (cap_type != cap_thread_cap) {
            userError("SysDebugNameThread: cap is not a TCB, halting");
            halt();
        }
        /* Look up the IPC buffer first so that a failed lookup is caught
         * before the pointer is offset past the message info word. */
        word_t *ipcBuffer = lookupIPCBuffer(true, NODE_STATE(ksCurThread));
        if (!ipcBuffer) {
            userError("SysDebugNameThread: Failed to lookup IPC buffer, halting");
            halt();
        }
        /* Add 1 to the IPC buffer to skip the message info word */
        name = (const char *)(ipcBuffer + 1);
        /* ensure the name isn't too long */
        len = strnlen(name, seL4_MsgMaxLength * sizeof(word_t));
        if (len == seL4_MsgMaxLength * sizeof(word_t)) {
            userError("SysDebugNameThread: Name too long, halting");
            halt();
        }
        setThreadName(TCB_PTR(cap_thread_cap_get_capTCBPtr(lu_ret.cap)), name);
        return EXCEPTION_NONE;
    }
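    /* A hypothetical user-side sketch (wrapper name from libsel4, not part of
     * this file): the caller writes the NUL-terminated name into its IPC
     * buffer starting one word past the message tag, then passes the TCB cap
     * in capRegister, e.g. via seL4_DebugNameThread(tcb_cap, "worker"). */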
#if defined ENABLE_SMP_SUPPORT && defined CONFIG_ARCH_ARM
    if (w == SysDebugSendIPI) {
        seL4_Word target = getRegister(NODE_STATE(ksCurThread), capRegister);
        seL4_Word irq = getRegister(NODE_STATE(ksCurThread), msgInfoRegister);

        /* valid cores are 0 .. CONFIG_MAX_NUM_NODES - 1 */
        if (target >= CONFIG_MAX_NUM_NODES) {
            userError("SysDebugSendIPI: Invalid target, halting");
            halt();
        }
        if (irq > 15) {
            userError("SysDebugSendIPI: Invalid IRQ, not a SGI, halting");
            halt();
        }

        ipi_send_target(CORE_IRQ_TO_IRQT(0, irq), BIT(target));
        return EXCEPTION_NONE;
    }
#endif /* ENABLE_SMP_SUPPORT && CONFIG_ARCH_ARM */
#endif /* CONFIG_DEBUG_BUILD */

#ifdef CONFIG_DANGEROUS_CODE_INJECTION
    if (w == SysDebugRun) {
        ((void (*)(void *))getRegister(NODE_STATE(ksCurThread), capRegister))((void *)getRegister(NODE_STATE(ksCurThread),
                                                                                                  msgInfoRegister));
        return EXCEPTION_NONE;
    }
#endif

#ifdef CONFIG_KERNEL_X86_DANGEROUS_MSR
    if (w == SysX86DangerousWRMSR) {
        uint64_t val;
        uint32_t reg = getRegister(NODE_STATE(ksCurThread), capRegister);
        if (CONFIG_WORD_SIZE == 32) {
            val = (uint64_t)getSyscallArg(0, NULL) | ((uint64_t)getSyscallArg(1, NULL) << 32);
        } else {
            val = getSyscallArg(0, NULL);
        }
        x86_wrmsr(reg, val);
        return EXCEPTION_NONE;
    } else if (w == SysX86DangerousRDMSR) {
        uint64_t val;
        uint32_t reg = getRegister(NODE_STATE(ksCurThread), capRegister);
        val = x86_rdmsr(reg);
        int num = 1;
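        /* On 32-bit builds the 64-bit MSR value is returned split across two
         * message registers. */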
        if (CONFIG_WORD_SIZE == 32) {
            setMR(NODE_STATE(ksCurThread), NULL, 0, val & 0xffffffff);
            setMR(NODE_STATE(ksCurThread), NULL, 1, val >> 32);
            num++;
        } else {
            setMR(NODE_STATE(ksCurThread), NULL, 0, val);
        }
        setRegister(NODE_STATE(ksCurThread), msgInfoRegister, wordFromMessageInfo(seL4_MessageInfo_new(0, 0, 0, num)));
        return EXCEPTION_NONE;
    }
#endif

#ifdef CONFIG_ENABLE_BENCHMARKS
    if (w == SysBenchmarkFlushCaches) {
#ifdef CONFIG_ARCH_ARM
        tcb_t *thread = NODE_STATE(ksCurThread);
        if (getRegister(thread, capRegister)) {
            arch_clean_invalidate_L1_caches(getRegister(thread, msgInfoRegister));
        } else {
            arch_clean_invalidate_caches();
        }
#else
        arch_clean_invalidate_caches();
#endif
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkResetLog) {
#ifdef CONFIG_KERNEL_LOG_BUFFER
        if (ksUserLogBuffer == 0) {
            userError("A user-level buffer has to be set before resetting benchmark. "
                      "Use seL4_BenchmarkSetLogBuffer");
            setRegister(NODE_STATE(ksCurThread), capRegister, seL4_IllegalOperation);
            return EXCEPTION_SYSCALL_ERROR;
        }

        ksLogIndex = 0;
#endif /* CONFIG_KERNEL_LOG_BUFFER */
#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
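        /* Restart utilisation tracking; the schedule in progress at the time
         * of the reset is credited to the current thread. */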
        NODE_STATE(benchmark_log_utilisation_enabled) = true;
        benchmark_track_reset_utilisation(NODE_STATE(ksIdleThread));
        NODE_STATE(ksCurThread)->benchmark.schedule_start_time = ksEnter;
        NODE_STATE(ksCurThread)->benchmark.number_schedules++;

        NODE_STATE(benchmark_start_time) = ksEnter;
        NODE_STATE(benchmark_kernel_time) = 0;
        NODE_STATE(benchmark_kernel_number_entries) = 0;
        NODE_STATE(benchmark_kernel_number_schedules) = 1;
        benchmark_arch_utilisation_reset();
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */
        setRegister(NODE_STATE(ksCurThread), capRegister, seL4_NoError);
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkFinalizeLog) {
#ifdef CONFIG_KERNEL_LOG_BUFFER
        ksLogIndexFinalized = ksLogIndex;
        setRegister(NODE_STATE(ksCurThread), capRegister, ksLogIndexFinalized);
#endif /* CONFIG_KERNEL_LOG_BUFFER */
#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
        benchmark_utilisation_finalise();
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkSetLogBuffer) {
#ifdef CONFIG_KERNEL_LOG_BUFFER
        word_t cptr_userFrame = getRegister(NODE_STATE(ksCurThread), capRegister);

        if (benchmark_arch_map_logBuffer(cptr_userFrame) != EXCEPTION_NONE) {
            setRegister(NODE_STATE(ksCurThread), capRegister, seL4_IllegalOperation);
            return EXCEPTION_SYSCALL_ERROR;
        }

        setRegister(NODE_STATE(ksCurThread), capRegister, seL4_NoError);
        return EXCEPTION_NONE;
#endif /* CONFIG_KERNEL_LOG_BUFFER */
    }

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    else if (w == SysBenchmarkGetThreadUtilisation) {
        benchmark_track_utilisation_dump();
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkResetThreadUtilisation) {
        word_t tcb_cptr = getRegister(NODE_STATE(ksCurThread), capRegister);
        lookupCap_ret_t lu_ret;
        word_t cap_type;

        lu_ret = lookupCap(NODE_STATE(ksCurThread), tcb_cptr);
        /* ensure we got a TCB cap */
        cap_type = cap_get_capType(lu_ret.cap);
        if (cap_type != cap_thread_cap) {
            userError("SysBenchmarkResetThreadUtilisation: cap is not a TCB, halting");
            return EXCEPTION_NONE;
        }

        tcb_t *tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(lu_ret.cap));

        benchmark_track_reset_utilisation(tcb);
        return EXCEPTION_NONE;
    }
#ifdef CONFIG_DEBUG_BUILD
    else if (w == SysBenchmarkDumpAllThreadsUtilisation) {
        printf("{\n");
        printf("  \"BENCHMARK_TOTAL_UTILISATION\":%lu,\n",
               (word_t)(NODE_STATE(benchmark_end_time) - NODE_STATE(benchmark_start_time)));
        printf("  \"BENCHMARK_TOTAL_KERNEL_UTILISATION\":%lu,\n", (word_t) NODE_STATE(benchmark_kernel_time));
        printf("  \"BENCHMARK_TOTAL_NUMBER_KERNEL_ENTRIES\":%lu,\n", (word_t) NODE_STATE(benchmark_kernel_number_entries));
        printf("  \"BENCHMARK_TOTAL_NUMBER_SCHEDULES\":%lu,\n", (word_t) NODE_STATE(benchmark_kernel_number_schedules));
        printf("  \"BENCHMARK_TCB_\": [\n");
        for (tcb_t *curr = NODE_STATE(ksDebugTCBs); curr != NULL; curr = TCB_PTR_DEBUG_PTR(curr)->tcbDebugNext) {
            printf("    {\n");
            printf("      \"NAME\":\"%s\",\n", TCB_PTR_DEBUG_PTR(curr)->tcbName);
            printf("      \"UTILISATION\":%lu,\n", (word_t) curr->benchmark.utilisation);
            printf("      \"NUMBER_SCHEDULES\":%lu,\n", (word_t) curr->benchmark.number_schedules);
            printf("      \"KERNEL_UTILISATION\":%lu,\n", (word_t) curr->benchmark.kernel_utilisation);
            printf("      \"NUMBER_KERNEL_ENTRIES\":%lu\n", (word_t) curr->benchmark.number_kernel_entries);
            printf("    }");
            if (TCB_PTR_DEBUG_PTR(curr)->tcbDebugNext != NULL) {
                printf(",\n");
            } else {
                printf("\n");
            }
        }
        printf("  ]\n}\n");
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkResetAllThreadsUtilisation) {
        for (tcb_t *curr = NODE_STATE(ksDebugTCBs); curr != NULL; curr = TCB_PTR_DEBUG_PTR(curr)->tcbDebugNext) {
            benchmark_track_reset_utilisation(curr);
        }
        return EXCEPTION_NONE;
    }
#endif /* CONFIG_DEBUG_BUILD */
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */

    else if (w == SysBenchmarkNullSyscall) {
        return EXCEPTION_NONE;
    }
#endif /* CONFIG_ENABLE_BENCHMARKS */

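    /* MCS_DO_IF_BUDGET only runs its body if, after updating the timestamp,
     * the current scheduling context still has budget; on non-MCS kernels it
     * always runs the body. */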
    MCS_DO_IF_BUDGET({
#ifdef CONFIG_SET_TLS_BASE_SELF
        if (w == SysSetTLSBase)
        {
            word_t tls_base = getRegister(NODE_STATE(ksCurThread), capRegister);
            /*
             * This updates the real register as opposed to the thread state
             * value. For many architectures, the TLS variables only get
             * updated on a thread switch.
             */
            return Arch_setTLSRegister(tls_base);
        }
#endif
        current_fault = seL4_Fault_UnknownSyscall_new(w);
        handleFault(NODE_STATE(ksCurThread));
    })

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}

exception_t handleUserLevelFault(word_t w_a, word_t w_b)
{
    MCS_DO_IF_BUDGET({
        current_fault = seL4_Fault_UserException_new(w_a, w_b);
        handleFault(NODE_STATE(ksCurThread));
    })
    schedule();
    activateThread();

    return EXCEPTION_NONE;
}

exception_t handleVMFaultEvent(vm_fault_type_t vm_faultType)
{
    MCS_DO_IF_BUDGET({

        exception_t status = handleVMFault(NODE_STATE(ksCurThread), vm_faultType);
        if (status != EXCEPTION_NONE)
        {
            handleFault(NODE_STATE(ksCurThread));
        }
    })

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}

#ifdef CONFIG_KERNEL_MCS
static exception_t handleInvocation(bool_t isCall, bool_t isBlocking, bool_t canDonate, bool_t firstPhase, cptr_t cptr)
#else
static exception_t handleInvocation(bool_t isCall, bool_t isBlocking)
#endif
{
    seL4_MessageInfo_t info;
    lookupCapAndSlot_ret_t lu_ret;
    word_t *buffer;
    exception_t status;
    word_t length;
    tcb_t *thread;

    thread = NODE_STATE(ksCurThread);

    info = messageInfoFromWord(getRegister(thread, msgInfoRegister));
#ifndef CONFIG_KERNEL_MCS
    cptr_t cptr = getRegister(thread, capRegister);
#endif

    /* faulting section */
    lu_ret = lookupCapAndSlot(thread, cptr);

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invocation of invalid cap #%lu.", cptr);
        current_fault = seL4_Fault_CapFault_new(cptr, false);

        if (isBlocking) {
            handleFault(thread);
        }

        return EXCEPTION_NONE;
    }

    buffer = lookupIPCBuffer(false, thread);

    status = lookupExtraCaps(thread, buffer, info);

    if (unlikely(status != EXCEPTION_NONE)) {
        userError("Lookup of extra caps failed.");
        if (isBlocking) {
            handleFault(thread);
        }
        return EXCEPTION_NONE;
    }

    /* Syscall error/Preemptible section */
    length = seL4_MessageInfo_get_length(info);
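    /* Without an IPC buffer, only the message registers can be transferred,
     * so clamp the length to what fits in registers. */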
    if (unlikely(length > n_msgRegisters && !buffer)) {
        length = n_msgRegisters;
    }
#ifdef CONFIG_KERNEL_MCS
    status = decodeInvocation(seL4_MessageInfo_get_label(info), length,
                              cptr, lu_ret.slot, lu_ret.cap,
                              current_extra_caps, isBlocking, isCall,
                              canDonate, firstPhase, buffer);
#else
    status = decodeInvocation(seL4_MessageInfo_get_label(info), length,
                              cptr, lu_ret.slot, lu_ret.cap,
                              current_extra_caps, isBlocking, isCall,
                              buffer);
#endif

    if (unlikely(status == EXCEPTION_PREEMPTED)) {
        return status;
    }

    if (unlikely(status == EXCEPTION_SYSCALL_ERROR)) {
        if (isCall) {
            replyFromKernel_error(thread);
        }
        return EXCEPTION_NONE;
    }

    if (unlikely(
            thread_state_get_tsType(thread->tcbState) == ThreadState_Restart)) {
        if (isCall) {
            replyFromKernel_success_empty(thread);
        }
        setThreadState(thread, ThreadState_Running);
    }

    return EXCEPTION_NONE;
}

#ifdef CONFIG_KERNEL_MCS
static inline lookupCap_ret_t lookupReply(void)
{
    word_t replyCPtr = getRegister(NODE_STATE(ksCurThread), replyRegister);
    lookupCap_ret_t lu_ret = lookupCap(NODE_STATE(ksCurThread), replyCPtr);
    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Reply cap lookup failed");
        current_fault = seL4_Fault_CapFault_new(replyCPtr, true);
        handleFault(NODE_STATE(ksCurThread));
        return lu_ret;
    }

    if (unlikely(cap_get_capType(lu_ret.cap) != cap_reply_cap)) {
        userError("Cap in reply slot is not a reply");
        current_fault = seL4_Fault_CapFault_new(replyCPtr, true);
        handleFault(NODE_STATE(ksCurThread));
        lu_ret.status = EXCEPTION_FAULT;
        return lu_ret;
    }

    return lu_ret;
}
#else
static void handleReply(void)
{
    cte_t *callerSlot;
    cap_t callerCap;

    callerSlot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCaller);
    callerCap = callerSlot->cap;

    switch (cap_get_capType(callerCap)) {
    case cap_reply_cap: {
        tcb_t *caller;

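        /* A master reply cap lives in the TCB's own reply slot and should
         * never appear in the caller slot; fall through to fail() below. */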
        if (cap_reply_cap_get_capReplyMaster(callerCap)) {
            break;
        }
        caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));
        /* Haskell error:
         * "handleReply: caller must not be the current thread" */
        assert(caller != NODE_STATE(ksCurThread));
        doReplyTransfer(NODE_STATE(ksCurThread), caller, callerSlot,
                        cap_reply_cap_get_capReplyCanGrant(callerCap));
        return;
    }

    case cap_null_cap:
        userError("Attempted reply operation when no reply cap present.");
        return;

    default:
        break;
    }

    fail("handleReply: invalid caller cap");
}
#endif

#ifdef CONFIG_KERNEL_MCS
static void handleRecv(bool_t isBlocking, bool_t canReply)
#else
static void handleRecv(bool_t isBlocking)
#endif
{
    word_t epCPtr;
    lookupCap_ret_t lu_ret;

    epCPtr = getRegister(NODE_STATE(ksCurThread), capRegister);

    lu_ret = lookupCap(NODE_STATE(ksCurThread), epCPtr);

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        /* current_lookup_fault has been set by lookupCap */
        current_fault = seL4_Fault_CapFault_new(epCPtr, true);
        handleFault(NODE_STATE(ksCurThread));
        return;
    }

    switch (cap_get_capType(lu_ret.cap)) {
    case cap_endpoint_cap:
        if (unlikely(!cap_endpoint_cap_get_capCanReceive(lu_ret.cap))) {
            current_lookup_fault = lookup_fault_missing_capability_new(0);
            current_fault = seL4_Fault_CapFault_new(epCPtr, true);
            handleFault(NODE_STATE(ksCurThread));
            break;
        }

#ifdef CONFIG_KERNEL_MCS
        cap_t ep_cap = lu_ret.cap;
        cap_t reply_cap = cap_null_cap_new();
        if (canReply) {
            lu_ret = lookupReply();
            if (lu_ret.status != EXCEPTION_NONE) {
                return;
            } else {
                reply_cap = lu_ret.cap;
            }
        }
        receiveIPC(NODE_STATE(ksCurThread), ep_cap, isBlocking, reply_cap);
#else
        deleteCallerCap(NODE_STATE(ksCurThread));
        receiveIPC(NODE_STATE(ksCurThread), lu_ret.cap, isBlocking);
#endif
        break;

    case cap_notification_cap: {
        notification_t *ntfnPtr;
        tcb_t *boundTCB;
        ntfnPtr = NTFN_PTR(cap_notification_cap_get_capNtfnPtr(lu_ret.cap));
        boundTCB = (tcb_t *)notification_ptr_get_ntfnBoundTCB(ntfnPtr);
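        /* Receiving on a notification that is bound to some other TCB is a
         * cap fault, as is receiving without the CanReceive right. */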
        if (unlikely(!cap_notification_cap_get_capNtfnCanReceive(lu_ret.cap)
                     || (boundTCB && boundTCB != NODE_STATE(ksCurThread)))) {
            current_lookup_fault = lookup_fault_missing_capability_new(0);
            current_fault = seL4_Fault_CapFault_new(epCPtr, true);
            handleFault(NODE_STATE(ksCurThread));
            break;
        }

        receiveSignal(NODE_STATE(ksCurThread), lu_ret.cap, isBlocking);
        break;
    }
    default:
        current_lookup_fault = lookup_fault_missing_capability_new(0);
        current_fault = seL4_Fault_CapFault_new(epCPtr, true);
        handleFault(NODE_STATE(ksCurThread));
        break;
    }
}

#ifdef CONFIG_KERNEL_MCS
static inline void mcsIRQ(irq_t irq)
{
    if (IRQT_TO_IRQ(irq) == KERNEL_TIMER_IRQ) {
        /* If this is a timer IRQ we must update the time, since we need to
         * reprogram the timer and must not lose the time just consumed by
         * the kernel. */
        updateTimestamp();
    }

    /* At this point we could be handling a timer interrupt that ends the
     * current thread's timeslice. However, preemption is possible on revoke,
     * which could have deleted the current thread and/or the current
     * scheduling context, rendering them invalid. */
    if (isSchedulable(NODE_STATE(ksCurThread))) {
        /* if the thread is schedulable, the tcb and scheduling context are still valid */
        checkBudget();
    } else if (NODE_STATE(ksCurSC)->scRefillMax) {
        /* otherwise, if the thread is not schedulable, the SC could be valid - charge it if so */
        chargeBudget(NODE_STATE(ksConsumed), false, CURRENT_CPU_INDEX(), true);
    }
}
#else
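/* Without MCS, the extra MCS-only parameters are compiled away so the call
 * sites below read the same in both configurations. */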
#define handleRecv(isBlocking, canReply) handleRecv(isBlocking)
#define mcsIRQ(irq)
#define handleInvocation(isCall, isBlocking, canDonate, firstPhase, cptr) handleInvocation(isCall, isBlocking)
#endif

static void handleYield(void)
{
#ifdef CONFIG_KERNEL_MCS
    /* Yield the current remaining budget */
    ticks_t consumed = NODE_STATE(ksCurSC)->scConsumed + NODE_STATE(ksConsumed);
    chargeBudget(refill_head(NODE_STATE(ksCurSC))->rAmount, false, CURRENT_CPU_INDEX(), true);
    /* Manually update scConsumed so that only the time actually consumed is
     * added, not the full timeslice */
    NODE_STATE(ksCurSC)->scConsumed = consumed;
#else
    tcbSchedDequeue(NODE_STATE(ksCurThread));
    SCHED_APPEND_CURRENT_TCB;
    rescheduleRequired();
#endif
}

exception_t handleSyscall(syscall_t syscall)
{
    exception_t ret;
    irq_t irq;
    MCS_DO_IF_BUDGET({
        switch (syscall)
        {
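        /* For Send-style invocations, a non-EXCEPTION_NONE result means the
         * invocation was preempted; the pending interrupt is handled here
         * before rescheduling. */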
        case SysSend:
            ret = handleInvocation(false, true, false, false, getRegister(NODE_STATE(ksCurThread), capRegister));
            if (unlikely(ret != EXCEPTION_NONE)) {
                irq = getActiveIRQ();
                if (IRQT_TO_IRQ(irq) != IRQT_TO_IRQ(irqInvalid)) {
                    mcsIRQ(irq);
                    handleInterrupt(irq);
                    Arch_finaliseInterrupt();
                }
            }
            break;

        case SysNBSend:
            ret = handleInvocation(false, false, false, false, getRegister(NODE_STATE(ksCurThread), capRegister));
            if (unlikely(ret != EXCEPTION_NONE)) {
                irq = getActiveIRQ();
                if (IRQT_TO_IRQ(irq) != IRQT_TO_IRQ(irqInvalid)) {
                    mcsIRQ(irq);
                    handleInterrupt(irq);
                    Arch_finaliseInterrupt();
                }
            }
            break;

        case SysCall:
            ret = handleInvocation(true, true, true, false, getRegister(NODE_STATE(ksCurThread), capRegister));
            if (unlikely(ret != EXCEPTION_NONE)) {
                irq = getActiveIRQ();
                if (IRQT_TO_IRQ(irq) != IRQT_TO_IRQ(irqInvalid)) {
                    mcsIRQ(irq);
                    handleInterrupt(irq);
                    Arch_finaliseInterrupt();
                }
            }
            break;

        case SysRecv:
            handleRecv(true, true);
            break;
#ifndef CONFIG_KERNEL_MCS
        case SysReply:
            handleReply();
            break;

        case SysReplyRecv:
            handleReply();
            handleRecv(true, true);
            break;

#else /* CONFIG_KERNEL_MCS */
        case SysWait:
            handleRecv(true, false);
            break;

        case SysNBWait:
            handleRecv(false, false);
            break;
        case SysReplyRecv: {
            cptr_t reply = getRegister(NODE_STATE(ksCurThread), replyRegister);
            ret = handleInvocation(false, false, true, true, reply);
            /* reply cannot error and is not preemptible */
            assert(ret == EXCEPTION_NONE);
            handleRecv(true, true);
            break;
        }

        case SysNBSendRecv: {
            cptr_t dest = getNBSendRecvDest();
            ret = handleInvocation(false, false, true, true, dest);
            if (unlikely(ret != EXCEPTION_NONE)) {
                irq = getActiveIRQ();
                if (IRQT_TO_IRQ(irq) != IRQT_TO_IRQ(irqInvalid)) {
                    mcsIRQ(irq);
                    handleInterrupt(irq);
                    Arch_finaliseInterrupt();
                }
                break;
            }
            handleRecv(true, true);
            break;
        }

        case SysNBSendWait:
            ret = handleInvocation(false, false, true, true, getRegister(NODE_STATE(ksCurThread), replyRegister));
            if (unlikely(ret != EXCEPTION_NONE)) {
                irq = getActiveIRQ();
                if (IRQT_TO_IRQ(irq) != IRQT_TO_IRQ(irqInvalid)) {
                    mcsIRQ(irq);
                    handleInterrupt(irq);
                    Arch_finaliseInterrupt();
                }
                break;
            }
            handleRecv(true, false);
            break;
#endif
        case SysNBRecv:
            handleRecv(false, true);
            break;

        case SysYield:
            handleYield();
            break;

        default:
            fail("Invalid syscall");
        }

    })

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}