blob: 36fe7f7b4e89ee2f57152efcf7e7e00f04dceb34 [file] [log] [blame]
// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT
#include <err.h>
#include <kernel/stats.h>
#include <kernel/thread.h>
#include <lib/ktrace.h>
#include <lib/vdso.h>
#include <object/process_dispatcher.h>
#include <platform.h>
#include <syscalls/syscalls.h>
#include <trace.h>
#include <zircon/zx-syscall-numbers.h>
#include <inttypes.h>
#include <stdint.h>
#include "priv.h"
#include "vdso-valid-sysret.h"
#define LOCAL_TRACE 0
// Shared failure path for a syscall whose number or caller PC did not pass
// validation. Signals a policy exception on the calling thread and hands
// ZX_ERR_BAD_SYSCALL back to the dispatch path as the syscall's result.
__NO_INLINE static int sys_invalid_syscall(uint64_t num, uint64_t pc,
                                           uintptr_t vdso_code_address) {
    LTRACEF("invalid syscall %lu from PC %#lx vDSO code %#lx\n",
            num, pc, vdso_code_address);
    // Let the thread's exception/policy machinery know before returning the
    // error status to the caller.
    thread_signal_policy_exception();
    return ZX_ERR_BAD_SYSCALL;
}
// N.B. Interrupts must be disabled on entry and they will be disabled on exit.
// The reason is the two calls to arch_curr_cpu_num in the ktrace calls: we
// don't want the cpu changing during the call.
//
// Shared prologue for every syscall: emits the ktrace enter event, bumps the
// syscall counter, re-enables interrupts, and returns (via out-params) the
// calling process and the base address of its vDSO code, which the caller
// uses to validate the syscall's return PC.
__NO_INLINE static void syscall_pre(uint64_t syscall_num, uint64_t pc, ProcessDispatcher** current_process, uintptr_t* vdso_code_address) {
    // Trace word layout: syscall number in the upper bits, current cpu in the
    // low byte.
    ktrace_tiny(TAG_SYSCALL_ENTER, (static_cast<uint32_t>(syscall_num) << 8) | arch_curr_cpu_num());
    CPU_STATS_INC(syscalls);
    /* re-enable interrupts to maintain kernel preemptiveness
       This must be done after the above ktrace_tiny call, and after the
       above CPU_STATS_INC call as it also calls arch_curr_cpu_num. */
    arch_enable_ints();
    LTRACEF_LEVEL(2, "t %p syscall num %" PRIu64 " ip/pc %#" PRIx64 "\n",
                  get_current_thread(), syscall_num, pc);
    *current_process = ProcessDispatcher::GetCurrent();
    *vdso_code_address = (*current_process)->vdso_code_address();
}
// Shared epilogue for every syscall: logs the return value, re-disables
// interrupts (required before reading arch_curr_cpu_num for the ktrace exit
// event — see the N.B. on syscall_pre), and packages the result together
// with whether the current thread has a pending signal, so the arch return
// path can divert through signal handling.
__NO_INLINE static syscall_result syscall_post(uint64_t syscall_num, uint64_t ret) {
    LTRACEF_LEVEL(2, "t %p ret %#" PRIx64 "\n", get_current_thread(), ret);
    /* re-disable interrupts on the way out
       This must be done before the below ktrace_tiny call. */
    arch_disable_ints();
    // Trace word layout matches TAG_SYSCALL_ENTER in syscall_pre: cast the
    // syscall number first, then shift. (Previously the shift happened inside
    // the cast, an inconsistent spelling of the same value.)
    ktrace_tiny(TAG_SYSCALL_EXIT, (static_cast<uint32_t>(syscall_num) << 8) | arch_curr_cpu_num());
    // Interrupts stay disabled from here; the assembler caller will re-enable
    // them at the appropriate time.
    return {ret, thread_is_signaled(get_current_thread())};
}
// Dispatches a single syscall: runs the shared prologue, checks that the
// caller's PC is a valid syscall site inside the process's vDSO, then either
// invokes make_call (the per-syscall wrapper) or the invalid-syscall handler,
// and finally runs the shared epilogue.
template <typename T>
static inline syscall_result do_syscall(uint64_t syscall_num, uint64_t pc,
                                        bool (*valid_pc)(uintptr_t), T make_call) {
    ProcessDispatcher* process;
    uintptr_t vdso_base;
    // Shared pre-syscall work (tracing, stats, interrupt re-enable).
    syscall_pre(syscall_num, pc, &process, &vdso_base);
    // Only PCs at blessed locations within the vDSO may issue this syscall;
    // anything else is rejected as a bad syscall.
    const uint64_t result = unlikely(!valid_pc(pc - vdso_base))
            ? static_cast<uint64_t>(sys_invalid_syscall(syscall_num, pc, vdso_base))
            : make_call(process);
    // Shared post-syscall work (tracing, interrupt disable, signal check).
    return syscall_post(syscall_num, result);
}
// Entry point for a syscall number outside the known table. The PC predicate
// rejects every address, so do_syscall always routes to the invalid-syscall
// path; the make_call callback can never actually run.
syscall_result unknown_syscall(uint64_t syscall_num, uint64_t pc) {
    auto reject_every_pc = [](uintptr_t) { return false; };
    auto never_called = [](ProcessDispatcher*) {
        __builtin_unreachable();
        return ZX_ERR_INTERNAL;
    };
    return do_syscall(syscall_num, pc, reject_every_pc, never_called);
}
// Autogenerated per-syscall wrapper functions.
#include <zircon/syscall-kernel-wrappers.inc>