// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"runtime/internal/atomic"
"syscall/zx"
"unsafe"
)
type mOS struct{}
type sigset struct{}
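// getRandomData fills r with random bytes from the kernel, presumably via
// Zircon's CPRNG underneath zx.RandRead.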
//go:nosplit
func getRandomData(r []byte) {
zx.RandRead(r)
}
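// futexsleep puts the current thread to sleep if *addr still equals val,
// until futexwakeup is called on addr or ns nanoseconds have elapsed.
// A negative ns means sleep with no timeout.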
//go:nosplit
func futexsleep(addr *uint32, val uint32, ns int64) {
deadline := zx.Time(zx.TimensecInfinite)
if ns >= 0 {
deadline = zx.Sys_deadline_after(zx.Duration(ns))
}
zx.Sys_futex_wait((*int32)(unsafe.Pointer(addr)), int32(val), zx.HandleInvalid, deadline)
}
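// futexwakeup wakes up at most cnt threads blocked in futexsleep on addr.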
//go:nosplit
func futexwakeup(addr *uint32, cnt uint32) {
zx.Sys_futex_wake((*int32)(unsafe.Pointer(addr)), cnt)
}
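// osyield surrenders the CPU: a zx_nanosleep deadline of 0 is already in
// the past, so the call returns immediately after yielding the processor.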
//go:nosplit
func osyield() {
zx.Sys_nanosleep(0)
}
// cgocallm0 calls a C function on the current stack.
//
// It is intended for use inside functions like osinit that run
// before mstart directly on an OS thread stack.
//
// If in doubt, do not use.
func cgocallm0(fn, arg unsafe.Pointer)
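// osinit runs once at startup, before mstart. It retrieves the handles
// passed to the process (stdio, process self, root VMAR, root namespace)
// from runtime/cgo and records basic machine parameters.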
func osinit() {
if _cgo_get_initial_handles != nil {
cgocallm0(_cgo_get_initial_handles, unsafe.Pointer(&fdioHandles))
for i := 0; i < 3; i++ {
zx.StdioHandles[i] = fdioHandles.stdioClones[i]
zx.StdioHandleTypes[i] = int(fdioHandles.stdioCloneTypes[i])
}
zx.ProcHandle = zx.Handle(fdioHandles.processSelf)
zx.VMARRoot = zx.VMAR(fdioHandles.vmarRootSelf)
} else {
// TODO: implement cgo-less init
println("runtime: no fuchsia process handle without cgo yet")
exit(2)
}
ncpu = int32(zx.Sys_system_get_num_cpus())
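// Zircon uses 4 KB pages on the architectures this port targets.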
physPageSize = 4096
}
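// parseRootNS populates zx.RootNSMap from the parallel path and handle
// arrays describing the process's root namespace.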
func parseRootNS() {
if fdioHandles.rootNSNumHandles > 0 {
const maxHandleCount = 1 << 20 // arbitrary
paths := (*(*[maxHandleCount]*byte)(unsafe.Pointer(fdioHandles.rootNSPaths)))[:fdioHandles.rootNSNumHandles]
handles := (*(*[maxHandleCount]zx.Handle)(unsafe.Pointer(fdioHandles.rootNSHandles)))[:fdioHandles.rootNSNumHandles]
zx.RootNSMap = make(map[string]zx.Handle)
for i, p := range paths {
zx.RootNSMap[gostring(p)] = handles[i]
}
}
}
// Filled in by runtime/cgo when linked into binary.
var (
_cgo_get_initial_handles unsafe.Pointer
_cgo_get_thread_self_handle unsafe.Pointer
)
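// minit records the current thread's handle in m.thread so that the
// profiler can later suspend and sample this thread.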
//go:nosplit
func minit() {
var h zx.Handle
asmcgocall(_cgo_get_thread_self_handle, unsafe.Pointer(&h))
atomic.Storeuintptr(&getg().m.thread, uintptr(h))
}
//go:nosplit
func unminit() {
// TODO
}
func sigpanic() {
// TODO
}
func mpreinit(mp *m) {
// TODO
}
//go:nosplit
func msigsave(mp *m) {
// TODO
}
//go:nosplit
func msigrestore(sigmask sigset) {
// TODO
}
func initsig(preinit bool) {
// TODO
}
//go:nosplit
func sigblock() {
// TODO
}
func sigenable(sig uint32) {
// TODO
}
func sigdisable(sig uint32) {
// TODO
}
func sigignore(sig uint32) {
// TODO
}
func crash() {
*(*int32)(nil) = 0
}
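// threadinit is the entry point for threads created by newosproc; it has
// no Go body and is implemented in assembly.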
func threadinit(mp *m)
var gothreadname = []byte("gothread")
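// newosproc creates a new OS thread with zx_thread_create and starts it at
// threadinit, passing mp as the thread argument and g0's stack as its stack.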
//go:nowritebarrier
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
p := zx.Handle(0) // TODO: only works due to temporary hack in zircon
var h zx.Handle
status := zx.Sys_thread_create(p, &gothreadname[0], uint(len(gothreadname)), 0, &h)
if status < 0 {
println("runtime: newosproc zx.Sys_thread_create failed:", h)
exit(2)
}
mp.thread = uintptr(h)
status = zx.Sys_thread_start(h, zx.Vaddr(funcPC(threadinit)), zx.Vaddr(stk), uintptr(unsafe.Pointer(mp)), 0)
if status < 0 {
println("runtime: newosproc zx.Sys_thread_start failed:", h)
exit(2)
}
}
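// The following types mirror C structures from Zircon's process-startup
// ABI; their layout must match the C definitions exactly.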
type zx_proc_args struct {
protocol uint32
version uint32
handle_info_off uint32
args_off uint32
args_num uint32
}
type zx_proc_info struct {
magic uint32
version uint32
next_tls_slot uint32
proc_args *zx_proc_args
handle *zx.Handle
handle_info *uint32
handle_count int32
argv **byte
argc int32
}
type zx_tls_root struct {
self *zx_tls_root
proc *zx_proc_info
magic uint32
flags uint16
maxslots uint16
slots [8]uintptr // has length maxslots
}
func resetcpuprofiler(hz int32) {
// TODO
}
// fdio_init matches the C struct of the same name in gcc_fdio.c.
type fdio_init struct {
stdioClones [3]zx.Handle
stdioCloneNumHandles [3]uint32
stdioCloneTypes [3]uint32
processSelf zx.Handle
vmarRootSelf zx.Handle
envlen int32
environ **byte
_ [4]byte
rootNSNumHandles int32
rootNSHandles *zx.Handle
rootNSPaths **byte
}
var fdioHandles fdio_init
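// goenvs converts the C environment captured by _cgo_get_initial_handles
// into the runtime's envs slice.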
func goenvs() {
if _cgo_get_initial_handles != nil {
const maxEnvSize = 1 << 20 // arbitrary
envp := (*(*[maxEnvSize]*byte)(unsafe.Pointer(fdioHandles.environ)))[:fdioHandles.envlen]
envs = make([]string, len(envp))
for i, e := range envp {
envs[i] = gostring(e)
}
// TODO: find a better place to call this
parseRootNS()
} else {
// TODO: implement cgo-less init
println("runtime: no fuchsia process handle without cgo yet")
exit(2)
}
}
const _NSIG = 65 // TODO
const (
ZX_CLOCK_MONOTONIC = 0
ZX_HANDLE_INVALID = 0
ZX_INFO_THREAD = uint32(10)
ZX_OK = int32(0)
ZX_THREAD_STATE_GENERAL_REGS = uint32(0)
ZX_THREAD_STATE_NEW = uint32(0)
ZX_THREAD_SUSPENDED = uint32(1 << 5)
ZX_THREAD_TERMINATED = uint32(1 << 3)
ZX_TIME_INFINITE = int64(^uint64(1 << 63))
ZX_TIMER_SIGNALED = uint32(1 << 3)
ZX_TIMER_SLACK_EARLY = 1
)
//go:nosplit
func nanotime() int64 {
return int64(zx.Sys_clock_get_monotonic())
}
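// time_now returns the UTC wall clock split into seconds and nanoseconds,
// plus a monotonic reading for time.Time's monotonic clock.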
//go:linkname time_now time.now
//go:nosplit
func time_now() (sec int64, nsec int32, mono int64) {
const ZX_CLOCK_UTC = 1
var x zx.Time
zx.Sys_clock_get(ZX_CLOCK_UTC, &x)
return int64(x / 1e9), int32(x % 1e9), nanotime()
}
func unixnanotime() int64 {
return nanotime()
}
// #define LOGBUF_MAX (ZX_LOG_RECORD_MAX - sizeof(zx_log_record_t))
const logBufMax = 224
var logger zx.Handle
var logBufferArray [logBufMax]byte
var logBufferN int
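// write sends output for fds 1 and 2 to the kernel debuglog, buffering
// bytes and flushing whenever a newline arrives or the buffer fills.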
//go:nosplit
func write(fd uintptr, buf unsafe.Pointer, n int32) int32 {
if fd != 1 && fd != 2 {
panic("runtime.write(): can not write to non-log fd")
}
// This initialization mechanism is not thread-safe, but if the only calls to 'write' with
// these file descriptors come from print, they will already be serialized by a lock in
// print's implementation.
if logger <= 0 {
zx.Sys_debuglog_create(0, 0, &logger)
logBufferN = 0
}
// Ideally this should panic, but that would go down "blind", which is arguably worse.
if logger <= 0 {
return -1
}
for i := int32(0); i < n; i++ {
c := *(*byte)(unsafe.Pointer(uintptr(buf) + uintptr(i)))
if c == '\n' {
zx.Sys_debuglog_write(logger, 0, unsafe.Pointer(&logBufferArray[0]), uint(logBufferN))
logBufferN = 0
continue
}
if c < ' ' {
continue
}
logBufferArray[logBufferN] = c
logBufferN++
if logBufferN == len(logBufferArray) {
zx.Sys_debuglog_write(logger, 0, unsafe.Pointer(&logBufferArray[0]), uint(logBufferN))
logBufferN = 0
}
}
return n
}
//go:nosplit
func exit(code int32) {
zx.Sys_process_exit(int64(code))
}
//go:nosplit
func usleep(usec uint32) {
// Widen usec before multiplying so the conversion to nanoseconds
// cannot overflow uint32.
zx.Sys_nanosleep(zx.Sys_deadline_after(zx.Duration(usec) * 1000))
}
func signame(sig uint32) string {
return "unknown_sig" // TODO
}
// zircon_mktls
//go:nosplit
func zircon_mktls() {
// TODO
}
//go:nosplit
func zircon_settls(val uintptr) {
// TODO: should call zx_object_set_property with property ZX_PROP_REGISTER_FS
// on amd64 or ZX_PROP_REGISTER_CP15 on arm.
}
//go:linkname os_sigpipe os.sigpipe
func os_sigpipe() {
// TODO
}
// gsignalStack is unused on fuchsia
type gsignalStack struct{}
const (
profStkSize = uintptr(1 << 16)
)
var (
profTimer uint32 // timer handle
profThread uint32 // thread handle
profHz = ZX_TIME_INFINITE
profName = []byte("profiler")
profStk unsafe.Pointer
profStkFail = []byte("runtime: failed to allocate stack for the new OS thread\n")
)
type zxCpuSet struct {
mask [16]uint32
}
type zxInfoThread struct {
state uint32
waitExceptionPortType uint32
affinityMask zxCpuSet
}
//go:noescape
//go:nosplit
func profiler_object_wait_one(handle uint32, signals uint32, deadline int64, observed *uint32) int32
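// profilem records a profiling sample for mp's already-suspended thread by
// reading its general registers and passing them to sigprof.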
func profilem(mp *m, thread uint32) {
// Ignore nascent threads.
var observed uint32
var info zxInfoThread
var actualSize uint64
var availSize uint64
if status := vdsoCall_zx_object_get_info(thread, ZX_INFO_THREAD, unsafe.Pointer(&info), uint(unsafe.Sizeof(info)), unsafe.Pointer(&actualSize), unsafe.Pointer(&availSize)); status != ZX_OK {
return
}
if info.state == ZX_THREAD_STATE_NEW {
return
}
// Wait for the thread to be suspended.
if status := profiler_object_wait_one(thread, ZX_THREAD_SUSPENDED|ZX_THREAD_TERMINATED, ZX_TIME_INFINITE, &observed); status != ZX_OK {
return
}
// Ignore threads that were terminated as a result of suspension.
if observed&ZX_THREAD_TERMINATED == ZX_THREAD_TERMINATED {
return
}
var r generalRegs
if status := vdsoCall_zx_thread_read_state(thread, ZX_THREAD_STATE_GENERAL_REGS, unsafe.Pointer(&r), uint(unsafe.Sizeof(r))); status != ZX_OK {
return
}
if mp.curg != nil {
sigprof(r.rPC(), r.rSP(), r.rLR(), mp.curg, mp)
} else {
sigprofNonGoPC(r.rPC())
}
}
// profileLoop runs on its own thread, created when pprof CPU profiling is enabled. It runs without a p, hence nowritebarrierrec.
//
//go:nosplit
//go:nowritebarrierrec
func profileLoop() {
me := getg().m
for {
var observed uint32
if status := profiler_object_wait_one(profTimer, ZX_TIMER_SIGNALED, ZX_TIME_INFINITE, &observed); status != ZX_OK {
continue
}
// Scan over all threads and suspend them, then trace. Ideally, we'd scan over
// all of them and give them a chance to suspend before this, but I don't want
// to deal with allocation here.
first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
for mp := first; mp != nil; mp = mp.alllink {
thread := uint32(atomic.Loaduintptr(&mp.thread))
// Do not profile threads blocked on Notes (this includes idle worker threads, idle timer thread,
// idle heap scavenger, etc.). Do not profile this thread.
if thread == ZX_HANDLE_INVALID || mp.profilehz == 0 || mp.blocked || mp == me {
continue
}
var token uint32
if status := vdsoCall_zx_task_suspend_token(thread, unsafe.Pointer(&token)); status == ZX_OK {
profilem(mp, thread)
// Closing the suspend token resumes the thread.
vdsoCall_zx_handle_close(token)
}
}
due := vdsoCall_zx_deadline_after(profHz)
if status := vdsoCall_zx_timer_set(profTimer, due, ZX_TIMER_SLACK_EARLY); status != ZX_OK {
// TODO: what do?
return
}
}
}
// profileloop is defined in sys_fuchsia_{amd,arm}64.s
func profileloop()
func setProcessCPUProfiler(hz int32) {
// This function is called inside the critical section guarded by
// prof.signalLock in proc.go, so it is safe to assume the profiler's
// timer is created only once.
if profTimer == ZX_HANDLE_INVALID {
if status := vdsoCall_zx_timer_create(ZX_TIMER_SLACK_EARLY, ZX_CLOCK_MONOTONIC, unsafe.Pointer(&profTimer)); status != ZX_OK {
// TODO: what do?
return
}
newm(profileLoop, nil)
}
}
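// setThreadCPUProfiler publishes the requested sampling rate for the
// current M and re-arms (or cancels) the shared profiling timer.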
func setThreadCPUProfiler(hz int32) {
// Don't do anything if we don't have a timer yet.
if profTimer == ZX_HANDLE_INVALID {
return
}
atomic.Store((*uint32)(unsafe.Pointer(&getg().m.profilehz)), uint32(hz))
if hz > 0 {
ns := int64(1000000000) / int64(hz)
// Bound tracing frequency to microseconds. This is likely
// already well above the granularity traces can actually
// provide.
if ns < 1000 {
ns = 1000
}
profHz = ns
due := vdsoCall_zx_deadline_after(ns)
if status := vdsoCall_zx_timer_set(profTimer, due, ZX_TIMER_SLACK_EARLY); status != ZX_OK {
// TODO: what do?
return
}
} else {
if status := vdsoCall_zx_timer_cancel(profTimer); status != ZX_OK {
return
}
}
}
func exitThread(wait *uint32) {}
//go:nosplit
//go:nowritebarrierrec
func clearSignalHandlers() {}