| // Copyright 2016 The Go Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style |
| // license that can be found in the LICENSE file. |
| |
| package runtime |
| |
| import ( |
| "runtime/internal/atomic" |
| "unsafe" |
| ) |
| |
// nsecInf is the maximum int64, matching Zircon's ZX_TIME_INFINITE.
const nsecInf = int64(0x7FFFFFFFFFFFFFFF)
| |
| func osyield() |
| |
| //go:linkname cputicks syscall/zx.Sys_ticks_get |
| func cputicks() int64 |
| |
| //go:linkname sys_cprng_draw syscall/zx.Sys_cprng_draw |
| //go:noescape |
| func sys_cprng_draw(buffer unsafe.Pointer, size uint) |
| |
| //go:linkname sys_system_get_num_cpus syscall/zx.Sys_system_get_num_cpus |
| func sys_system_get_num_cpus() uint32 |
| |
| //go:linkname sys_deadline_after syscall/zx.Sys_deadline_after |
| func sys_deadline_after(ns int64) int64 |
| |
| //go:linkname sys_futex_wait syscall/zx.Sys_futex_wait |
| //go:noescape |
| func sys_futex_wait(v *int32, cv int32, newOwner uint32, deadline int64) int32 |
| |
| //go:linkname sys_futex_wake syscall/zx.Sys_futex_wake |
| //go:noescape |
| func sys_futex_wake(v *int32, count uint32) int32 |
| |
| //go:linkname sys_thread_create syscall/zx.Sys_thread_create |
| //go:noescape |
| func sys_thread_create(p uint32, name *byte, nlen uint, opts int, out *uint32) int32 |
| |
| //go:linkname sys_thread_start syscall/zx.Sys_thread_start |
| //go:noescape |
| func sys_thread_start(h uint32, pc unsafe.Pointer, stk unsafe.Pointer, mp uintptr, arg2 uintptr) int32 |
| |
| //go:linkname sys_clock_get_monotonic syscall/zx.Sys_clock_get_monotonic |
| func sys_clock_get_monotonic() int64 |
| |
| //go:linkname sys_clock_get syscall/zx.Sys_clock_get |
| //go:noescape |
| func sys_clock_get(clock_id uint32, out *int64) int32 |
| |
| //go:linkname sys_debuglog_create syscall/zx.Sys_debuglog_create |
| //go:noescape |
| func sys_debuglog_create(resource uint32, options uint32, out *uint32) int32 |
| |
| //go:linkname sys_debuglog_write syscall/zx.Sys_debuglog_write |
| func sys_debuglog_write(handle uint32, options uint32, buffer unsafe.Pointer, buffer_size uint) int32 |
| |
| //go:linkname sys_nanosleep syscall/zx.Sys_nanosleep |
| func sys_nanosleep(deadline int64) int32 |
| |
| //go:linkname sys_process_exit syscall/zx.Sys_process_exit |
| func sys_process_exit(retcode int64) |
| |
| //go:linkname zx_set_stdio_handle syscall/zx.set_stdio_handle |
| func zx_set_stdio_handle(n int, h uint32, t uint32) |
| |
| //go:linkname zx_set_proc_handle syscall/zx.set_proc_handle |
| func zx_set_proc_handle(h uint32) |
| |
| //go:linkname zx_set_vmar_root syscall/zx.set_vmar_root |
| func zx_set_vmar_root(h uint32) |
| |
| //go:linkname zx_set_namespace syscall/zx.set_namespace |
| func zx_set_namespace(m map[string]uint32) |
| |
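// mOS holds Fuchsia-specific per-thread (m) state; nothing is needed yet.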
| type mOS struct{} |
| |
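// sigset is a placeholder: signal handling is not yet implemented on
// Fuchsia (see the TODO stubs below).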
| type sigset struct{} |
| |
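// getRandomData fills r with random bytes from the kernel's CPRNG.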
| //go:nosplit |
| func getRandomData(r []byte) { |
| if len(r) == 0 { |
| return |
| } |
| sys_cprng_draw(unsafe.Pointer(&r[0]), uint(len(r))) |
| } |
| |
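// futexsleep atomically checks that *addr still equals val and, if so,
// sleeps for up to ns nanoseconds (forever if ns is negative).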
| //go:nosplit |
| func futexsleep(addr *uint32, val uint32, ns int64) { |
	// A negative ns means wait forever; skip the deadline syscall in that case.
	deadline := nsecInf
	if ns >= 0 {
		deadline = sys_deadline_after(ns)
	}
| sys_futex_wait((*int32)(unsafe.Pointer(addr)), int32(val), 0, deadline) |
| } |
| |
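// futexwakeup wakes up to cnt threads sleeping on addr.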
| //go:nosplit |
| func futexwakeup(addr *uint32, cnt uint32) { |
| sys_futex_wake((*int32)(unsafe.Pointer(addr)), cnt) |
| } |
| |
| // cgocallm0 calls a C function on the current stack. |
| // |
// It is intended for use inside functions like osinit that run
// before mstart, directly on an OS thread stack.
| // |
| // If in doubt, do not use. |
| func cgocallm0(fn, arg unsafe.Pointer) |
| |
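// osinit runs early in startup, before the scheduler. It fetches the initial
// fdio handles via cgo and records the CPU count and page size.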
| func osinit() { |
| if _cgo_get_initial_handles != nil { |
| cgocallm0(_cgo_get_initial_handles, unsafe.Pointer(&fdioHandles)) |
		for i := 0; i < 3; i++ {
| zx_set_stdio_handle(i, fdioHandles.stdioClones[i], fdioHandles.stdioCloneTypes[i]) |
| } |
| zx_set_proc_handle(fdioHandles.processSelf) |
| zx_set_vmar_root(fdioHandles.vmarRootSelf) |
| } else { |
| println("runtime: fuchsia requires cgo") |
| exit(2) |
| } |
| |
| ncpu = int32(sys_system_get_num_cpus()) |
| physPageSize = 4096 |
| } |
| |
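// parseRootNS converts the flat path/handle arrays handed over by fdio into
// a map and passes it to syscall/zx as the root namespace.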
| func parseRootNS() { |
| if fdioHandles.rootNSNumHandles > 0 { |
| const maxHandleCount = 1 << 20 // arbitrary |
| paths := (*(*[maxHandleCount]*byte)(unsafe.Pointer(fdioHandles.rootNSPaths)))[:fdioHandles.rootNSNumHandles] |
| handles := (*(*[maxHandleCount]uint32)(unsafe.Pointer(fdioHandles.rootNSHandles)))[:fdioHandles.rootNSNumHandles] |
| m := make(map[string]uint32) |
| for i, p := range paths { |
| m[gostring(p)] = handles[i] |
| } |
| |
| zx_set_namespace(m) |
| } |
| } |
| |
| // Filled in by runtime/cgo when linked into binary. |
| var ( |
| _cgo_get_initial_handles unsafe.Pointer |
| _cgo_get_thread_self_handle unsafe.Pointer |
| ) |
| |
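// minit records the current OS thread's handle in m so that the profiler
// can later suspend and sample the thread.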
| //go:nosplit |
| func minit() { |
| var h uint32 |
| asmcgocall(_cgo_get_thread_self_handle, unsafe.Pointer(&h)) |
| atomic.Storeuintptr(&getg().m.thread, uintptr(h)) |
| } |
| |
| //go:nosplit |
| func unminit() { |
| // TODO |
| } |
| |
| func sigpanic() { |
| // TODO |
| } |
| |
| func mpreinit(mp *m) { |
| // TODO |
| } |
| |
| //go:nosplit |
| func msigsave(mp *m) { |
| // TODO |
| } |
| |
| //go:nosplit |
| func msigrestore(sigmask sigset) { |
| // TODO |
| } |
| |
| func initsig(preinit bool) { |
| // TODO |
| } |
| |
| //go:nosplit |
| func sigblock() { |
| // TODO |
| } |
| |
| func sigenable(sig uint32) { |
| // TODO |
| } |
| |
| func sigdisable(sig uint32) { |
| // TODO |
| } |
| |
| func sigignore(sig uint32) { |
| // TODO |
| } |
| |
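// crash deliberately faults to take the process down.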
| func crash() { |
| *(*int32)(nil) = 0 |
| } |
| |
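// threadinit is the entry point for threads created by newosproc; its body
// is defined in assembly.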
| func threadinit(mp *m) |
| |
| var gothreadname = []byte("gothread") |
| |
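// newosproc creates and starts a new OS thread running threadinit with mp as
// its argument and mp.g0's stack as its stack.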
| //go:nowritebarrier |
| func newosproc(mp *m) { |
| stk := unsafe.Pointer(mp.g0.stack.hi) |
| p := uint32(0) // TODO: only works due to temporary hack in zircon |
| var h uint32 |
| status := sys_thread_create(p, &gothreadname[0], uint(len(gothreadname)), 0, &h) |
| if status < 0 { |
| println("runtime: newosproc sys_thread_create failed:", h) |
| exit(2) |
| } |
| |
| mp.thread = uintptr(h) |
| |
| status = sys_thread_start(h, unsafe.Pointer(funcPC(threadinit)), stk, uintptr(unsafe.Pointer(mp)), 0) |
| if status < 0 { |
| println("runtime: newosproc sys_thread_start failed:", h) |
| exit(2) |
| } |
| } |
| |
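// The following structs mirror the corresponding C process-startup
// structures provided by Zircon.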
| type zx_proc_args struct { |
| protocol uint32 |
| version uint32 |
| handle_info_off uint32 |
| args_off uint32 |
| args_num uint32 |
| } |
| |
| type zx_proc_info struct { |
| magic uint32 |
| version uint32 |
| next_tls_slot uint32 |
| proc_args *zx_proc_args |
| handle *uint32 |
| handle_info *uint32 |
| handle_count int32 |
| argv **byte |
| argc int32 |
| } |
| |
| type zx_tls_root struct { |
| self *zx_tls_root |
| proc *zx_proc_info |
| magic uint32 |
| flags uint16 |
| maxslots uint16 |
| slots [8]uintptr // has length maxslots |
| } |
| |
| func resetcpuprofiler(hz int32) { |
| // TODO |
| } |
| |
// fdio_init matches the C struct of the same name in gcc_fdio.c.
| type fdio_init struct { |
| stdioClones [3]uint32 |
| stdioCloneNumHandles [3]uint32 |
| stdioCloneTypes [3]uint32 |
| processSelf uint32 |
| vmarRootSelf uint32 |
| envlen int32 |
| environ **byte |
| _ [4]byte |
| rootNSNumHandles int32 |
| rootNSHandles *uint32 |
| rootNSPaths **byte |
| } |
| |
| var fdioHandles fdio_init |
| |
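// goenvs copies the environment strings handed over by fdio into envs.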
| func goenvs() { |
| if _cgo_get_initial_handles != nil { |
| const maxEnvSize = 1 << 20 // arbitrary |
| envp := (*(*[maxEnvSize]*byte)(unsafe.Pointer(fdioHandles.environ)))[:fdioHandles.envlen] |
| envs = make([]string, len(envp)) |
| for i, e := range envp { |
| envs[i] = gostring(e) |
| } |
| // TODO: find a better place to call this |
| parseRootNS() |
| } else { |
| // TODO: implement cgo-less init |
| println("runtime: no fuchsia process handle without cgo yet") |
| exit(2) |
| } |
| } |
| |
| const _NSIG = 65 // TODO |
| |
| const ( |
| ZX_CLOCK_MONOTONIC = 0 |
| ZX_HANDLE_INVALID = 0 |
| ZX_INFO_THREAD = uint32(10) |
| ZX_OK = int32(0) |
| ZX_THREAD_STATE_GENERAL_REGS = uint32(0) |
| ZX_THREAD_STATE_NEW = uint32(0) |
| ZX_THREAD_SUSPENDED = uint32(1 << 5) |
| ZX_THREAD_TERMINATED = uint32(1 << 3) |
	ZX_TIME_INFINITE = int64(^uint64(1 << 63)) // max int64; equals nsecInf
| ZX_TIMER_SIGNALED = uint32(1 << 3) |
| ZX_TIMER_SLACK_EARLY = 1 |
| ) |
| |
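// nanotime returns the kernel's monotonic clock reading in nanoseconds.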
| //go:nosplit |
| func nanotime() int64 { |
| return sys_clock_get_monotonic() |
| } |
| |
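// time_now implements time.now: wall-clock seconds and nanoseconds from the
// UTC clock, plus a monotonic reading.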
| //go:linkname time_now time.now |
| //go:nosplit |
| func time_now() (sec int64, nsec int32, mono int64) { |
| const ZX_CLOCK_UTC = 1 |
| var x int64 |
| sys_clock_get(ZX_CLOCK_UTC, &x) |
	return x / 1e9, int32(x % 1e9), nanotime()
| } |
| |
| func unixnanotime() int64 { |
| return nanotime() |
| } |
| |
| // #define LOGBUF_MAX (ZX_LOG_RECORD_MAX - sizeof(zx_log_record_t)) |
| const logBufMax = 224 |
| |
| var logger uint32 |
| var logBufferArray [logBufMax]byte |
| var logBufferN int |
| |
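// write sends buf to the kernel debuglog, buffering partial lines in
// logBufferArray. Only the log fds 1 and 2 are supported.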
| //go:nosplit |
| func write(fd uintptr, buf unsafe.Pointer, n int32) int32 { |
	if fd != 1 && fd != 2 {
		panic("runtime.write(): cannot write to non-log fd")
	}

	// This initialization mechanism is not thread-safe, but if the only calls to 'write' with
	// these file descriptors come from print, they will already be serialized by a lock in
	// print's implementation.
	if logger == ZX_HANDLE_INVALID {
		sys_debuglog_create(0, 0, &logger)
		logBufferN = 0
	}
	// Ideally a failure here would panic, but the panic message itself would
	// be written blind, which is arguably worse.
	if logger == ZX_HANDLE_INVALID {
		return -1
	}
| |
| for i := int32(0); i < n; i++ { |
| c := *(*byte)(unsafe.Pointer(uintptr(buf) + uintptr(i))) |
| |
| if c == '\n' { |
| sys_debuglog_write(logger, 0, unsafe.Pointer(&logBufferArray[0]), uint(logBufferN)) |
| logBufferN = 0 |
| continue |
| } |
| |
| if c < ' ' { |
| continue |
| } |
| |
| logBufferArray[logBufferN] = c |
| logBufferN++ |
| |
| if logBufferN == len(logBufferArray) { |
| sys_debuglog_write(logger, 0, unsafe.Pointer(&logBufferArray[0]), uint(logBufferN)) |
| logBufferN = 0 |
| } |
| } |
| |
| return n |
| } |
| |
| //go:nosplit |
| func exit(code int32) { |
| sys_process_exit(int64(code)) |
| } |
| |
| //go:nosplit |
| func usleep(usec uint32) { |
	// Widen usec before multiplying so large values do not overflow uint32.
	sys_nanosleep(sys_deadline_after(int64(usec) * 1000))
| } |
| |
| func signame(sig uint32) string { |
| return "unknown_sig" // TODO |
| } |
| |
// zircon_mktls is not yet implemented.
| //go:nosplit |
| func zircon_mktls() { |
| // TODO |
| } |
| |
| //go:nosplit |
| func zircon_settls(val uintptr) { |
	// TODO: should call zx_set_object_property with property ZX_PROP_REGISTER_FS on amd64 or
	// ZX_PROP_REGISTER_CP15 on arm.
| } |
| |
| //go:linkname os_sigpipe os.sigpipe |
| func os_sigpipe() { |
| // TODO |
| } |
| |
// gsignalStack is unused on Fuchsia.
| type gsignalStack struct{} |
| |
| const ( |
| profStkSize = uintptr(1 << 16) |
| ) |
| |
| var ( |
| profTimer uint32 // timer handle |
| profThread uint32 // thread handle |
	profHz = ZX_TIME_INFINITE // sampling period in nanoseconds, despite the name
| profName = []byte("profiler") |
| profStk unsafe.Pointer |
| profStkFail = []byte("runtime: failed to allocate stack for the new OS thread\n") |
| ) |
| |
| type zxCpuSet struct { |
| mask [16]uint32 |
| } |
| |
| type zxInfoThread struct { |
| state uint32 |
| waitExceptionPortType uint32 |
| affinityMask zxCpuSet |
| } |
| |
| //go:noescape |
| //go:nosplit |
| func profiler_object_wait_one(handle uint32, signals uint32, deadline int64, observed *uint32) int32 |
| |
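// profilem samples one suspended thread: it waits for the suspension to take
// effect, reads the thread's general registers, and reports PC/SP/LR to
// sigprof.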
| func profilem(mp *m, thread uint32) { |
| // Ignore nascent threads. |
| var observed uint32 |
| var info zxInfoThread |
| var actualSize uint64 |
| var availSize uint64 |
| if status := vdsoCall_zx_object_get_info(thread, ZX_INFO_THREAD, unsafe.Pointer(&info), uint(unsafe.Sizeof(info)), unsafe.Pointer(&actualSize), unsafe.Pointer(&availSize)); status != ZX_OK { |
| return |
| } |
| if info.state == ZX_THREAD_STATE_NEW { |
| return |
| } |
| |
| // Wait for the thread to be suspended. |
| if status := profiler_object_wait_one(thread, ZX_THREAD_SUSPENDED|ZX_THREAD_TERMINATED, ZX_TIME_INFINITE, &observed); status != ZX_OK { |
| return |
| } |
| |
| // Ignore threads that were terminated as a result of suspension. |
| if observed&ZX_THREAD_TERMINATED == ZX_THREAD_TERMINATED { |
| return |
| } |
| |
| var r generalRegs |
| if status := vdsoCall_zx_thread_read_state(thread, ZX_THREAD_STATE_GENERAL_REGS, unsafe.Pointer(&r), uint(unsafe.Sizeof(r))); status != ZX_OK { |
| return |
| } |
| |
| if mp.curg != nil { |
| sigprof(r.rPC(), r.rSP(), r.rLR(), mp.curg, mp) |
| } else { |
| sigprofNonGoPC(r.rPC()) |
| } |
| } |
| |
// profileLoop runs while pprof CPU profiling is enabled. It runs without a P, hence nowritebarrierrec.
| // |
| //go:nosplit |
| //go:nowritebarrierrec |
| func profileLoop() { |
| me := getg().m |
| for { |
| var observed uint32 |
| if status := profiler_object_wait_one(profTimer, ZX_TIMER_SIGNALED, ZX_TIME_INFINITE, &observed); status != ZX_OK { |
| continue |
| } |
| |
		// Scan over all threads, suspending and sampling each in turn.
		// Ideally we would suspend them all first and give them time to
		// stop before sampling, but that would require allocating here.
| first := (*m)(atomic.Loadp(unsafe.Pointer(&allm))) |
| for mp := first; mp != nil; mp = mp.alllink { |
| thread := uint32(atomic.Loaduintptr(&mp.thread)) |
| // Do not profile threads blocked on Notes (this includes idle worker threads, idle timer thread, |
| // idle heap scavenger, etc.). Do not profile this thread. |
| if thread == ZX_HANDLE_INVALID || mp.profilehz == 0 || mp.blocked || mp == me { |
| continue |
| } |
| |
			var token uint32
			if status := vdsoCall_zx_task_suspend_token(thread, unsafe.Pointer(&token)); status == ZX_OK {
				profilem(mp, thread)
				// Closing the suspend token resumes the thread.
				vdsoCall_zx_handle_close(token)
			}
		}
| |
| due := vdsoCall_zx_deadline_after(profHz) |
| if status := vdsoCall_zx_timer_set(profTimer, due, ZX_TIMER_SLACK_EARLY); status != ZX_OK { |
| // TODO: what do? |
| return |
| } |
| } |
| } |
| |
| // profileloop is defined in sys_fuchsia_{amd,arm}64.s |
| func profileloop() |
| |
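// setProcessCPUProfiler lazily creates the profiling timer and spawns the m
// that runs profileLoop.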
| func setProcessCPUProfiler(hz int32) { |
	// This function is called inside the critical section guarded by
	// prof.signalLock in proc.go, so it can safely assume the profiler's
	// timer is created only once.
| if profTimer == ZX_HANDLE_INVALID { |
| if status := vdsoCall_zx_timer_create(ZX_TIMER_SLACK_EARLY, ZX_CLOCK_MONOTONIC, unsafe.Pointer(&profTimer)); status != ZX_OK { |
| // TODO: what do? |
| return |
| } |
| |
| newm(profileLoop, nil) |
| } |
| } |
| |
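// setThreadCPUProfiler records the sampling period for this m and arms (or
// cancels) the shared profiling timer.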
| func setThreadCPUProfiler(hz int32) { |
| // Don't do anything if we don't have a timer yet. |
| if profTimer == ZX_HANDLE_INVALID { |
| return |
| } |
| |
| atomic.Store((*uint32)(unsafe.Pointer(&getg().m.profilehz)), uint32(hz)) |
| |
| if hz > 0 { |
		ns := int64(1000000000) / int64(hz)
		// Clamp the sampling period to at least a microsecond; that is
		// likely already finer than the granularity profiles can
		// actually provide.
| if ns < 1000 { |
| ns = 1000 |
| } |
| |
| profHz = ns |
| due := vdsoCall_zx_deadline_after(ns) |
| if status := vdsoCall_zx_timer_set(profTimer, due, ZX_TIMER_SLACK_EARLY); status != ZX_OK { |
| // TODO: what do? |
| return |
| } |
| } else { |
| if status := vdsoCall_zx_timer_cancel(profTimer); status != ZX_OK { |
| return |
| } |
| } |
| } |
| |
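// exitThread is a no-op on Fuchsia.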
| func exitThread(wait *uint32) {} |
| |
| //go:nosplit |
| //go:nowritebarrierrec |
| func clearSignalHandlers() {} |