// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

// mkfuchsiavdso.go uses the output of the internal/fuchsia/abigen parser to
// produce the vDSO symbol keys and Go assembly call stubs for the Zircon
// syscall entry points.
//
// Regenerate with:
//
// go run mkfuchsiavdso.go -keys > vdso_keys_fuchsia.go
// go run mkfuchsiavdso.go -calls -arch=amd64 > vdsocalls_fuchsia_amd64.s
// go run mkfuchsiavdso.go -calls -arch=arm64 > vdsocalls_fuchsia_arm64.s
package main

import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"internal/fuchsia/abigen"
)

var (
	keys        = flag.Bool("keys", false, "generate vDSO symbol keys and Go call stubs")
	calls       = flag.Bool("calls", false, "generate assembly call implementations")
	arch        = flag.String("arch", "amd64", "architecture to generate calls for")
	fuchsiaRoot = flag.String("fuchsia_root", filepath.Join(os.Getenv("HOME"), "fuchsia"), "path to fuchsia root")
)
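
// elfHashGNU computes the GNU ELF symbol hash of s (the DJB hash: h seeded
// with 5381, then h = h*33 + c for each byte). The generated keys must carry
// the same hash the runtime uses to resolve _zx_* symbols in the vDSO.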
func elfHashGNU(s string) uint32 {
h := uint32(5381)
for i := 0; i < len(s); i++ {
h = h<<5 + h + uint32(s[i])
}
return h
}

func main() {
flag.Parse()
	switch *arch {
	case "amd64", "arm64":
	default:
		log.Fatalf("GOARCH=%s not supported", *arch)
	}
	syscallsFile := filepath.Join(*fuchsiaRoot, "zircon", "system", "public", "zircon", "syscalls.abigen")
if args := flag.Args(); len(args) != 0 {
syscallsFile = args[0]
}
b, err := ioutil.ReadFile(syscallsFile)
if err != nil {
log.Fatal(err)
}
p := abigen.NewParser(b, syscallsFile)
defs, err := p.Parse()
if err != nil {
log.Fatal(err)
}
buf := new(bytes.Buffer)
if *keys {
writeKeys(buf, defs)
} else if *calls {
writeCalls(buf, defs)
}
buf.WriteTo(os.Stdout)
}
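
// writeKeys emits the Go source for the vDSO symbol keys file: the
// vdsoSymbolKeys table pairing each _zx_* symbol name with its elfHashGNU
// value, the //go:cgo_import_dynamic and //go:linkname directives, the
// vdsoCall_zx_* stub declarations, and the vdso_zx_* entry-point variables.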
func writeKeys(buf *bytes.Buffer, defs []abigen.SysDef) {
fmt.Fprint(buf, keyHeader[1:])
fmt.Fprint(buf, "var vdsoSymbolKeys = []vdsoSymbolKey{\n")
for _, def := range defs {
sym := "_zx_" + def.Name
fmt.Fprintf(buf, "\t{\"%s\", 0x%x, &vdso_zx_%s},\n", sym, elfHashGNU(sym), def.Name)
}
fmt.Fprint(buf, "}\n")
fmt.Fprint(buf, "\n")
for _, def := range defs {
fmt.Fprintf(buf, "//go:cgo_import_dynamic vdso_zx_%s _zx_%s\n", def.Name, def.Name)
}
fmt.Fprint(buf, "\n")
for _, def := range defs {
fmt.Fprintf(buf, "//go:linkname vdso_zx_%s vdso_zx_%s\n", def.Name, def.Name)
}
fmt.Fprint(buf, "\n")
for _, def := range defs {
fmt.Fprint(buf, "//go:noescape\n")
fmt.Fprint(buf, "//go:nosplit\n")
printStub(buf, def)
fmt.Fprint(buf, "\n")
}
fmt.Fprint(buf, "var (\n")
for _, def := range defs {
fmt.Fprintf(buf, "\tvdso_zx_%s uintptr\n", def.Name)
}
fmt.Fprint(buf, ")\n")
}
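
// writeCalls emits, for each syscall, the Go signature of its stub as a
// comment followed by the assembly implementation for the selected -arch.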
func writeCalls(buf *bytes.Buffer, defs []abigen.SysDef) {
fmt.Fprint(buf, callHeader[1:])
for _, def := range defs {
fmt.Fprint(buf, "// ")
printStub(buf, def)
printAsm(buf, def)
fmt.Fprint(buf, "\n")
}
}

const keyHeader = `
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Keys for vDSO symbols.
// Generated by mkfuchsiavdso.go, do not edit.

package runtime

import "unsafe"

const (
	// vdsoArrayMax is the byte-size of a maximally sized array on this architecture.
	// See cmd/compile/internal/amd64/galign.go arch.MAXWIDTH initialization.
	vdsoArrayMax = 1<<50 - 1
)
`

const callHeader = `
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Zircon system calls for the Fuchsia OS.
// Generated by mkfuchsiavdso.go, do not edit.

#include "go_asm.h"
#include "go_tls.h"
#include "textflag.h"
#include "funcdata.h"
`
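
// printStub prints the Go signature of one stub. For a syscall named
// handle_close taking a uint32 handle and returning an int32 status
// (illustrative), it emits:
//
//	func vdsoCall_zx_handle_close(handle uint32) int32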
func printStub(buf *bytes.Buffer, def abigen.SysDef) {
	fmt.Fprintf(buf, "func vdsoCall_zx_%s(", def.Name)
	printed := 0
	for _, arg := range def.Args {
		if arg.Type == (abigen.SysType{}) {
			continue
		}
		// Count emitted parameters rather than using the loop index, so a
		// skipped empty argument cannot leave a leading comma.
		if printed > 0 {
			fmt.Fprint(buf, ", ")
		}
		fmt.Fprintf(buf, "%s %s", arg.Name, arg.Type.NativeType())
		printed++
	}
	fmt.Fprint(buf, ")")
	if def.Ret != (abigen.SysType{}) {
		fmt.Fprintf(buf, " %s", def.Ret.NativeType())
	}
	fmt.Fprint(buf, "\n")
}

// amd64RegArgs lists the amd64 registers used for function arguments, in
// calling-convention order.
var amd64RegArgs = []string{"DI", "SI", "DX", "CX", "R8", "R9", "R12", "R13"}

// arm64RegArgs lists the arm64 registers used for function arguments, in
// calling-convention order.
var arm64RegArgs = []string{"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7"}

// blockingSyscalls is a map of known syscalls which may block.
// TODO(dhobsd): Is this list accurate? Potentially add nanosleep, channel_call, and others.
var blockingSyscalls = map[string]bool{
"port_wait": true,
"object_wait_one": true,
"object_wait_many": true,
}
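
// printAsm emits the assembly body of one stub. For the illustrative
// handle_close stub above, the amd64 output has this shape:
//
//	TEXT runtime·vdsoCall_zx_handle_close(SB),NOSPLIT,$8-12
//	GO_ARGS
//	NO_LOCAL_POINTERS
//	... save PC/SP in m.vdsoPC/m.vdsoSP for pprof tracebacks ...
//	MOVL handle+0(FP), DI
//	MOVQ vdso_zx_handle_close(SB), AX
//	CALL AX
//	MOVL AX, ret+8(FP)
//	... clear m.vdsoSP ...
//	RET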
func printAsm(buf *bytes.Buffer, def abigen.SysDef) {
// The summed size of all arguments.
argSize := 0
for _, arg := range def.Args {
sz := arg.Type.Size()
if *arch == "arm64" && sz == 1 {
sz = 8
}
for argSize%sz != 0 {
// Add padding until the 'argSize' is aligned to the type we are adding.
argSize++
}
argSize += sz
}
if argSize%8 == 4 {
// Force the return argument on the stack to be 8-byte aligned, not 4-byte aligned
argSize += 4
}
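	// For example, a lone uint32 argument yields argSize 4, rounded up to 8
	// here so that the result written at ret+argSize(FP) below stays
	// 8-byte aligned.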
retSize := def.Ret.Size()
var frameSize int
var regArgs []string
var callIns, retReg, suffix4, suffix8 string
switch *arch {
case "amd64":
regArgs = amd64RegArgs
callIns = "CALL"
retReg = "AX"
suffix8 = "Q"
suffix4 = "L"
frameSize = 8
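		// Beyond six register arguments, arguments 7 and 8 arrive in R12
		// and R13 and are pushed onto the stack around the call below;
		// reserve frame space for those pushes plus SP re-alignment.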
switch len(def.Args) {
case 7:
frameSize += 16 + 8
case 8:
frameSize += 16 + 2*8
}
case "arm64":
regArgs = arm64RegArgs
callIns = "BL"
retReg = "R0"
suffix8 = "D"
suffix4 = "W"
default:
panic(fmt.Sprintf("arch=%s not supported", *arch))
}
fmt.Fprintf(buf, "TEXT runtime·vdsoCall_zx_%s(SB),NOSPLIT,$%d-%d\n", def.Name, frameSize, argSize+retSize)
fmt.Fprint(buf, "\tGO_ARGS\n")
fmt.Fprint(buf, "\tNO_LOCAL_POINTERS\n")
// Set vdso{PC,SP} so that pprof tracebacks work for VDSO calls.
switch *arch {
case "amd64":
fmt.Fprint(buf, "\tget_tls(CX)\n")
fmt.Fprint(buf, "\tMOVQ g(CX), AX\n")
fmt.Fprint(buf, "\tMOVQ g_m(AX), R14\n")
fmt.Fprint(buf, "\tPUSHQ R14\n")
fmt.Fprintf(buf, "\tMOVQ %d(SP), DX\n", frameSize+16)
fmt.Fprint(buf, "\tMOVQ DX, m_vdsoPC(R14)\n")
fmt.Fprintf(buf, "\tLEAQ %d(SP), DX\n", frameSize+16)
fmt.Fprint(buf, "\tMOVQ DX, m_vdsoSP(R14)\n")
case "arm64":
fmt.Fprint(buf, "\tMOVD g_m(g), R21\n")
fmt.Fprint(buf, "\tMOVD LR, m_vdsoPC(R21)\n")
fmt.Fprint(buf, "\tMOVD RSP, R20\n")
fmt.Fprint(buf, "\tMOVD R20, m_vdsoSP(R21)\n")
fmt.Fprint(buf, "\tMOVD R21, 8(RSP)\n")
}
if _, ok := blockingSyscalls[def.Name]; ok {
fmt.Fprintf(buf, "\tCALL runtime·entersyscall(SB)\n")
}
off := 0
for i, arg := range def.Args {
name := arg.Name
suffix := suffix8
t := arg.Type
sz := t.Size()
if sz == 4 {
suffix = suffix4
} else if *arch == "arm64" && sz == 1 {
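			// Match the argument layout computed above: 1-byte arguments
			// are widened to 8 bytes on arm64.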
sz = 8
}
for off%sz != 0 {
// Add padding until the offset is aligned to the type we are accessing
off++
}
fmt.Fprintf(buf, "\tMOV%s %s+%d(FP), %s\n", suffix, name, off, regArgs[i])
off += sz
}
switch *arch {
case "amd64":
if len(def.Args) >= 7 {
fmt.Fprintf(buf, "\tMOVQ SP, BP // BP is preserved across vsdo call by the x86-64 ABI\n")
fmt.Fprintf(buf, "\tANDQ $~15, SP // stack alignment for x86-64 ABI\n")
if len(def.Args) == 8 {
fmt.Fprintf(buf, "\tPUSHQ R13\n")
}
fmt.Fprintf(buf, "\tPUSHQ R12\n")
}
fmt.Fprintf(buf, "\tMOVQ vdso_zx_%s(SB), AX\n", def.Name)
fmt.Fprintf(buf, "\tCALL AX\n")
if len(def.Args) >= 7 {
fmt.Fprintf(buf, "\tPOPQ R12\n")
if len(def.Args) == 8 {
fmt.Fprintf(buf, "\tPOPQ R13\n")
}
fmt.Fprintf(buf, "\tMOVQ BP, SP\n")
}
case "arm64":
fmt.Fprintf(buf, "\tBL vdso_zx_%s(SB)\n", def.Name)
}
	if retSize > 0 {
suffix := suffix8
if retSize == 4 {
suffix = suffix4
}
fmt.Fprintf(buf, "\tMOV%s %s, ret+%d(FP)\n", suffix, retReg, argSize)
}
if _, ok := blockingSyscalls[def.Name]; ok {
fmt.Fprintf(buf, "\t%s runtime·exitsyscall(SB)\n", callIns)
}
// Clear vdsoSP. sigprof only checks vdsoSP for generating tracebacks, so we can leave vdsoPC alone.
switch *arch {
case "amd64":
fmt.Fprintf(buf, "\tPOPQ R14\n")
fmt.Fprintf(buf, "\tMOVQ $0, m_vdsoSP(R14)\n")
case "arm64":
fmt.Fprint(buf, "\tMOVD g_m(g), R21\n")
fmt.Fprintf(buf, "\tMOVD $0, m_vdsoSP(R21)\n")
}
fmt.Fprintf(buf, "\tRET\n")
}