Initial commit of llgo.
llvm-svn: 222857
GitOrigin-RevId: ad9841e8ac74bfcb1814b728a143408e87dd00a7
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..5aaec5a
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,170 @@
+include(ExternalProject)
+include(ProcessorCount)
+
+llvm_add_go_executable(llgo llvm.org/llgo/cmd/gllgo ALL DEPENDS
+ build/context.go
+ cmd/gllgo/gllgo.go
+ debug/debug.go
+ irgen/annotations.go
+ irgen/attribute.go
+ irgen/builtins.go
+ irgen/cabi.go
+ irgen/call.go
+ irgen/channels.go
+ irgen/closures.go
+ irgen/compiler.go
+ irgen/errors.go
+ irgen/indirect.go
+ irgen/interfaces.go
+ irgen/maps.go
+ irgen/parser.go
+ irgen/predicates.go
+ irgen/println.go
+ irgen/runtime.go
+ irgen/slice.go
+ irgen/ssa.go
+ irgen/strings.go
+ irgen/targets.go
+ irgen/typemap.go
+ irgen/types.go
+ irgen/utils.go
+ irgen/value.go
+ irgen/version.go
+ ssaopt/esc.go
+)
+
+install(FILES ${CMAKE_BINARY_DIR}/bin/llgo${CMAKE_EXECUTABLE_SUFFIX}
+ DESTINATION bin
+ PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE
+ GROUP_READ GROUP_EXECUTE
+ WORLD_READ WORLD_EXECUTE)
+
+llvm_add_go_executable(llgo-stage2 llvm.org/llgo/cmd/gllgo
+ DEPENDS libgo ${CMAKE_BINARY_DIR}/bin/llgo${CMAKE_EXECUTABLE_SUFFIX}
+ GOFLAGS "cc=${CMAKE_BINARY_DIR}/bin/clang"
+ "cxx=${CMAKE_BINARY_DIR}/bin/clang++"
+ "llgo=${CMAKE_BINARY_DIR}/bin/llgo${CMAKE_EXECUTABLE_SUFFIX}"
+)
+
+llvm_add_go_executable(llgo-stage3 llvm.org/llgo/cmd/gllgo
+ DEPENDS libgo ${CMAKE_BINARY_DIR}/bin/llgo-stage2${CMAKE_EXECUTABLE_SUFFIX}
+ GOFLAGS "cc=${CMAKE_BINARY_DIR}/bin/clang"
+ "cxx=${CMAKE_BINARY_DIR}/bin/clang++"
+ "llgo=${CMAKE_BINARY_DIR}/bin/llgo-stage2${CMAKE_EXECUTABLE_SUFFIX}"
+)
+
+llvm_add_go_executable(cc-wrapper llvm.org/llgo/cmd/cc-wrapper DEPENDS
+ cmd/cc-wrapper/main.go
+)
+
+function(add_clobber_steps name)
+ ExternalProject_Add_Step(${name} force-reconfigure
+ DEPENDERS configure
+ ALWAYS 1
+ )
+
+ ExternalProject_Add_Step(${name} clobber
+ COMMAND ${CMAKE_COMMAND} -E remove_directory <BINARY_DIR>
+ COMMAND ${CMAKE_COMMAND} -E make_directory <BINARY_DIR>
+ COMMENT "Clobbering ${name} build directory..."
+ DEPENDERS configure
+ DEPENDS ${ARGN}
+ )
+endfunction()
+
+processorcount(PROCESSOR_COUNT)
+
+function(add_libgo_variant suffix cflags gocflags deps exclude_from_all)
+ externalproject_add(libbacktrace${suffix}
+ DEPENDS clang ${deps}
+ SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gofrontend/libbacktrace
+ BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${suffix}/libbacktrace
+ CONFIGURE_COMMAND <SOURCE_DIR>/configure --disable-multilib --enable-host-shared "CC=${CMAKE_BINARY_DIR}/bin/clang ${cflags}"
+ BUILD_COMMAND make -j${PROCESSOR_COUNT}
+ INSTALL_COMMAND ""
+ LOG_CONFIGURE 1
+ LOG_BUILD 1
+ )
+ set_property(TARGET libbacktrace${suffix}
+ PROPERTY EXCLUDE_FROM_ALL ${exclude_from_all})
+
+ add_clobber_steps(libbacktrace${suffix} clang ${deps})
+
+ externalproject_add(libffi${suffix}
+ DEPENDS clang ${deps}
+ SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gofrontend/libffi
+ BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${suffix}/libffi
+ CONFIGURE_COMMAND <SOURCE_DIR>/configure --disable-multilib "CC=${CMAKE_BINARY_DIR}/bin/clang ${cflags}"
+ BUILD_COMMAND make -j${PROCESSOR_COUNT}
+ INSTALL_COMMAND ""
+ LOG_CONFIGURE 1
+ LOG_BUILD 1
+ )
+ set_property(TARGET libffi${suffix}
+ PROPERTY EXCLUDE_FROM_ALL ${exclude_from_all})
+
+ add_clobber_steps(libffi${suffix} clang ${deps})
+
+ externalproject_add(libgo${suffix}
+ DEPENDS clang llgo cc-wrapper libbacktrace${suffix} libffi${suffix} ${deps}
+ SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gofrontend/libgo
+ BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${suffix}/libgo
+ INSTALL_DIR ${CMAKE_BINARY_DIR}
+ CONFIGURE_COMMAND <SOURCE_DIR>/configure --disable-multilib --without-libatomic --prefix=<INSTALL_DIR> "CC=env REAL_CC=${CMAKE_BINARY_DIR}/bin/clang@SPACE@${cflags} ${CMAKE_BINARY_DIR}/bin/cc-wrapper" "GOC=${CMAKE_BINARY_DIR}/bin/llgo -no-prefix -fcompilerrt-prefix=${CMAKE_BINARY_DIR} ${gocflags}"
+ BUILD_COMMAND make -j${PROCESSOR_COUNT}
+ LOG_CONFIGURE 1
+ LOG_BUILD 1
+ LOG_INSTALL 1
+ )
+ set_property(TARGET libgo${suffix}
+ PROPERTY EXCLUDE_FROM_ALL ${exclude_from_all})
+
+ add_clobber_steps(libgo${suffix} clang
+ ${CMAKE_BINARY_DIR}/bin/llgo${CMAKE_EXECUTABLE_SUFFIX}
+ ${CMAKE_BINARY_DIR}/bin/cc-wrapper${CMAKE_EXECUTABLE_SUFFIX})
+endfunction()
+
+add_libgo_variant("" "" "" "" FALSE)
+
+if(TARGET asan)
+ add_libgo_variant("_asan" "-fsanitize=address" "-fsanitize=address" asan TRUE)
+endif()
+
+if(TARGET tsan)
+ add_libgo_variant("_tsan" "-fsanitize=thread" "-fsanitize=thread" tsan TRUE)
+endif()
+
+if(TARGET msan)
+ add_libgo_variant("_msan" "-fsanitize=memory" "-fsanitize=memory" msan TRUE)
+endif()
+
+if(TARGET dfsan)
+ add_libgo_variant("_dfsan" "-fsanitize=dataflow" "-fsanitize=dataflow" dfsan TRUE)
+endif()
+
+install(FILES ${CMAKE_BINARY_DIR}/lib/libgo-llgo.a
+ ${CMAKE_BINARY_DIR}/lib/libgo-llgo.so
+ ${CMAKE_BINARY_DIR}/lib/libgo-llgo.so.6
+ ${CMAKE_BINARY_DIR}/lib/libgo-llgo.so.6.0.0
+ ${CMAKE_BINARY_DIR}/lib/libgobegin-llgo.a
+ DESTINATION lib)
+
+install(DIRECTORY ${CMAKE_BINARY_DIR}/lib/go
+ DESTINATION lib)
+
+add_custom_target(check-libgo
+ COMMAND make -C ${CMAKE_CURRENT_BINARY_DIR}/libgo -j${PROCESSOR_COUNT} check
+ DEPENDS libgo
+ COMMENT "Running libgo tests")
+
+add_custom_target(check-llgo-bootstrap
+ COMMAND strip -R .note.gnu.build-id -o ${CMAKE_CURRENT_BINARY_DIR}/llgo-stage2.stripped
+ ${CMAKE_BINARY_DIR}/bin/llgo-stage2${CMAKE_EXECUTABLE_SUFFIX}
+ COMMAND strip -R .note.gnu.build-id -o ${CMAKE_CURRENT_BINARY_DIR}/llgo-stage3.stripped
+ ${CMAKE_BINARY_DIR}/bin/llgo-stage3${CMAKE_EXECUTABLE_SUFFIX}
+ COMMAND cmp ${CMAKE_CURRENT_BINARY_DIR}/llgo-stage2.stripped
+ ${CMAKE_CURRENT_BINARY_DIR}/llgo-stage3.stripped
+ DEPENDS llgo-stage2 llgo-stage3
+ COMMENT "Checking llgo bootstrap")
+
+add_subdirectory(test)
diff --git a/LICENSE.TXT b/LICENSE.TXT
new file mode 100644
index 0000000..900fd7c
--- /dev/null
+++ b/LICENSE.TXT
@@ -0,0 +1,93 @@
+The llgo distribution, excluding the contents of the 'include' and
+'third_party' directories, is licensed under the University of Illinois
+"BSD-Like" license.
+
+The contents of the 'include' directory are dual licensed under both the
+University of Illinois "BSD-Like" license and the MIT license. As a user of
+this code you may choose to use it under either license. As a contributor,
+you agree to allow your code to be used under both.
+
+Full text of the relevant licenses is included below.
+
+==============================================================================
+LLVM Release License
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2007-2014 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
+The following pieces of software have additional or alternate copyrights,
+licenses, and/or restrictions:
+
+Program Directory
+------- ---------
+go.tools third_party/go.tools
+gofrontend third_party/gofrontend
+
diff --git a/README.TXT b/README.TXT
new file mode 100644
index 0000000..e3c2fb3
--- /dev/null
+++ b/README.TXT
@@ -0,0 +1,74 @@
+llgo
+====
+
+llgo is a Go (http://golang.org) frontend for LLVM, written in Go.
+
+llgo is under active development. It compiles and passes most of the
+standard library test suite and a substantial portion of the gc test suite,
+but there are some corner cases that are known not to be handled correctly
+yet. Nevertheless, it can compile modestly substantial programs (including
+itself; it is self hosting on x86-64 Linux).
+
+Mailing list: https://groups.google.com/d/forum/llgo-dev
+
+Supported platforms
+-------------------
+
+llgo is currently only supported on the x86-64 Linux platform. Contributions
+that add support for other platforms are welcome.
+
+There are two components which would need to be ported to new platforms: the
+compiler and the runtime library. The compiler has little platform-specific
+code; the most significant is in irgen/cabi.go. The main limiting factor
+for new platforms is the runtime library in third_party/gofrontend/libgo,
+which inherits some support for other platforms from the gc compiler's
+runtime library, but this support tends to be incomplete.
+
+Installation
+------------
+
+llgo requires:
+* Go 1.3 or later.
+* CMake 2.8.8 or later (to build LLVM).
+* A modern C++ toolchain (to build LLVM).
+ http://llvm.org/docs/GettingStarted.html#getting-a-modern-host-c-toolchain
+
+Note that Ubuntu Precise is one Linux distribution that does not package
+a sufficiently new CMake or C++ toolchain.
+
+To build and install llgo:
+
+ # Checkout LLVM:
+ svn co http://llvm.org/svn/llvm-project/llvm/trunk /path/to/llvm
+
+ # Checkout Clang:
+ cd /path/to/llvm/tools
+ svn co http://llvm.org/svn/llvm-project/cfe/trunk clang
+
+ # Checkout llgo:
+ svn co http://llvm.org/svn/llvm-project/llgo/trunk llgo
+
+ # Build LLVM, Clang and llgo: (see also http://llvm.org/docs/CMake.html)
+ mkdir /path/to/llvm-build
+ cd /path/to/llvm-build
+ cmake /path/to/llvm -DCMAKE_INSTALL_PREFIX=/path/to/llvm-inst
+ make install
+
+Running
+-------
+
+llgo is the compiler binary. It has a command line interface that is intended
+to be compatible to a large extent with gccgo. It can be used with the go
+command shipped with Go 1.4 or later by setting $GCCGO to the path to the
+llgo executable and supplying the "-compiler gccgo" flag to "go build".
+
+Contributing
+------------
+
+Changes to code outside the third_party directory should be contributed in
+the normal way by sending patches to <llvm-commits@cs.uiuc.edu>.
+
+Changes to code in the third_party directory must first be made in the
+respective upstream project, from which they will be mirrored into the llgo
+repository. See the script update_third_party.sh for the locations of the
+upstream projects and details of how the mirroring works.
diff --git a/build/context.go b/build/context.go
new file mode 100644
index 0000000..c1ed935
--- /dev/null
+++ b/build/context.go
@@ -0,0 +1,95 @@
+//===- context.go - Build context utilities for llgo ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Build context utilities for llgo.
+//
+//===----------------------------------------------------------------------===//
+
+package build
+
+import (
+ "errors"
+ "go/build"
+ "regexp"
+ "strings"
+)
+
+type Context struct {
+ build.Context
+
+ // LLVM triple
+ Triple string
+}
+
+// ContextFromTriple returns a new go/build.Context with GOOS and GOARCH
+// configured from the given triple.
+func ContextFromTriple(triple string) (*Context, error) {
+ goos, goarch, err := parseTriple(triple)
+ if err != nil {
+ return nil, err
+ }
+ ctx := &Context{Context: build.Default, Triple: triple}
+ ctx.GOOS = goos
+ ctx.GOARCH = goarch
+ ctx.BuildTags = append(ctx.BuildTags, "llgo")
+ if triple == "pnacl" {
+ ctx.BuildTags = append(ctx.BuildTags, "pnacl")
+ }
+ return ctx, nil
+}
+
+func parseTriple(triple string) (goos string, goarch string, err error) {
+ if strings.ToLower(triple) == "pnacl" {
+ return "nacl", "le32", nil
+ }
+
+ type REs struct{ re, out string }
+ // reference: http://llvm.org/docs/doxygen/html/Triple_8cpp_source.html
+ goarchREs := []REs{
+ {"amd64|x86_64", "amd64"},
+ {"i[3-9]86", "386"},
+ {"xscale|((arm|thumb)(v.*)?)", "arm"},
+ }
+ goosREs := []REs{
+ {"linux.*", "linux"},
+ {"(darwin|macosx|ios).*", "darwin"},
+ {"k?freebsd.*", "freebsd"},
+ {"netbsd.*", "netbsd"},
+ {"openbsd.*", "openbsd"},
+ }
+ match := func(list []REs, s string) string {
+ for _, t := range list {
+ if matched, _ := regexp.MatchString(t.re, s); matched {
+ return t.out
+ }
+ }
+ return ""
+ }
+
+ s := strings.Split(triple, "-")
+ switch l := len(s); l {
+ default:
+ return "", "", errors.New("triple should be made up of 2, 3, or 4 parts.")
+ case 2, 3: // ARCHITECTURE-(VENDOR-)OPERATING_SYSTEM
+ goarch = s[0]
+ goos = s[l-1]
+ case 4: // ARCHITECTURE-VENDOR-OPERATING_SYSTEM-ENVIRONMENT
+ goarch = s[0]
+ goos = s[2]
+ }
+ goarch = match(goarchREs, goarch)
+ if goarch == "" {
+ return "", "", errors.New("unknown architecture in triple")
+ }
+ goos = match(goosREs, goos)
+ if goos == "" {
+ return "", "", errors.New("unknown OS in triple")
+ }
+ return goos, goarch, nil
+}
diff --git a/cmd/cc-wrapper/main.go b/cmd/cc-wrapper/main.go
new file mode 100644
index 0000000..5a37522
--- /dev/null
+++ b/cmd/cc-wrapper/main.go
@@ -0,0 +1,71 @@
+//===- main.go - Clang compiler wrapper for building libgo ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a wrapper for Clang that passes invocations with -fdump-go-spec to
+// GCC, and rewrites -fplan9-extensions to -fms-extensions. It is intended to
+// go away once libgo's build no longer uses these flags.
+//
+//===----------------------------------------------------------------------===//
+
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+func runproc(name string, argv []string) {
+ path, err := exec.LookPath(name)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "cc-wrapper: could not find %s: %v\n", name, err)
+ os.Exit(1)
+ }
+
+ proc, err := os.StartProcess(path, append([]string{name}, argv...), &os.ProcAttr{
+ Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
+ })
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "cc-wrapper: could not start %s: %v\n", name, err)
+ os.Exit(1)
+ }
+
+ state, err := proc.Wait()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "cc-wrapper: could not wait for %s: %v\n", name, err)
+ os.Exit(1)
+ }
+
+ if state.Success() {
+ os.Exit(0)
+ } else {
+ os.Exit(1)
+ }
+}
+
+func main() {
+ newargs := make([]string, len(os.Args)-1)
+ for i, arg := range os.Args[1:] {
+ switch {
+ case strings.HasPrefix(arg, "-fdump-go-spec"):
+ runproc("gcc", os.Args[1:])
+
+ case arg == "-fplan9-extensions":
+ newargs[i] = "-fms-extensions"
+ newargs = append(newargs, "-Wno-microsoft")
+
+ default:
+ newargs[i] = arg
+ }
+ }
+
+ ccargs := strings.Split(os.Getenv("REAL_CC"), "@SPACE@")
+ runproc(ccargs[0], append(ccargs[1:], newargs...))
+}
diff --git a/cmd/gllgo/gllgo.go b/cmd/gllgo/gllgo.go
new file mode 100644
index 0000000..db0eee9
--- /dev/null
+++ b/cmd/gllgo/gllgo.go
@@ -0,0 +1,811 @@
+//===- gllgo.go - gccgo-like driver for llgo ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is llgo's driver. It has a gccgo-like interface in order to easily
+// interoperate with the "go" command and the libgo build system.
+//
+//===----------------------------------------------------------------------===//
+
+package main
+
+import (
+ "errors"
+ "fmt"
+ "go/scanner"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "llvm.org/llgo/debug"
+ "llvm.org/llgo/irgen"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+func report(err error) {
+ if list, ok := err.(scanner.ErrorList); ok {
+ for _, e := range list {
+ fmt.Fprintf(os.Stderr, "%s\n", e)
+ }
+ } else if err != nil {
+ fmt.Fprintf(os.Stderr, "gllgo: error: %s\n", err)
+ }
+}
+
+func llvmVersion() string {
+ return strings.Replace(llvm.Version, "svn", "", 1)
+}
+
+func displayVersion() {
+ fmt.Printf("llgo version %s (%s)\n\n", llvmVersion(), irgen.GoVersion())
+ os.Exit(0)
+}
+
+func initCompiler(opts *driverOptions) (*irgen.Compiler, error) {
+ importPaths := make([]string, len(opts.importPaths)+len(opts.libPaths))
+ copy(importPaths, opts.importPaths)
+ copy(importPaths[len(opts.importPaths):], opts.libPaths)
+ if opts.prefix != "" {
+ importPaths = append(importPaths, filepath.Join(opts.prefix, "lib", "go", "llgo-"+llvmVersion()))
+ }
+ copts := irgen.CompilerOptions{
+ TargetTriple: opts.triple,
+ GenerateDebug: opts.generateDebug,
+ DebugPrefixMaps: opts.debugPrefixMaps,
+ DumpSSA: opts.dumpSSA,
+ GccgoPath: opts.gccgoPath,
+ ImportPaths: importPaths,
+ SanitizerAttribute: opts.sanitizer.getAttribute(),
+ }
+ if opts.dumpTrace {
+ copts.Logger = log.New(os.Stderr, "", 0)
+ }
+ return irgen.NewCompiler(copts)
+}
+
+type actionKind int
+
+const (
+ actionAssemble = actionKind(iota)
+ actionCompile
+ actionLink
+ actionPrint
+)
+
+type action struct {
+ kind actionKind
+ inputs []string
+}
+
+type sanitizerOptions struct {
+ blacklist string
+ crtPrefix string
+
+ address, thread, memory, dataflow bool
+}
+
+func (san *sanitizerOptions) resourcePath() string {
+ return filepath.Join(san.crtPrefix, "lib", "clang", llvmVersion())
+}
+
+func (san *sanitizerOptions) isPIEDefault() bool {
+ return san.thread || san.memory || san.dataflow
+}
+
+func (san *sanitizerOptions) addPasses(mpm, fpm llvm.PassManager) {
+ switch {
+ case san.address:
+ mpm.AddAddressSanitizerModulePass()
+ fpm.AddAddressSanitizerFunctionPass()
+ case san.thread:
+ mpm.AddThreadSanitizerPass()
+ case san.memory:
+ mpm.AddMemorySanitizerPass()
+ case san.dataflow:
+ blacklist := san.blacklist
+ if blacklist == "" {
+ blacklist = filepath.Join(san.resourcePath(), "dfsan_abilist.txt")
+ }
+ mpm.AddDataFlowSanitizerPass(blacklist)
+ }
+}
+
+func (san *sanitizerOptions) libPath(triple, sanitizerName string) string {
+ s := strings.Split(triple, "-")
+ return filepath.Join(san.resourcePath(), "lib", s[2], "libclang_rt."+sanitizerName+"-"+s[0]+".a")
+}
+
+func (san *sanitizerOptions) addLibsForSanitizer(flags []string, triple, sanitizerName string) []string {
+ return append(flags, san.libPath(triple, sanitizerName),
+ "-Wl,--no-as-needed", "-lpthread", "-lrt", "-lm", "-ldl")
+}
+
+func (san *sanitizerOptions) addLibs(triple string, flags []string) []string {
+ switch {
+ case san.address:
+ flags = san.addLibsForSanitizer(flags, triple, "asan")
+ case san.thread:
+ flags = san.addLibsForSanitizer(flags, triple, "tsan")
+ case san.memory:
+ flags = san.addLibsForSanitizer(flags, triple, "msan")
+ case san.dataflow:
+ flags = san.addLibsForSanitizer(flags, triple, "dfsan")
+ }
+
+ return flags
+}
+
+func (san *sanitizerOptions) getAttribute() llvm.Attribute {
+ switch {
+ case san.address:
+ return llvm.SanitizeAddressAttribute
+ case san.thread:
+ return llvm.SanitizeThreadAttribute
+ case san.memory:
+ return llvm.SanitizeMemoryAttribute
+ default:
+ return 0
+ }
+}
+
+type driverOptions struct {
+ actions []action
+ output string
+
+ bprefix string
+ debugPrefixMaps []debug.PrefixMap
+ dumpSSA bool
+ dumpTrace bool
+ emitIR bool
+ gccgoPath string
+ generateDebug bool
+ importPaths []string
+ libPaths []string
+ llvmArgs []string
+ lto bool
+ optLevel int
+ pic bool
+ pieLink bool
+ pkgpath string
+ plugins []string
+ prefix string
+ sanitizer sanitizerOptions
+ sizeLevel int
+ staticLibgcc bool
+ staticLibgo bool
+ staticLink bool
+ triple string
+}
+
+func getInstPrefix() (string, error) {
+ path, err := exec.LookPath(os.Args[0])
+ if err != nil {
+ return "", err
+ }
+
+ path, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return "", err
+ }
+
+ prefix := filepath.Join(path, "..", "..")
+ return prefix, nil
+}
+
+func parseArguments(args []string) (opts driverOptions, err error) {
+ var goInputs, otherInputs []string
+ hasOtherNonFlagInputs := false
+ noPrefix := false
+ actionKind := actionLink
+ opts.triple = llvm.DefaultTargetTriple()
+
+ for len(args) > 0 {
+ consumedArgs := 1
+
+ switch {
+ case !strings.HasPrefix(args[0], "-"):
+ if strings.HasSuffix(args[0], ".go") {
+ goInputs = append(goInputs, args[0])
+ } else {
+ hasOtherNonFlagInputs = true
+ otherInputs = append(otherInputs, args[0])
+ }
+
+ case strings.HasPrefix(args[0], "-Wl,"), strings.HasPrefix(args[0], "-l"), strings.HasPrefix(args[0], "--sysroot="):
+ // TODO(pcc): Handle these correctly.
+ otherInputs = append(otherInputs, args[0])
+
+ case args[0] == "-B":
+ if len(args) == 1 {
+ return opts, errors.New("missing argument after '-B'")
+ }
+ opts.bprefix = args[1]
+ consumedArgs = 2
+
+ case args[0] == "-D":
+ if len(args) == 1 {
+ return opts, errors.New("missing argument after '-D'")
+ }
+ otherInputs = append(otherInputs, args[0], args[1])
+ consumedArgs = 2
+
+ case strings.HasPrefix(args[0], "-D"):
+ otherInputs = append(otherInputs, args[0])
+
+ case args[0] == "-I":
+ if len(args) == 1 {
+ return opts, errors.New("missing argument after '-I'")
+ }
+ opts.importPaths = append(opts.importPaths, args[1])
+ consumedArgs = 2
+
+ case strings.HasPrefix(args[0], "-I"):
+ opts.importPaths = append(opts.importPaths, args[0][2:])
+
+ case args[0] == "-isystem":
+ if len(args) == 1 {
+ return opts, errors.New("missing argument after '-isystem'")
+ }
+ otherInputs = append(otherInputs, args[0], args[1])
+ consumedArgs = 2
+
+ case args[0] == "-L":
+ if len(args) == 1 {
+ return opts, errors.New("missing argument after '-L'")
+ }
+ opts.libPaths = append(opts.libPaths, args[1])
+ consumedArgs = 2
+
+ case strings.HasPrefix(args[0], "-L"):
+ opts.libPaths = append(opts.libPaths, args[0][2:])
+
+ case args[0] == "-O0":
+ opts.optLevel = 0
+
+ case args[0] == "-O1", args[0] == "-O":
+ opts.optLevel = 1
+
+ case args[0] == "-O2":
+ opts.optLevel = 2
+
+ case args[0] == "-Os":
+ opts.optLevel = 2
+ opts.sizeLevel = 1
+
+ case args[0] == "-O3":
+ opts.optLevel = 3
+
+ case args[0] == "-S":
+ actionKind = actionAssemble
+
+ case args[0] == "-c":
+ actionKind = actionCompile
+
+ case strings.HasPrefix(args[0], "-fcompilerrt-prefix="):
+ opts.sanitizer.crtPrefix = args[0][20:]
+
+ case strings.HasPrefix(args[0], "-fdebug-prefix-map="):
+ split := strings.SplitN(args[0][19:], "=", 2)
+ if len(split) < 2 {
+ return opts, fmt.Errorf("argument '%s' must be of form '-fdebug-prefix-map=SOURCE=REPLACEMENT'", args[0])
+ }
+ opts.debugPrefixMaps = append(opts.debugPrefixMaps, debug.PrefixMap{split[0], split[1]})
+
+ case args[0] == "-fdump-ssa":
+ opts.dumpSSA = true
+
+ case args[0] == "-fdump-trace":
+ opts.dumpTrace = true
+
+ case strings.HasPrefix(args[0], "-fgccgo-path="):
+ opts.gccgoPath = args[0][13:]
+
+ case strings.HasPrefix(args[0], "-fgo-pkgpath="):
+ opts.pkgpath = args[0][13:]
+
+ case strings.HasPrefix(args[0], "-fgo-relative-import-path="):
+ // TODO(pcc): Handle this.
+
+ case args[0] == "-fload-plugin":
+ if len(args) == 1 {
+ return opts, errors.New("missing argument after '-fload-plugin'")
+ }
+ opts.plugins = append(opts.plugins, args[1])
+ consumedArgs = 2
+
+ case args[0] == "-fno-toplevel-reorder":
+ // This is a GCC-specific code generation option. Ignore.
+
+ case args[0] == "-emit-llvm":
+ opts.emitIR = true
+
+ case args[0] == "-flto":
+ opts.lto = true
+
+ case args[0] == "-fPIC":
+ opts.pic = true
+
+ case strings.HasPrefix(args[0], "-fsanitize-blacklist="):
+ opts.sanitizer.blacklist = args[0][21:]
+
+ // TODO(pcc): Enforce mutual exclusion between sanitizers.
+
+ case args[0] == "-fsanitize=address":
+ opts.sanitizer.address = true
+
+ case args[0] == "-fsanitize=thread":
+ opts.sanitizer.thread = true
+
+ case args[0] == "-fsanitize=memory":
+ opts.sanitizer.memory = true
+
+ case args[0] == "-fsanitize=dataflow":
+ opts.sanitizer.dataflow = true
+
+ case args[0] == "-g":
+ opts.generateDebug = true
+
+ case args[0] == "-mllvm":
+ if len(args) == 1 {
+ return opts, errors.New("missing argument after '-mllvm'")
+ }
+ opts.llvmArgs = append(opts.llvmArgs, args[1])
+ consumedArgs = 2
+
+ case strings.HasPrefix(args[0], "-m"), args[0] == "-funsafe-math-optimizations", args[0] == "-ffp-contract=off":
+ // TODO(pcc): Handle code generation options.
+
+ case args[0] == "-no-prefix":
+ noPrefix = true
+
+ case args[0] == "-o":
+ if len(args) == 1 {
+ return opts, errors.New("missing argument after '-o'")
+ }
+ opts.output = args[1]
+ consumedArgs = 2
+
+ case args[0] == "-pie":
+ opts.pieLink = true
+
+ case args[0] == "-dumpversion",
+ args[0] == "-print-libgcc-file-name",
+ args[0] == "-print-multi-os-directory",
+ args[0] == "--version":
+ actionKind = actionPrint
+ opts.output = args[0]
+
+ case args[0] == "-static":
+ opts.staticLink = true
+
+ case args[0] == "-static-libgcc":
+ opts.staticLibgcc = true
+
+ case args[0] == "-static-libgo":
+ opts.staticLibgo = true
+
+ default:
+ return opts, fmt.Errorf("unrecognized command line option '%s'", args[0])
+ }
+
+ args = args[consumedArgs:]
+ }
+
+ if actionKind != actionPrint && len(goInputs) == 0 && !hasOtherNonFlagInputs {
+ return opts, errors.New("no input files")
+ }
+
+ if !noPrefix {
+ opts.prefix, err = getInstPrefix()
+ if err != nil {
+ return opts, err
+ }
+ }
+
+ if opts.sanitizer.crtPrefix == "" {
+ opts.sanitizer.crtPrefix = opts.prefix
+ }
+
+ if opts.sanitizer.isPIEDefault() {
+ // This should really only be turning on -fPIE, but this isn't
+ // easy to do from Go, and -fPIC is a superset of it anyway.
+ opts.pic = true
+ opts.pieLink = true
+ }
+
+ switch actionKind {
+ case actionLink:
+ if len(goInputs) != 0 {
+ opts.actions = []action{action{actionCompile, goInputs}}
+ }
+ opts.actions = append(opts.actions, action{actionLink, otherInputs})
+
+ case actionCompile, actionAssemble:
+ if len(goInputs) != 0 {
+ opts.actions = []action{action{actionKind, goInputs}}
+ }
+
+ case actionPrint:
+ opts.actions = []action{action{actionKind, nil}}
+ }
+
+ if opts.output == "" && len(opts.actions) != 0 {
+ switch actionKind {
+ case actionCompile, actionAssemble:
+ base := filepath.Base(goInputs[0])
+ base = base[0 : len(base)-3]
+ if actionKind == actionCompile {
+ opts.output = base + ".o"
+ } else {
+ opts.output = base + ".s"
+ }
+
+ case actionLink:
+ opts.output = "a.out"
+ }
+ }
+
+ return opts, nil
+}
+
+func runPasses(opts *driverOptions, tm llvm.TargetMachine, m llvm.Module) {
+ fpm := llvm.NewFunctionPassManagerForModule(m)
+ defer fpm.Dispose()
+
+ mpm := llvm.NewPassManager()
+ defer mpm.Dispose()
+
+ pmb := llvm.NewPassManagerBuilder()
+ defer pmb.Dispose()
+
+ pmb.SetOptLevel(opts.optLevel)
+ pmb.SetSizeLevel(opts.sizeLevel)
+
+ target := tm.TargetData()
+ mpm.Add(target)
+ fpm.Add(target)
+ tm.AddAnalysisPasses(mpm)
+ tm.AddAnalysisPasses(fpm)
+
+ mpm.AddVerifierPass()
+ fpm.AddVerifierPass()
+
+ pmb.Populate(mpm)
+ pmb.PopulateFunc(fpm)
+
+ if opts.optLevel == 0 {
+ // Remove references (via the descriptor) to dead functions,
+ // for compatibility with other compilers.
+ mpm.AddGlobalDCEPass()
+ }
+
+ opts.sanitizer.addPasses(mpm, fpm)
+
+ fpm.InitializeFunc()
+ for fn := m.FirstFunction(); !fn.IsNil(); fn = llvm.NextFunction(fn) {
+ fpm.RunFunc(fn)
+ }
+ fpm.FinalizeFunc()
+
+ mpm.Run(m)
+}
+
+func getMetadataSectionInlineAsm(name string) string {
+ // ELF: creates a non-allocated excluded section.
+ return ".section \"" + name + "\", \"e\"\n"
+}
+
+func getDataInlineAsm(data []byte) string {
+ edata := make([]byte, 0, len(data)*4+10)
+
+ edata = append(edata, ".ascii \""...)
+ for i := range data {
+ switch data[i] {
+ case '\000':
+ edata = append(edata, "\\000"...)
+ continue
+ case '\n':
+ edata = append(edata, "\\n"...)
+ continue
+ case '"', '\\':
+ edata = append(edata, '\\')
+ }
+ edata = append(edata, data[i])
+ }
+ edata = append(edata, "\"\n"...)
+ return string(edata)
+}
+
+// Get the lib-relative path to the standard libraries for the given driver
+// options. This is normally '.' but can vary for cross compilation, LTO,
+// sanitizers etc.
+func getVariantDir(opts *driverOptions) string {
+ switch {
+ case opts.lto:
+ return "llvm-lto.0"
+ case opts.sanitizer.address:
+ return "llvm-asan.0"
+ case opts.sanitizer.thread:
+ return "llvm-tsan.0"
+ case opts.sanitizer.memory:
+ return "llvm-msan.0"
+ case opts.sanitizer.dataflow:
+ return "llvm-dfsan.0"
+ default:
+ return "."
+ }
+}
+
// performAction executes one driver action: a -print style query, a
// compile/assemble step, or the final link. inputs are the action's
// input paths; output receives the result ("-" selects stdout when
// compiling or assembling).
func performAction(opts *driverOptions, kind actionKind, inputs []string, output string) error {
	switch kind {
	case actionPrint:
		// For print actions the requested flag was stored in opts.output.
		switch opts.output {
		case "-dumpversion":
			fmt.Println("llgo-"+llvmVersion())
			return nil
		case "-print-libgcc-file-name":
			// Delegate to the (possibly bprefix'ed) gcc to locate libgcc.
			cmd := exec.Command(opts.bprefix+"gcc", "-print-libgcc-file-name")
			out, err := cmd.CombinedOutput()
			os.Stdout.Write(out)
			return err
		case "-print-multi-os-directory":
			fmt.Println(getVariantDir(opts))
			return nil
		case "--version":
			displayVersion()
			return nil
		default:
			panic("unexpected print command")
		}

	case actionCompile, actionAssemble:
		compiler, err := initCompiler(opts)
		if err != nil {
			return err
		}

		// Compile all inputs into a single LLVM module.
		module, err := compiler.Compile(inputs, opts.pkgpath)
		if err != nil {
			return err
		}

		defer module.Dispose()

		target, err := llvm.GetTargetFromTriple(opts.triple)
		if err != nil {
			return err
		}

		// Map -O0..-O3 to codegen levels; assumes opts.optLevel is in
		// [0,3] — presumably enforced by option parsing; TODO confirm.
		optLevel := [...]llvm.CodeGenOptLevel{
			llvm.CodeGenLevelNone,
			llvm.CodeGenLevelLess,
			llvm.CodeGenLevelDefault,
			llvm.CodeGenLevelAggressive,
		}[opts.optLevel]

		relocMode := llvm.RelocStatic
		if opts.pic {
			relocMode = llvm.RelocPIC
		}

		tm := target.CreateTargetMachine(opts.triple, "", "", optLevel,
			relocMode, llvm.CodeModelDefault)
		defer tm.Dispose()

		runPasses(opts, tm, module.Module)

		var file *os.File
		if output == "-" {
			file = os.Stdout
		} else {
			file, err = os.Create(output)
			if err != nil {
				return err
			}
			defer file.Close()
		}

		// Three emission modes: native code (default), LTO wrapper
		// object, or LLVM bitcode/IR (-emit-llvm).
		switch {
		case !opts.lto && !opts.emitIR:
			// Embed the Go export data in a dedicated section so
			// importers can read it back out of the object file.
			if module.ExportData != nil {
				asm := getMetadataSectionInlineAsm(".go_export")
				asm += getDataInlineAsm(module.ExportData)
				module.Module.SetInlineAsm(asm)
			}

			fileType := llvm.AssemblyFile
			if kind == actionCompile {
				fileType = llvm.ObjectFile
			}
			mb, err := tm.EmitToMemoryBuffer(module.Module, fileType)
			if err != nil {
				return err
			}
			defer mb.Dispose()

			bytes := mb.Bytes()
			_, err = file.Write(bytes)
			return err

		case opts.lto:
			bcmb := llvm.WriteBitcodeToMemoryBuffer(module.Module)
			defer bcmb.Dispose()

			// This is a bit of a hack. We just want an object file
			// containing some metadata sections. This might be simpler
			// if we had bindings for the MC library, but for now we create
			// a fresh module containing only inline asm that creates the
			// sections.
			outmodule := llvm.NewModule("")
			defer outmodule.Dispose()
			asm := getMetadataSectionInlineAsm(".llvmbc")
			asm += getDataInlineAsm(bcmb.Bytes())
			if module.ExportData != nil {
				asm += getMetadataSectionInlineAsm(".go_export")
				asm += getDataInlineAsm(module.ExportData)
			}
			outmodule.SetInlineAsm(asm)

			fileType := llvm.AssemblyFile
			if kind == actionCompile {
				fileType = llvm.ObjectFile
			}
			mb, err := tm.EmitToMemoryBuffer(outmodule, fileType)
			if err != nil {
				return err
			}
			defer mb.Dispose()

			bytes := mb.Bytes()
			_, err = file.Write(bytes)
			return err

		case kind == actionCompile:
			// -emit-llvm with -c: write bitcode.
			err := llvm.WriteBitcodeToFile(module.Module, file)
			return err

		case kind == actionAssemble:
			// -emit-llvm with -S: write textual IR.
			_, err := file.WriteString(module.Module.String())
			return err

		default:
			panic("unexpected action kind")
		}

	case actionLink:
		// TODO(pcc): Teach this to do LTO.
		args := []string{"-o", output}
		if opts.pic {
			args = append(args, "-fPIC")
		}
		if opts.pieLink {
			args = append(args, "-pie")
		}
		if opts.staticLink {
			args = append(args, "-static")
		}
		if opts.staticLibgcc {
			args = append(args, "-static-libgcc")
		}
		for _, p := range opts.libPaths {
			args = append(args, "-L", p)
		}
		for _, p := range opts.importPaths {
			args = append(args, "-I", p)
		}
		args = append(args, inputs...)
		var linkerPath string
		if opts.gccgoPath == "" {
			// TODO(pcc): See if we can avoid calling gcc here.
			// We currently rely on it to find crt*.o and compile
			// any C source files passed as arguments.
			linkerPath = opts.bprefix + "gcc"

			if opts.prefix != "" {
				libdir := filepath.Join(opts.prefix, "lib", getVariantDir(opts))
				args = append(args, "-L", libdir)
				if !opts.staticLibgo {
					args = append(args, "-Wl,-rpath,"+libdir)
				}
			}

			args = append(args, "-lgobegin-llgo")
			if opts.staticLibgo {
				args = append(args, "-Wl,-Bstatic", "-lgo-llgo", "-Wl,-Bdynamic", "-lpthread", "-lm")
			} else {
				args = append(args, "-lgo-llgo")
			}
		} else {
			// Link through gccgo instead of gcc.
			linkerPath = opts.gccgoPath
			if opts.staticLibgo {
				args = append(args, "-static-libgo")
			}
		}

		args = opts.sanitizer.addLibs(opts.triple, args)

		cmd := exec.Command(linkerPath, args...)
		out, err := cmd.CombinedOutput()
		if err != nil {
			// Surface the linker's diagnostics only on failure.
			os.Stderr.Write(out)
		}
		return err

	default:
		panic("unexpected action kind")
	}
}
+
// performActions runs the parsed actions in order, threading each
// action's output file into the next action's inputs; the final
// action writes to opts.output. Intermediate files are removed when
// this function returns.
func performActions(opts *driverOptions) error {
	var extraInput string

	// Load LLVM plugins before option parsing so that plugin-defined
	// options are recognized.
	for _, plugin := range opts.plugins {
		err := llvm.LoadLibraryPermanently(plugin)
		if err != nil {
			return err
		}
	}

	llvm.ParseCommandLineOptions(append([]string{"llgo"}, opts.llvmArgs...), "llgo (LLVM option parsing)\n")

	for i, action := range opts.actions {
		var output string
		if i == len(opts.actions)-1 {
			output = opts.output
		} else {
			// Derive a unique intermediate name with a .o suffix.
			// NOTE(review): the reserving tempfile is removed before
			// the .o is written, leaving a small window in which the
			// name is unclaimed — confirm this race is acceptable.
			tmpfile, err := ioutil.TempFile("", "llgo")
			if err != nil {
				return err
			}
			output = tmpfile.Name() + ".o"
			tmpfile.Close()
			err = os.Remove(tmpfile.Name())
			if err != nil {
				return err
			}
			defer os.Remove(output)
		}

		inputs := action.inputs
		if extraInput != "" {
			inputs = append([]string{extraInput}, inputs...)
		}

		err := performAction(opts, action.kind, inputs, output)
		if err != nil {
			return err
		}

		extraInput = output
	}

	return nil
}
+
// main initializes code generation for all LLVM targets (llgo can
// cross-compile), parses the driver command line, and executes the
// resulting actions, exiting with status 1 on any error.
func main() {
	llvm.InitializeAllTargets()
	llvm.InitializeAllTargetMCs()
	llvm.InitializeAllTargetInfos()
	llvm.InitializeAllAsmParsers()
	llvm.InitializeAllAsmPrinters()

	opts, err := parseArguments(os.Args[1:])
	if err != nil {
		report(err)
		os.Exit(1)
	}

	err = performActions(&opts)
	if err != nil {
		report(err)
		os.Exit(1)
	}
}
diff --git a/debug/debug.go b/debug/debug.go
new file mode 100644
index 0000000..fcf1607
--- /dev/null
+++ b/debug/debug.go
@@ -0,0 +1,460 @@
+//===- debug.go - debug info builder --------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This package builds LLVM debug info from go/* data structures.
+//
+//===----------------------------------------------------------------------===//
+
+package debug
+
+import (
+ "debug/dwarf"
+ "fmt"
+ "go/token"
+ "os"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llgo/third_party/go.tools/go/types/typeutil"
+
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
const (
	// non-standard debug metadata tags; 0x100/0x101 lie outside the
	// tag ranges defined by the DWARF standard
	tagAutoVariable dwarf.Tag = 0x100 // local variable
	tagArgVariable dwarf.Tag = 0x101 // function parameter
)

// PrefixMap describes a source-path rewrite: file paths beginning
// with Source are recorded in debug info with that prefix replaced by
// Replacement (see remapFilePath).
type PrefixMap struct {
	Source, Replacement string
}

// DIBuilder builds debug metadata for Go programs.
type DIBuilder struct {
	// builder is the current builder; there is one per CU.
	builder *llvm.DIBuilder
	module llvm.Module
	files map[*token.File]llvm.Value // cached DIFile metadata per source file
	cu, fn, lb llvm.Value // current compile unit, function and lexical-block scopes
	fnFile string // file name of the current function
	debugScope []llvm.Value // NOTE(review): not referenced in this file's visible code
	sizes types.Sizes
	fset *token.FileSet
	prefixMaps []PrefixMap // path rewrites applied to recorded file names
	types typeutil.Map // memoized type -> debug descriptor mapping
	voidType llvm.Value // lazily created "void" basic type
}
+
+// NewDIBuilder creates a new debug information builder.
+func NewDIBuilder(sizes types.Sizes, module llvm.Module, fset *token.FileSet, prefixMaps []PrefixMap) *DIBuilder {
+ var d DIBuilder
+ d.module = module
+ d.files = make(map[*token.File]llvm.Value)
+ d.sizes = sizes
+ d.fset = fset
+ d.prefixMaps = prefixMaps
+ d.builder = llvm.NewDIBuilder(d.module)
+ d.cu = d.createCompileUnit()
+ return &d
+}
+
// Destroy destroys the DIBuilder, releasing the underlying
// llvm.DIBuilder's resources.
func (d *DIBuilder) Destroy() {
	d.builder.Destroy()
}
+
+func (d *DIBuilder) scope() llvm.Value {
+ if d.lb.C != nil {
+ return d.lb
+ }
+ if d.fn.C != nil {
+ return d.fn
+ }
+ return d.cu
+}
+
+func (d *DIBuilder) remapFilePath(path string) string {
+ for _, pm := range d.prefixMaps {
+ if strings.HasPrefix(path, pm.Source) {
+ return pm.Replacement + path[len(pm.Source):]
+ }
+ }
+ return path
+}
+
// getFile returns DIFile metadata for file, creating and caching it
// on first use; recorded paths go through the prefix maps.
func (d *DIBuilder) getFile(file *token.File) llvm.Value {
	if diFile := d.files[file]; !diFile.IsNil() {
		return diFile
	}
	diFile := d.builder.CreateFile(d.remapFilePath(file.Name()), "")
	d.files[file] = diFile
	return diFile
}

// createCompileUnit creates and returns debug metadata for the compile
// unit as a whole, using the first file in the file set as a representative
// (the choice of file is arbitrary).
func (d *DIBuilder) createCompileUnit() llvm.Value {
	var file *token.File
	d.fset.Iterate(func(f *token.File) bool {
		file = f
		return false // stop after the first file
	})
	dir, err := os.Getwd()
	if err != nil {
		panic("could not get current directory: " + err.Error())
	}
	return d.builder.CreateCompileUnit(llvm.DICompileUnit{
		Language: llvm.DW_LANG_Go,
		File: d.remapFilePath(file.Name()),
		Dir: dir,
		Producer: "llgo",
	})
}
+
// PushFunction creates debug metadata for the specified function,
// and pushes it onto the scope stack.
func (d *DIBuilder) PushFunction(fnptr llvm.Value, sig *types.Signature, pos token.Pos) {
	var diFile llvm.Value
	var line int
	if file := d.fset.File(pos); file != nil {
		d.fnFile = file.Name()
		diFile = d.getFile(file)
		line = file.Line(pos)
	}
	d.fn = d.builder.CreateFunction(d.scope(), llvm.DIFunction{
		Name: fnptr.Name(), // TODO(axw) unmangled name?
		LinkageName: fnptr.Name(),
		File: diFile,
		Line: line,
		Type: d.DIType(sig),
		IsDefinition: true,
		Function: fnptr,
	})
}

// PopFunction pops the previously pushed function off the scope stack,
// also clearing the lexical-block scope and current file name.
func (d *DIBuilder) PopFunction() {
	d.lb = llvm.Value{nil}
	d.fn = llvm.Value{nil}
	d.fnFile = ""
}

// Declare creates an llvm.dbg.declare call for the specified function
// parameter or local variable. paramIndex is the zero-based parameter
// position, or negative for a non-parameter local.
func (d *DIBuilder) Declare(b llvm.Builder, v ssa.Value, llv llvm.Value, paramIndex int) {
	tag := tagAutoVariable
	if paramIndex >= 0 {
		tag = tagArgVariable
	}
	var diFile llvm.Value
	var line int
	if file := d.fset.File(v.Pos()); file != nil {
		line = file.Line(v.Pos())
		diFile = d.getFile(file)
	}
	localVar := d.builder.CreateLocalVariable(d.scope(), llvm.DILocalVariable{
		Tag: tag,
		Name: llv.Name(),
		File: diFile,
		Line: line,
		ArgNo: paramIndex + 1, // 1-based; 0 means "not a parameter"
		Type: d.DIType(v.Type()),
	})
	expr := d.builder.CreateExpression(nil) // empty DIExpression
	d.builder.InsertDeclareAtEnd(llv, localVar, expr, b.GetInsertBlock())
}

// Value creates an llvm.dbg.value call for the specified register value.
// Currently a no-op.
func (d *DIBuilder) Value(b llvm.Builder, v ssa.Value, llv llvm.Value, paramIndex int) {
	// TODO(axw)
}

// SetLocation sets the current debug location.
func (d *DIBuilder) SetLocation(b llvm.Builder, pos token.Pos) {
	if !pos.IsValid() {
		return
	}
	position := d.fset.Position(pos)
	d.lb = llvm.Value{nil}
	if position.Filename != d.fnFile && position.Filename != "" {
		// This can happen rarely, e.g. in init functions.
		diFile := d.builder.CreateFile(d.remapFilePath(position.Filename), "")
		d.lb = d.builder.CreateLexicalBlockFile(d.scope(), diFile, 0)
	}
	// Location node operands: line, column, scope, and an empty slot —
	// presumably the inlined-at operand; NOTE(review): confirm against
	// the metadata layout expected by these bindings.
	b.SetCurrentDebugLocation(llvm.MDNode([]llvm.Value{
		llvm.ConstInt(llvm.Int32Type(), uint64(position.Line), false),
		llvm.ConstInt(llvm.Int32Type(), uint64(position.Column), false),
		d.scope(),
		llvm.Value{},
	}))
}
+
// Finalize must be called after all compilation units are translated,
// generating the final debug metadata for the module. It records the
// "Dwarf Version" and "Debug Info Version" module flags, then
// finalizes the underlying builder.
func (d *DIBuilder) Finalize() {
	d.module.AddNamedMetadataOperand(
		"llvm.module.flags",
		llvm.MDNode([]llvm.Value{
			llvm.ConstInt(llvm.Int32Type(), 2, false), // Warn on mismatch
			llvm.MDString("Dwarf Version"),
			llvm.ConstInt(llvm.Int32Type(), 4, false),
		}),
	)
	d.module.AddNamedMetadataOperand(
		"llvm.module.flags",
		llvm.MDNode([]llvm.Value{
			llvm.ConstInt(llvm.Int32Type(), 1, false), // Error on mismatch
			llvm.MDString("Debug Info Version"),
			llvm.ConstInt(llvm.Int32Type(), 1, false),
		}),
	)
	d.builder.Finalize()
}

// DIType maps a Go type to DIType debug metadata value.
func (d *DIBuilder) DIType(t types.Type) llvm.Value {
	return d.typeDebugDescriptor(t, types.TypeString(nil, t))
}

// typeDebugDescriptor returns (and memoizes) debug metadata for t.
// A nil type maps to a lazily created "void" basic type.
func (d *DIBuilder) typeDebugDescriptor(t types.Type, name string) llvm.Value {
	// Signature needs to be handled specially, to preprocess
	// methods, moving the receiver to the parameter list.
	if t, ok := t.(*types.Signature); ok {
		return d.descriptorSignature(t, name)
	}
	if t == nil {
		if d.voidType.IsNil() {
			d.voidType = d.builder.CreateBasicType(llvm.DIBasicType{Name: "void"})
		}
		return d.voidType
	}
	if dt, ok := d.types.At(t).(llvm.Value); ok {
		return dt
	}
	dt := d.descriptor(t, name)
	d.types.Set(t, dt)
	return dt
}
+
// descriptor dispatches to the per-kind descriptor builders; it
// panics for type kinds with no handler.
func (d *DIBuilder) descriptor(t types.Type, name string) llvm.Value {
	switch t := t.(type) {
	case *types.Basic:
		return d.descriptorBasic(t, name)
	case *types.Pointer:
		return d.descriptorPointer(t)
	case *types.Struct:
		return d.descriptorStruct(t, name)
	case *types.Named:
		return d.descriptorNamed(t)
	case *types.Array:
		return d.descriptorArray(t, name)
	case *types.Slice:
		return d.descriptorSlice(t, name)
	case *types.Map:
		return d.descriptorMap(t, name)
	case *types.Chan:
		return d.descriptorChan(t, name)
	case *types.Interface:
		return d.descriptorInterface(t, name)
	default:
		panic(fmt.Sprintf("unhandled type: %T", t))
	}
}
+
+func (d *DIBuilder) descriptorBasic(t *types.Basic, name string) llvm.Value {
+ switch t.Kind() {
+ case types.String:
+ return d.typeDebugDescriptor(types.NewStruct([]*types.Var{
+ types.NewVar(0, nil, "ptr", types.NewPointer(types.Typ[types.Uint8])),
+ types.NewVar(0, nil, "len", types.Typ[types.Int]),
+ }, nil), name)
+ case types.UnsafePointer:
+ return d.builder.CreateBasicType(llvm.DIBasicType{
+ Name: name,
+ SizeInBits: uint64(d.sizes.Sizeof(t) * 8),
+ AlignInBits: uint64(d.sizes.Alignof(t) * 8),
+ Encoding: llvm.DW_ATE_unsigned,
+ })
+ default:
+ bt := llvm.DIBasicType{
+ Name: t.String(),
+ SizeInBits: uint64(d.sizes.Sizeof(t) * 8),
+ AlignInBits: uint64(d.sizes.Alignof(t) * 8),
+ }
+ switch bi := t.Info(); {
+ case bi&types.IsBoolean != 0:
+ bt.Encoding = llvm.DW_ATE_boolean
+ case bi&types.IsUnsigned != 0:
+ bt.Encoding = llvm.DW_ATE_unsigned
+ case bi&types.IsInteger != 0:
+ bt.Encoding = llvm.DW_ATE_signed
+ case bi&types.IsFloat != 0:
+ bt.Encoding = llvm.DW_ATE_float
+ case bi&types.IsComplex != 0:
+ bt.Encoding = llvm.DW_ATE_imaginary_float
+ case bi&types.IsUnsigned != 0:
+ bt.Encoding = llvm.DW_ATE_unsigned
+ default:
+ panic(fmt.Sprintf("unhandled: %#v", t))
+ }
+ return d.builder.CreateBasicType(bt)
+ }
+}
+
// descriptorPointer returns debug metadata for a pointer type.
func (d *DIBuilder) descriptorPointer(t *types.Pointer) llvm.Value {
	return d.builder.CreatePointerType(llvm.DIPointerType{
		Pointee: d.DIType(t.Elem()),
		SizeInBits: uint64(d.sizes.Sizeof(t) * 8),
		AlignInBits: uint64(d.sizes.Alignof(t) * 8),
	})
}

// descriptorStruct returns debug metadata for a struct type, with one
// member entry per field, using the sizes API for field offsets.
func (d *DIBuilder) descriptorStruct(t *types.Struct, name string) llvm.Value {
	fields := make([]*types.Var, t.NumFields())
	for i := range fields {
		fields[i] = t.Field(i)
	}
	offsets := d.sizes.Offsetsof(fields)
	members := make([]llvm.Value, len(fields))
	for i, f := range fields {
		// TODO(axw) file/line where member is defined.
		t := f.Type()
		members[i] = d.builder.CreateMemberType(d.cu, llvm.DIMemberType{
			Name: f.Name(),
			Type: d.DIType(t),
			SizeInBits: uint64(d.sizes.Sizeof(t) * 8),
			AlignInBits: uint64(d.sizes.Alignof(t) * 8),
			OffsetInBits: uint64(offsets[i] * 8),
		})
	}
	// TODO(axw) file/line where struct is defined.
	return d.builder.CreateStructType(d.cu, llvm.DIStructType{
		Name: name,
		SizeInBits: uint64(d.sizes.Sizeof(t) * 8),
		AlignInBits: uint64(d.sizes.Alignof(t) * 8),
		Elements: members,
	})
}

// descriptorNamed returns debug metadata for a named type, emitted as
// a typedef of its underlying type.
func (d *DIBuilder) descriptorNamed(t *types.Named) llvm.Value {
	// Create a placeholder for the named type, to terminate cycles.
	placeholder := llvm.MDNode(nil)
	d.types.Set(t, placeholder)
	var diFile llvm.Value
	var line int
	if file := d.fset.File(t.Obj().Pos()); file != nil {
		line = file.Line(t.Obj().Pos())
		diFile = d.getFile(file)
	}
	typedef := d.builder.CreateTypedef(llvm.DITypedef{
		Type: d.DIType(t.Underlying()),
		Name: t.Obj().Name(),
		File: diFile,
		Line: line,
	})
	placeholder.ReplaceAllUsesWith(typedef)
	return typedef
}

// descriptorArray returns debug metadata for a fixed-length array.
func (d *DIBuilder) descriptorArray(t *types.Array, name string) llvm.Value {
	return d.builder.CreateArrayType(llvm.DIArrayType{
		SizeInBits: uint64(d.sizes.Sizeof(t) * 8),
		AlignInBits: uint64(d.sizes.Alignof(t) * 8),
		ElementType: d.DIType(t.Elem()),
		Subscripts: []llvm.DISubrange{{Count: t.Len()}},
	})
}

// descriptorSlice describes a slice as its {ptr, len, cap} header.
func (d *DIBuilder) descriptorSlice(t *types.Slice, name string) llvm.Value {
	sliceStruct := types.NewStruct([]*types.Var{
		types.NewVar(0, nil, "ptr", types.NewPointer(t.Elem())),
		types.NewVar(0, nil, "len", types.Typ[types.Int]),
		types.NewVar(0, nil, "cap", types.Typ[types.Int]),
	}, nil)
	return d.typeDebugDescriptor(sliceStruct, name)
}

// descriptorMap describes a map as a uintptr-sized basic type.
func (d *DIBuilder) descriptorMap(t *types.Map, name string) llvm.Value {
	// FIXME: This should be DW_TAG_pointer_type to __go_map.
	return d.descriptorBasic(types.Typ[types.Uintptr], name)
}

// descriptorChan describes a channel as a uintptr-sized basic type.
func (d *DIBuilder) descriptorChan(t *types.Chan, name string) llvm.Value {
	// FIXME: This should be DW_TAG_pointer_type to __go_channel.
	return d.descriptorBasic(types.Typ[types.Uintptr], name)
}

// descriptorInterface describes an interface as its {type, data}
// pair of pointers.
func (d *DIBuilder) descriptorInterface(t *types.Interface, name string) llvm.Value {
	ifaceStruct := types.NewStruct([]*types.Var{
		types.NewVar(0, nil, "type", types.NewPointer(types.Typ[types.Uint8])),
		types.NewVar(0, nil, "data", types.NewPointer(types.Typ[types.Uint8])),
	}, nil)
	return d.typeDebugDescriptor(ifaceStruct, name)
}
+
// descriptorSignature returns subroutine-type debug metadata for a
// function signature. Methods are lowered to plain functions with the
// receiver prepended to the parameter list; multiple results are
// aggregated into an unnamed struct.
func (d *DIBuilder) descriptorSignature(t *types.Signature, name string) llvm.Value {
	// If there's a receiver change the receiver to an
	// additional (first) parameter, and take the value of
	// the resulting signature instead.
	if recv := t.Recv(); recv != nil {
		params := t.Params()
		paramvars := make([]*types.Var, int(params.Len()+1))
		paramvars[0] = recv
		for i := 0; i < int(params.Len()); i++ {
			paramvars[i+1] = params.At(i)
		}
		params = types.NewTuple(paramvars...)
		t := types.NewSignature(nil, nil, params, t.Results(), t.Variadic())
		return d.typeDebugDescriptor(t, name)
	}
	if dt, ok := d.types.At(t).(llvm.Value); ok {
		return dt
	}

	var returnType llvm.Value
	results := t.Results()
	switch n := results.Len(); n {
	case 0:
		returnType = d.DIType(nil) // void
	case 1:
		returnType = d.DIType(results.At(0).Type())
	default:
		// Aggregate multiple results into a struct descriptor.
		fields := make([]*types.Var, results.Len())
		for i := range fields {
			f := results.At(i)
			// Structs may not have multiple fields
			// with the same name, excepting "_".
			if f.Name() == "" {
				f = types.NewVar(f.Pos(), f.Pkg(), "_", f.Type())
			}
			fields[i] = f
		}
		returnType = d.typeDebugDescriptor(types.NewStruct(fields, nil), "")
	}

	// The first element of the subroutine type's parameter list holds
	// the return type; actual parameters follow.
	var paramTypes []llvm.Value
	params := t.Params()
	if params != nil && params.Len() > 0 {
		paramTypes = make([]llvm.Value, params.Len()+1)
		paramTypes[0] = returnType
		for i := range paramTypes[1:] {
			paramTypes[i+1] = d.DIType(params.At(i).Type())
		}
	} else {
		paramTypes = []llvm.Value{returnType}
	}

	// TODO(axw) get position of type definition for File field
	return d.builder.CreateSubroutineType(llvm.DISubroutineType{
		Parameters: paramTypes,
	})
}
diff --git a/include/dwarf2.h b/include/dwarf2.h
new file mode 100644
index 0000000..9288226
--- /dev/null
+++ b/include/dwarf2.h
@@ -0,0 +1,94 @@
+//===----------------------------- dwarf2.h -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+// DWARF constants. Derived from:
+// - libcxxabi/src/Unwind/dwarf2.h
+// - DWARF 4 specification
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef DWARF2_H
+#define DWARF2_H
+
/* Selected DWARF attribute codes (DW_AT_*). */
enum dwarf_attribute {
  DW_AT_name = 0x03,
  DW_AT_stmt_list = 0x10,
  DW_AT_low_pc = 0x11,
  DW_AT_high_pc = 0x12,
  DW_AT_comp_dir = 0x1b,
  DW_AT_abstract_origin = 0x31,
  DW_AT_specification = 0x47,
  DW_AT_ranges = 0x55,
  DW_AT_call_file = 0x58,
  DW_AT_call_line = 0x59,
  DW_AT_linkage_name = 0x6e,
  DW_AT_MIPS_linkage_name = 0x2007 /* vendor extension; pre-DWARF4 linkage name */
};

/* DWARF attribute form codes (DW_FORM_*), describing how attribute
   values are encoded. */
enum dwarf_form {
  DW_FORM_addr = 0x01,
  DW_FORM_block2 = 0x03,
  DW_FORM_block4 = 0x04,
  DW_FORM_data2 = 0x05,
  DW_FORM_data4 = 0x06,
  DW_FORM_data8 = 0x07,
  DW_FORM_string = 0x08,
  DW_FORM_block = 0x09,
  DW_FORM_block1 = 0x0a,
  DW_FORM_data1 = 0x0b,
  DW_FORM_flag = 0x0c,
  DW_FORM_sdata = 0x0d,
  DW_FORM_strp = 0x0e,
  DW_FORM_udata = 0x0f,
  DW_FORM_ref_addr = 0x10,
  DW_FORM_ref1 = 0x11,
  DW_FORM_ref2 = 0x12,
  DW_FORM_ref4 = 0x13,
  DW_FORM_ref8 = 0x14,
  DW_FORM_ref_udata = 0x15,
  DW_FORM_indirect = 0x16,
  DW_FORM_sec_offset = 0x17,
  DW_FORM_exprloc = 0x18,
  DW_FORM_flag_present = 0x19,
  DW_FORM_ref_sig8 = 0x20,
  DW_FORM_GNU_addr_index = 0x1f01, /* GNU extensions below */
  DW_FORM_GNU_str_index = 0x1f02,
  DW_FORM_GNU_ref_alt = 0x1f20,
  DW_FORM_GNU_strp_alt = 0x1f21
};

/* Selected DWARF debugging-information-entry tags (DW_TAG_*). */
enum dwarf_tag {
  DW_TAG_entry_point = 0x03,
  DW_TAG_compile_unit = 0x11,
  DW_TAG_inlined_subroutine = 0x1d,
  DW_TAG_subprogram = 0x2e
};

/* DWARF line-number program standard opcodes (DW_LNS_*). */
enum dwarf_lns {
  DW_LNS_extended_op = 0x00,
  DW_LNS_copy = 0x01,
  DW_LNS_advance_pc = 0x02,
  DW_LNS_advance_line = 0x03,
  DW_LNS_set_file = 0x04,
  DW_LNS_set_column = 0x05,
  DW_LNS_negate_stmt = 0x06,
  DW_LNS_set_basic_block = 0x07,
  DW_LNS_const_add_pc = 0x08,
  DW_LNS_fixed_advance_pc = 0x09,
  DW_LNS_set_prologue_end = 0x0a,
  DW_LNS_set_epilogue_begin = 0x0b,
  DW_LNS_set_isa = 0x0c
};

/* DWARF line-number program extended opcodes (DW_LNE_*), introduced
   by DW_LNS_extended_op. */
enum dwarf_lne {
  DW_LNE_end_sequence = 0x01,
  DW_LNE_set_address = 0x02,
  DW_LNE_define_file = 0x03,
  DW_LNE_set_discriminator = 0x04
};
+
+#endif // DWARF2_H
diff --git a/include/filenames.h b/include/filenames.h
new file mode 100644
index 0000000..726b6ad
--- /dev/null
+++ b/include/filenames.h
@@ -0,0 +1,15 @@
+//===----------------------------- filenames.h ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
#ifndef FILENAMES_H
#define FILENAMES_H

/* A path is considered absolute iff it starts with '/'. POSIX-only:
   Windows drive letters and UNC prefixes are not recognized here. */
#define IS_ABSOLUTE_PATH(path) ((path)[0] == '/')

#endif
diff --git a/include/unwind-pe.h b/include/unwind-pe.h
new file mode 100644
index 0000000..a460d55
--- /dev/null
+++ b/include/unwind-pe.h
@@ -0,0 +1,201 @@
+//===----------------------------- unwind-pe.h ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+// Pointer-Encoding decoder. Derived from:
+// - libcxxabi/src/Unwind/dwarf2.h
+// - libcxxabi/src/Unwind/AddressSpace.h
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef UNWIND_PE_H
+#define UNWIND_PE_H
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+
+// FSF exception handling Pointer-Encoding constants
+// Used in CFI augmentation by GCC
/* Pointer-Encoding byte layout (see read_encoded_value): the low
   nibble (0x0F) selects the value format, bits 0x70 select the base
   the value is relative to, and 0x80 requests a final indirection. */
enum {
  DW_EH_PE_ptr = 0x00,
  DW_EH_PE_uleb128 = 0x01,
  DW_EH_PE_udata2 = 0x02,
  DW_EH_PE_udata4 = 0x03,
  DW_EH_PE_udata8 = 0x04,
  DW_EH_PE_signed = 0x08,
  DW_EH_PE_sleb128 = 0x09,
  DW_EH_PE_sdata2 = 0x0A,
  DW_EH_PE_sdata4 = 0x0B,
  DW_EH_PE_sdata8 = 0x0C,
  DW_EH_PE_absptr = 0x00,
  DW_EH_PE_pcrel = 0x10,
  DW_EH_PE_textrel = 0x20,
  DW_EH_PE_datarel = 0x30,
  DW_EH_PE_funcrel = 0x40,
  DW_EH_PE_aligned = 0x50,
  DW_EH_PE_indirect = 0x80,
  DW_EH_PE_omit = 0xFF
};
+
/// Read a ULEB128 into a 64-bit word, advancing *addr past the
/// encoded bytes. Groups of 7 bits are accumulated little-endian
/// first; an encoding that would overflow 64 bits trips the assert
/// (and, with NDEBUG, drops the overflowing bits).
static uint64_t unw_getULEB128(uintptr_t *addr) {
  const uint8_t *cursor = (const uint8_t *)*addr;
  uint64_t value = 0;
  int shift = 0;
  for (;;) {
    uint8_t byte = *cursor++;
    uint64_t low = byte & 0x7f;
    if (shift >= 64 || (low << shift) >> shift != low) {
      assert(!"malformed uleb128 expression");
    } else {
      value |= low << shift;
      shift += 7;
    }
    if ((byte & 0x80) == 0)
      break;
  }
  *addr = (uintptr_t)cursor;
  return value;
}
+
/// Read a SLEB128 into a 64-bit word, advancing *addr past the
/// encoded bytes.
///
/// Two fixes versus the original:
///  - p must be initialized from *addr (the data pointer), not addr
///    itself; the original decoded the bytes of the pointer variable.
///  - the 7-bit group must be widened to int64_t before shifting;
///    "(byte & 0x7f) << bit" shifts in int width, which is undefined
///    once bit reaches the width of int.
static int64_t unw_getSLEB128(uintptr_t *addr) {
  const uint8_t *p = (const uint8_t *)*addr;
  int64_t result = 0;
  int bit = 0;
  uint8_t byte;
  do {
    byte = *p++;
    result |= (int64_t)(byte & 0x7f) << bit;
    bit += 7;
  } while (byte & 0x80);
  // sign extend negative numbers (guard avoids a 64-bit shift when a
  // full 10-byte encoding has already filled every bit)
  if ((byte & 0x40) != 0 && bit < 64)
    result |= (-1LL) << bit;
  *addr = (uintptr_t) p;
  return result;
}
+
/* Read a 16-bit value from a possibly unaligned address; memcpy keeps
   this safe on strict-alignment targets. Host byte order. */
static uint16_t unw_get16(uintptr_t addr) {
  uint16_t val;
  memcpy(&val, (void *)addr, sizeof(val));
  return val;
}

/* Read a 32-bit value from a possibly unaligned address. */
static uint32_t unw_get32(uintptr_t addr) {
  uint32_t val;
  memcpy(&val, (void *)addr, sizeof(val));
  return val;
}

/* Read a 64-bit value from a possibly unaligned address. */
static uint64_t unw_get64(uintptr_t addr) {
  uint64_t val;
  memcpy(&val, (void *)addr, sizeof(val));
  return val;
}
+
/* Read a pointer-sized value from addr; the sizeof test is resolved
   at compile time. */
static uintptr_t unw_getP(uintptr_t addr) {
  return sizeof(uintptr_t) == 8 ? (uintptr_t)unw_get64(addr)
                                : (uintptr_t)unw_get32(addr);
}
+
/* Adapter matching the libgcc-style read_uleb128 signature: decode a
   ULEB128 at p into *ret, returning the address one past the encoded
   bytes. _uleb128_t is supplied by the unwinder headers. */
static const unsigned char *read_uleb128(const unsigned char *p,
                                         _uleb128_t *ret) {
  uintptr_t addr = (uintptr_t)p;
  *ret = unw_getULEB128(&addr);
  return (unsigned char *)addr;
}
+
/* Decode a pointer encoded per the DWARF exception-handling
   Pointer-Encoding scheme (see the DW_EH_PE_* constants): the low
   nibble selects the value format, bits 0x70 the relative base, and
   DW_EH_PE_indirect a final dereference. Stores the decoded pointer
   in *ret and returns the address past the encoded bytes. ctx is
   unused. */
static const unsigned char *read_encoded_value(struct _Unwind_Context *ctx,
                                               unsigned char encoding,
                                               const unsigned char *p,
                                               _Unwind_Ptr *ret) {
  uintptr_t addr = (uintptr_t)p;
  uintptr_t startAddr = addr; /* base for DW_EH_PE_pcrel */
  uintptr_t result;

  (void)ctx;

  // first get value
  switch (encoding & 0x0F) {
  case DW_EH_PE_ptr:
    result = unw_getP(addr);
    p += sizeof(uintptr_t);
    break;
  case DW_EH_PE_uleb128:
    /* the LEB decoders advance addr; resynchronize p from it */
    result = (uintptr_t)unw_getULEB128(&addr);
    p = (const unsigned char *)addr;
    break;
  case DW_EH_PE_udata2:
    result = unw_get16(addr);
    p += 2;
    break;
  case DW_EH_PE_udata4:
    result = unw_get32(addr);
    p += 4;
    break;
  case DW_EH_PE_udata8:
    result = (uintptr_t)unw_get64(addr);
    p += 8;
    break;
  case DW_EH_PE_sleb128:
    result = (uintptr_t)unw_getSLEB128(&addr);
    p = (const unsigned char *)addr;
    break;
  case DW_EH_PE_sdata2:
    // Sign extend from signed 16-bit value.
    result = (uintptr_t)(int16_t)unw_get16(addr);
    p += 2;
    break;
  case DW_EH_PE_sdata4:
    // Sign extend from signed 32-bit value.
    result = (uintptr_t)(int32_t)unw_get32(addr);
    p += 4;
    break;
  case DW_EH_PE_sdata8:
    /* NOTE(review): on 32-bit targets this truncates the 64-bit value
       to uintptr_t — confirm intended. */
    result = (uintptr_t)unw_get64(addr);
    p += 8;
    break;
  default:
    assert(!"unknown pointer encoding");
  }

  // then add relative offset
  switch (encoding & 0x70) {
  case DW_EH_PE_absptr:
    // do nothing
    break;
  case DW_EH_PE_pcrel:
    result += startAddr;
    break;
  case DW_EH_PE_textrel:
    assert(!"DW_EH_PE_textrel pointer encoding not supported");
    break;
  case DW_EH_PE_datarel:
    assert(!"DW_EH_PE_datarel pointer encoding not supported");
    break;
  case DW_EH_PE_funcrel:
    assert(!"DW_EH_PE_funcrel pointer encoding not supported");
    break;
  case DW_EH_PE_aligned:
    assert(!"DW_EH_PE_aligned pointer encoding not supported");
    break;
  default:
    assert(!"unknown pointer encoding");
    break;
  }

  if (encoding & DW_EH_PE_indirect)
    result = unw_getP(result);

  *ret = result;
  return p;
}
+
+#endif // UNWIND_PE_H
diff --git a/irgen/annotations.go b/irgen/annotations.go
new file mode 100644
index 0000000..d88bb80
--- /dev/null
+++ b/irgen/annotations.go
@@ -0,0 +1,64 @@
+//===- annotations.go - annotation processor ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file converts llgo annotations into attributes.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "go/ast"
+ "go/token"
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// processAnnotations takes an *ssa.Package and a
+// *importer.PackageInfo, and processes all of the
+// llgo source annotations attached to each top-level
+// function and global variable.
// processAnnotations takes an *ssa.Package and a
// *importer.PackageInfo, and processes all of the
// llgo source annotations attached to each top-level
// function and global variable.
func (c *compiler) processAnnotations(u *unit, pkginfo *loader.PackageInfo) {
	// Index the unit's globals by type-checker object so annotated
	// identifiers can be resolved to their LLVM values.
	members := make(map[types.Object]llvm.Value, len(u.globals))
	for k, v := range u.globals {
		members[k.(ssa.Member).Object()] = v
	}
	// applyAttributes applies attrs to each named identifier that
	// resolves to a known global; unknown identifiers are skipped.
	applyAttributes := func(attrs []Attribute, idents ...*ast.Ident) {
		if len(attrs) == 0 {
			return
		}
		for _, ident := range idents {
			if v := members[pkginfo.ObjectOf(ident)]; !v.IsNil() {
				for _, attr := range attrs {
					attr.Apply(v)
				}
			}
		}
	}
	// Walk every file's top-level declarations: function doc comments
	// and var-declaration doc comments may carry annotations.
	for _, f := range pkginfo.Files {
		for _, decl := range f.Decls {
			switch decl := decl.(type) {
			case *ast.FuncDecl:
				attrs := parseAttributes(decl.Doc)
				applyAttributes(attrs, decl.Name)
			case *ast.GenDecl:
				if decl.Tok != token.VAR {
					continue
				}
				for _, spec := range decl.Specs {
					varspec := spec.(*ast.ValueSpec)
					attrs := parseAttributes(decl.Doc)
					applyAttributes(attrs, varspec.Names...)
				}
			}
		}
	}
}
diff --git a/irgen/attribute.go b/irgen/attribute.go
new file mode 100644
index 0000000..c9d5393
--- /dev/null
+++ b/irgen/attribute.go
@@ -0,0 +1,177 @@
+//===- attribute.go - attribute processor ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file processes llgo and //extern attributes.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "fmt"
+ "go/ast"
+ "llvm.org/llvm/bindings/go/llvm"
+ "strings"
+)
+
// AttributeCommentPrefix introduces an llgo attribute annotation in a
// comment, e.g. "#llgo name: foo".
const AttributeCommentPrefix = "#llgo "

// Attribute represents an attribute associated with a
// global variable or function.
type Attribute interface {
	// Apply applies the attribute to the given LLVM value.
	Apply(llvm.Value)
}
+
// parseAttributes parses zero or more #llgo comment attributes associated with
// a global variable or function. The comment group provided will be processed
// one line at a time using parseAttribute.
func parseAttributes(doc *ast.CommentGroup) []Attribute {
	var attributes []Attribute
	if doc == nil {
		return attributes
	}
	for _, comment := range doc.List {
		// "//extern NAME" is shorthand for a name attribute.
		if strings.HasPrefix(comment.Text, "//extern ") {
			nameattr := nameAttribute(strings.TrimSpace(comment.Text[9:]))
			attributes = append(attributes, nameattr)
			continue
		}
		// Strip the leading comment marker, and for block comments
		// also the trailing "*/".
		text := comment.Text[2:]
		if strings.HasPrefix(comment.Text, "/*") {
			text = text[:len(text)-2]
		}
		attr := parseAttribute(strings.TrimSpace(text))
		if attr != nil {
			attributes = append(attributes, attr)
		}
	}
	return attributes
}
+
+// parseAttribute parses a single #llgo comment attribute associated with
+// a global variable or function. The string provided will be parsed
+// if it begins with AttributeCommentPrefix, otherwise nil is returned.
+func parseAttribute(line string) Attribute {
+ if !strings.HasPrefix(line, AttributeCommentPrefix) {
+ return nil
+ }
+ line = strings.TrimSpace(line[len(AttributeCommentPrefix):])
+ colon := strings.IndexRune(line, ':')
+ var key, value string
+ if colon == -1 {
+ key = line
+ } else {
+ key, value = line[:colon], line[colon+1:]
+ }
+ switch key {
+ case "linkage":
+ return parseLinkageAttribute(value)
+ case "name":
+ return nameAttribute(strings.TrimSpace(value))
+ case "attr":
+ return parseLLVMAttribute(strings.TrimSpace(value))
+ case "thread_local":
+ return tlsAttribute{}
+ default:
+ // FIXME decide what to do here. return error? log warning?
+ panic("unknown attribute key: " + key)
+ }
+ return nil
+}
+
// linkageAttribute sets the LLVM linkage of the value it is applied
// to; the value is an llvm.Linkage bit pattern.
type linkageAttribute llvm.Linkage

func (a linkageAttribute) Apply(v llvm.Value) {
	v.SetLinkage(llvm.Linkage(a))
}
+
+func parseLinkageAttribute(value string) linkageAttribute {
+ var result linkageAttribute
+ value = strings.Replace(value, ",", " ", -1)
+ for _, field := range strings.Fields(value) {
+ switch strings.ToLower(field) {
+ case "private":
+ result |= linkageAttribute(llvm.PrivateLinkage)
+ case "internal":
+ result |= linkageAttribute(llvm.InternalLinkage)
+ case "available_externally":
+ result |= linkageAttribute(llvm.AvailableExternallyLinkage)
+ case "linkonce":
+ result |= linkageAttribute(llvm.LinkOnceAnyLinkage)
+ case "common":
+ result |= linkageAttribute(llvm.CommonLinkage)
+ case "weak":
+ result |= linkageAttribute(llvm.WeakAnyLinkage)
+ case "appending":
+ result |= linkageAttribute(llvm.AppendingLinkage)
+ case "extern_weak":
+ result |= linkageAttribute(llvm.ExternalWeakLinkage)
+ case "linkonce_odr":
+ result |= linkageAttribute(llvm.LinkOnceODRLinkage)
+ case "weak_odr":
+ result |= linkageAttribute(llvm.WeakODRLinkage)
+ case "external":
+ result |= linkageAttribute(llvm.ExternalLinkage)
+ }
+ }
+ return result
+}
+
// nameAttribute renames the value it is applied to (produced by the
// "name:" and "//extern" annotations).
type nameAttribute string

// Apply renames v. For functions, if a different function already
// holds the desired name, that function (which must not have a body)
// is renamed aside and all of its uses are redirected to v through a
// bitcast before v takes the name.
func (a nameAttribute) Apply(v llvm.Value) {
	if !v.IsAFunction().IsNil() {
		name := string(a)
		curr := v.GlobalParent().NamedFunction(name)
		if !curr.IsNil() && curr != v {
			if curr.BasicBlocksCount() != 0 {
				panic(fmt.Errorf("Want to take the name %s from a function that has a body!", name))
			}
			curr.SetName(name + "_llgo_replaced")
			curr.ReplaceAllUsesWith(llvm.ConstBitCast(v, curr.Type()))
		}
		v.SetName(name)
	} else {
		v.SetName(string(a))
	}
}
+
+func parseLLVMAttribute(value string) llvmAttribute {
+ var result llvmAttribute
+ value = strings.Replace(value, ",", " ", -1)
+ for _, field := range strings.Fields(value) {
+ switch strings.ToLower(field) {
+ case "noreturn":
+ result |= llvmAttribute(llvm.NoReturnAttribute)
+ case "nounwind":
+ result |= llvmAttribute(llvm.NoUnwindAttribute)
+ case "noinline":
+ result |= llvmAttribute(llvm.NoInlineAttribute)
+ case "alwaysinline":
+ result |= llvmAttribute(llvm.AlwaysInlineAttribute)
+ }
+ }
+ return result
+}
+
// llvmAttribute applies raw LLVM attributes (the "attr:" annotation);
// the value is an llvm.Attribute bit pattern.
type llvmAttribute llvm.Attribute

// Apply adds the attribute bits to v: as function attributes for
// functions, as value attributes otherwise.
func (a llvmAttribute) Apply(v llvm.Value) {
	if !v.IsAFunction().IsNil() {
		v.AddFunctionAttr(llvm.Attribute(a))
	} else {
		v.AddAttribute(llvm.Attribute(a))
	}
}
+
// tlsAttribute marks a global as thread-local (the "thread_local"
// annotation).
type tlsAttribute struct{}

func (tlsAttribute) Apply(v llvm.Value) {
	v.SetThreadLocal(true)
}
diff --git a/irgen/builtins.go b/irgen/builtins.go
new file mode 100644
index 0000000..404962f
--- /dev/null
+++ b/irgen/builtins.go
@@ -0,0 +1,116 @@
+//===- builtins.go - IR generation for builtins ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements IR generation for the built-in functions.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// callCap implements the cap() builtin, returning the capacity of an
+// array, pointer-to-array, slice, or channel value as an int.
+func (fr *frame) callCap(arg *govalue) *govalue {
+ var v llvm.Value
+ switch typ := arg.Type().Underlying().(type) {
+ case *types.Array:
+ v = llvm.ConstInt(fr.llvmtypes.inttype, uint64(typ.Len()), false)
+ case *types.Pointer:
+ // Pointer to array: capacity is the pointed-to array's length.
+ atyp := typ.Elem().Underlying().(*types.Array)
+ v = llvm.ConstInt(fr.llvmtypes.inttype, uint64(atyp.Len()), false)
+ case *types.Slice:
+ // Capacity is the third field of the slice header.
+ v = fr.builder.CreateExtractValue(arg.value, 2, "")
+ case *types.Chan:
+ v = fr.runtime.chanCap.call(fr, arg.value)[0]
+ }
+ // NOTE(review): for any other type v stays the zero llvm.Value.
+ return newValue(v, types.Typ[types.Int])
+}
+
+// callLen implements the len() builtin, returning the length of an array,
+// pointer-to-array, slice, map, string, or channel value as an int.
+func (fr *frame) callLen(arg *govalue) *govalue {
+ var lenvalue llvm.Value
+ switch typ := arg.Type().Underlying().(type) {
+ case *types.Array:
+ lenvalue = llvm.ConstInt(fr.llvmtypes.inttype, uint64(typ.Len()), false)
+ case *types.Pointer:
+ // Pointer to array: length is the pointed-to array's length.
+ atyp := typ.Elem().Underlying().(*types.Array)
+ lenvalue = llvm.ConstInt(fr.llvmtypes.inttype, uint64(atyp.Len()), false)
+ case *types.Slice:
+ // Length is the second field of the slice header.
+ lenvalue = fr.builder.CreateExtractValue(arg.value, 1, "")
+ case *types.Map:
+ lenvalue = fr.runtime.mapLen.call(fr, arg.value)[0]
+ case *types.Basic:
+ if isString(typ) {
+ // Length is the second field of the string header.
+ lenvalue = fr.builder.CreateExtractValue(arg.value, 1, "")
+ }
+ case *types.Chan:
+ lenvalue = fr.runtime.chanLen.call(fr, arg.value)[0]
+ }
+ return newValue(lenvalue, types.Typ[types.Int])
+}
+
+// callAppend takes two slices of the same type, and yields
+// the result of appending the second to the first.
+// It extracts the second slice's data pointer and length and delegates to
+// the runtime append routine along with the element size.
+func (fr *frame) callAppend(a, b *govalue) *govalue {
+ bptr := fr.builder.CreateExtractValue(b.value, 0, "")
+ blen := fr.builder.CreateExtractValue(b.value, 1, "")
+ elemsizeInt64 := fr.types.Sizeof(a.Type().Underlying().(*types.Slice).Elem())
+ elemsize := llvm.ConstInt(fr.target.IntPtrType(), uint64(elemsizeInt64), false)
+ result := fr.runtime.append.call(fr, a.value, bptr, blen, elemsize)[0]
+ return newValue(result, a.Type())
+}
+
+// callCopy takes two slices a and b of the same type, and
+// yields the result of calling "copy(a, b)".
+// It copies min(len(dest), len(source)) elements via the runtime copy
+// routine and returns that count as an int.
+func (fr *frame) callCopy(dest, source *govalue) *govalue {
+ aptr := fr.builder.CreateExtractValue(dest.value, 0, "")
+ alen := fr.builder.CreateExtractValue(dest.value, 1, "")
+ bptr := fr.builder.CreateExtractValue(source.value, 0, "")
+ blen := fr.builder.CreateExtractValue(source.value, 1, "")
+ // minlen = min(alen, blen), selected without branching.
+ aless := fr.builder.CreateICmp(llvm.IntULT, alen, blen, "")
+ minlen := fr.builder.CreateSelect(aless, alen, blen, "")
+ elemsizeInt64 := fr.types.Sizeof(dest.Type().Underlying().(*types.Slice).Elem())
+ elemsize := llvm.ConstInt(fr.types.inttype, uint64(elemsizeInt64), false)
+ // The runtime copy operates on a byte count, not an element count.
+ bytes := fr.builder.CreateMul(minlen, elemsize, "")
+ fr.runtime.copy.call(fr, aptr, bptr, bytes)
+ return newValue(minlen, types.Typ[types.Int])
+}
+
+// callRecover implements the recover() builtin. It branches on the
+// frame's canRecover flag: when set, the runtime recover routine (or the
+// deferred variant when isDeferredRecover is true) is invoked; otherwise
+// a null empty-interface value is produced. The two paths are merged with
+// a PHI node in the continuation block.
+func (fr *frame) callRecover(isDeferredRecover bool) *govalue {
+ startbb := fr.builder.GetInsertBlock()
+ recoverbb := llvm.AddBasicBlock(fr.function, "")
+ contbb := llvm.AddBasicBlock(fr.function, "")
+ canRecover := fr.builder.CreateTrunc(fr.canRecover, llvm.Int1Type(), "")
+ fr.builder.CreateCondBr(canRecover, recoverbb, contbb)
+
+ fr.builder.SetInsertPointAtEnd(recoverbb)
+ var recovered llvm.Value
+ if isDeferredRecover {
+ recovered = fr.runtime.deferredRecover.call(fr)[0]
+ } else {
+ recovered = fr.runtime.recover.call(fr)[0]
+ }
+ // The runtime call may have changed the current block; re-read it so
+ // the PHI's incoming edge refers to the actual predecessor.
+ recoverbb = fr.builder.GetInsertBlock()
+ fr.builder.CreateBr(contbb)
+
+ fr.builder.SetInsertPointAtEnd(contbb)
+ eface := types.NewInterface(nil, nil)
+ llv := fr.builder.CreatePHI(fr.types.ToLLVM(eface), "")
+ llv.AddIncoming(
+ []llvm.Value{llvm.ConstNull(llv.Type()), recovered},
+ []llvm.BasicBlock{startbb, recoverbb},
+ )
+ return newValue(llv, eface)
+}
+
+// callPanic implements the panic() builtin: it calls the runtime panic
+// routine, which does not return, and terminates the block with
+// unreachable.
+func (fr *frame) callPanic(arg *govalue) {
+ fr.runtime.panic.call(fr, arg.value)
+ fr.builder.CreateUnreachable()
+}
diff --git a/irgen/cabi.go b/irgen/cabi.go
new file mode 100644
index 0000000..5d6c634
--- /dev/null
+++ b/irgen/cabi.go
@@ -0,0 +1,665 @@
+//===- cabi.go - C ABI abstraction layer ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an abstraction layer for the platform's C ABI (currently
+// supports only Linux/x86_64).
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// abiArgInfo classifies how a value is passed or returned under the
+// platform C ABI.
+type abiArgInfo int
+
+const (
+ // AIK_Direct: the value is passed directly (possibly expanded into
+ // register-sized pieces).
+ AIK_Direct = abiArgInfo(iota)
+ // AIK_Indirect: the value is passed in memory through a pointer.
+ AIK_Indirect
+)
+
+// backendType is an ABI-level representation of a type that knows how to
+// lower itself to an LLVM type in a given context.
+type backendType interface {
+ ToLLVM(llvm.Context) llvm.Type
+}
+
+// ptrBType is the backend representation of any pointer-like value.
+type ptrBType struct {
+}
+
+// ToLLVM lowers the pointer to i8* in address space 0.
+func (t ptrBType) ToLLVM(c llvm.Context) llvm.Type {
+ return llvm.PointerType(c.Int8Type(), 0)
+}
+
+// intBType is the backend representation of an integer of the given byte
+// width and signedness.
+type intBType struct {
+ width int
+ signed bool
+}
+
+// ToLLVM lowers the integer to an LLVM integer type of width*8 bits.
+// (Signedness does not affect the LLVM type; it matters for attributes.)
+func (t intBType) ToLLVM(c llvm.Context) llvm.Type {
+ return c.IntType(t.width * 8)
+}
+
+type floatBType struct {
+ isDouble bool
+}
+
+func (t floatBType) ToLLVM(c llvm.Context) llvm.Type {
+ if t.isDouble {
+ return c.DoubleType()
+ } else {
+ return c.FloatType()
+ }
+}
+
+type structBType struct {
+ fields []backendType
+}
+
+func (t structBType) ToLLVM(c llvm.Context) llvm.Type {
+ var lfields []llvm.Type
+ for _, f := range t.fields {
+ lfields = append(lfields, f.ToLLVM(c))
+ }
+ return c.StructType(lfields, false)
+}
+
+// arrayBType is the backend representation of a fixed-length array of a
+// single element representation.
+type arrayBType struct {
+ length uint64
+ elem backendType
+}
+
+// ToLLVM lowers the array to an LLVM array type of the lowered element
+// type.
+func (t arrayBType) ToLLVM(c llvm.Context) llvm.Type {
+ return llvm.ArrayType(t.elem.ToLLVM(c), int(t.length))
+}
+
// align returns the smallest y >= x such that y % a == 0.
// a is expected to be a positive alignment.
func align(x, a int64) int64 {
	// Round x up to the alignment boundary using truncating division.
	// Since Go defines q == (q/a)*a + q%a, (q/a)*a is identical to the
	// subtract-remainder formulation q - q%a.
	q := x + a - 1
	return (q / a) * a
}
+
+// sizeofStruct computes the size in bytes of a struct with the given
+// field types, laying each field at the next offset that satisfies its
+// alignment. Note the result does not include tail padding.
+func (tm *llvmTypeMap) sizeofStruct(fields ...types.Type) int64 {
+ var o int64
+ for _, f := range fields {
+ a := tm.Alignof(f)
+ o = align(o, a)
+ o += tm.Sizeof(f)
+ }
+ return o
+}
+
+// This decides whether the x86_64 classification algorithm produces MEMORY for
+// the given type. Given the subset of types that Go supports, this is exactly
+// equivalent to testing the type's size. See in particular the first step of
+// the algorithm and its footnote.
+func (tm *llvmTypeMap) classify(t ...types.Type) abiArgInfo {
+ if tm.sizeofStruct(t...) > 16 {
+ return AIK_Indirect
+ }
+ return AIK_Direct
+}
+
+func (tm *llvmTypeMap) sliceBackendType() backendType {
+ i8ptr := &ptrBType{}
+ uintptr := &intBType{tm.target.PointerSize(), false}
+ return &structBType{[]backendType{i8ptr, uintptr, uintptr}}
+}
+
+// getBackendType maps a Go type to its ABI-level backend representation.
+// Named types are resolved to their underlying type; basic types map to
+// integer/float/pointer representations; composites are expanded
+// structurally. Panics on an unhandled type.
+func (tm *llvmTypeMap) getBackendType(t types.Type) backendType {
+ switch t := t.(type) {
+ case *types.Named:
+ return tm.getBackendType(t.Underlying())
+
+ case *types.Basic:
+ switch t.Kind() {
+ case types.Bool, types.Uint8:
+ return &intBType{1, false}
+ case types.Int8:
+ return &intBType{1, true}
+ case types.Uint16:
+ return &intBType{2, false}
+ case types.Int16:
+ return &intBType{2, true}
+ case types.Uint32:
+ return &intBType{4, false}
+ case types.Int32:
+ return &intBType{4, true}
+ case types.Uint64:
+ return &intBType{8, false}
+ case types.Int64:
+ return &intBType{8, true}
+ case types.Uint, types.Uintptr:
+ return &intBType{tm.target.PointerSize(), false}
+ case types.Int:
+ return &intBType{tm.target.PointerSize(), true}
+ case types.Float32:
+ return &floatBType{false}
+ case types.Float64:
+ return &floatBType{true}
+ case types.UnsafePointer:
+ return &ptrBType{}
+ case types.Complex64:
+ // Complex values are passed as a pair of floats.
+ f32 := &floatBType{false}
+ return &structBType{[]backendType{f32, f32}}
+ case types.Complex128:
+ f64 := &floatBType{true}
+ return &structBType{[]backendType{f64, f64}}
+ case types.String:
+ // String header: { data pointer, pointer-sized length }.
+ return &structBType{[]backendType{&ptrBType{}, &intBType{tm.target.PointerSize(), false}}}
+ }
+
+ case *types.Struct:
+ var fields []backendType
+ for i := 0; i != t.NumFields(); i++ {
+ f := t.Field(i)
+ fields = append(fields, tm.getBackendType(f.Type()))
+ }
+ return &structBType{fields}
+
+ case *types.Pointer, *types.Signature, *types.Map, *types.Chan:
+ // All of these are represented as a single pointer at the ABI level.
+ return &ptrBType{}
+
+ case *types.Interface:
+ // Interface header: two pointers.
+ i8ptr := &ptrBType{}
+ return &structBType{[]backendType{i8ptr, i8ptr}}
+
+ case *types.Slice:
+ return tm.sliceBackendType()
+
+ case *types.Array:
+ return &arrayBType{uint64(t.Len()), tm.getBackendType(t.Elem())}
+ }
+
+ panic("unhandled type: " + t.String())
+}
+
+// offsetedType pairs a scalar backend type with its byte offset within an
+// enclosing aggregate.
+type offsetedType struct {
+ typ backendType
+ offset uint64
+}
+
+// getBackendOffsets flattens an aggregate backend type into the list of
+// its scalar leaves, each annotated with its byte offset from the start
+// of the aggregate. A scalar type yields itself at offset 0.
+func (tm *llvmTypeMap) getBackendOffsets(bt backendType) (offsets []offsetedType) {
+ switch bt := bt.(type) {
+ case *structBType:
+ t := bt.ToLLVM(tm.ctx)
+ for i, f := range bt.fields {
+ // Field offsets come from the LLVM target data layout.
+ offset := tm.target.ElementOffset(t, i)
+ fieldOffsets := tm.getBackendOffsets(f)
+ for _, fo := range fieldOffsets {
+ offsets = append(offsets, offsetedType{fo.typ, offset + fo.offset})
+ }
+ }
+
+ case *arrayBType:
+ // Each element repeats the flattened element layout at a multiple of
+ // the element's allocation size.
+ size := tm.target.TypeAllocSize(bt.elem.ToLLVM(tm.ctx))
+ fieldOffsets := tm.getBackendOffsets(bt.elem)
+ for i := uint64(0); i != bt.length; i++ {
+ for _, fo := range fieldOffsets {
+ offsets = append(offsets, offsetedType{fo.typ, i*size + fo.offset})
+ }
+ }
+
+ default:
+ offsets = []offsetedType{offsetedType{bt, 0}}
+ }
+
+ return
+}
+
+// classifyEightbyte classifies one "eightbyte" (a group of flattened
+// scalars occupying the same 8-byte chunk) as INTEGER or SSE, increments
+// the corresponding register counter, and returns the LLVM type used to
+// pass that eightbyte.
+func (tm *llvmTypeMap) classifyEightbyte(offsets []offsetedType, numInt, numSSE *int) llvm.Type {
+ if len(offsets) == 1 {
+ // A lone scalar is passed as-is; floats count against SSE registers,
+ // everything else against integer registers.
+ if _, ok := offsets[0].typ.(*floatBType); ok {
+ *numSSE++
+ } else {
+ *numInt++
+ }
+ return offsets[0].typ.ToLLVM(tm.ctx)
+ }
+ // This implements classification for the basic types and step 4 of the
+ // classification algorithm. At this point, the only two possible
+ // classifications are SSE (floats) and INTEGER (everything else).
+ sse := true
+ for _, ot := range offsets {
+ if _, ok := ot.typ.(*floatBType); !ok {
+ sse = false
+ break
+ }
+ }
+ if sse {
+ // This can only be (float, float), which uses an SSE vector.
+ *numSSE++
+ return llvm.VectorType(tm.ctx.FloatType(), 2)
+ } else {
+ *numInt++
+ // Pack the mixed scalars into one integer wide enough to span from
+ // the first leaf to the end of the last leaf.
+ width := offsets[len(offsets)-1].offset + tm.target.TypeAllocSize(offsets[len(offsets)-1].typ.ToLLVM(tm.ctx)) - offsets[0].offset
+ return tm.ctx.IntType(int(width) * 8)
+ }
+}
+
+// expandType appends the ABI expansion of bt (at most two eightbytes) to
+// argTypes and the matching parameter attributes to argAttrs. It returns
+// the extended slices plus the number of INTEGER and SSE registers the
+// expansion consumes.
+func (tm *llvmTypeMap) expandType(argTypes []llvm.Type, argAttrs []llvm.Attribute, bt backendType) ([]llvm.Type, []llvm.Attribute, int, int) {
+ var numInt, numSSE int
+ var argAttr llvm.Attribute
+
+ switch bt := bt.(type) {
+ case *structBType, *arrayBType:
+ // Split the flattened leaves into the low eightbyte (offset < 8) and
+ // the high eightbyte (offset >= 8), classifying each separately.
+ bo := tm.getBackendOffsets(bt)
+ sp := 0
+ for sp != len(bo) && bo[sp].offset < 8 {
+ sp++
+ }
+ eb1 := bo[0:sp]
+ eb2 := bo[sp:]
+ if len(eb2) > 0 {
+ argTypes = append(argTypes, tm.classifyEightbyte(eb1, &numInt, &numSSE), tm.classifyEightbyte(eb2, &numInt, &numSSE))
+ argAttrs = append(argAttrs, 0, 0)
+ } else {
+ argTypes = append(argTypes, tm.classifyEightbyte(eb1, &numInt, &numSSE))
+ argAttrs = append(argAttrs, 0)
+ }
+
+ return argTypes, argAttrs, numInt, numSSE
+
+ case *intBType:
+ // Sub-word integers are extended by the callee per their signedness.
+ if bt.width < 4 {
+ if bt.signed {
+ argAttr = llvm.SExtAttribute
+ } else {
+ argAttr = llvm.ZExtAttribute
+ }
+ }
+ }
+
+ // Scalar case: a single eightbyte.
+ argTypes = append(argTypes, tm.classifyEightbyte([]offsetedType{{bt, 0}}, &numInt, &numSSE))
+ argAttrs = append(argAttrs, argAttr)
+
+ return argTypes, argAttrs, numInt, numSSE
+}
+
+// argInfo abstracts how a single argument is ABI-encoded into call
+// arguments and decoded back inside the callee.
+type argInfo interface {
+ // Emit instructions to builder to ABI encode val and store result to args.
+ encode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder, args []llvm.Value, val llvm.Value)
+
+ // Emit instructions to builder to ABI decode and return the resulting Value.
+ decode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder) llvm.Value
+}
+
+// retInfo abstracts how a function's result(s) are ABI-encoded by the
+// callee and decoded at the call site.
+type retInfo interface {
+ // Prepare args to receive a value. allocaBuilder refers to a builder in the entry block.
+ prepare(ctx llvm.Context, allocaBuilder llvm.Builder, args []llvm.Value)
+
+ // Emit instructions to builder to ABI decode the return value(s), if any. call is the
+ // call instruction. Must be called after prepare().
+ decode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder, call llvm.Value) []llvm.Value
+
+ // Emit instructions to builder to ABI encode the return value(s), if any, and return.
+ encode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder, vals []llvm.Value)
+}
+
+// directArgInfo describes an argument passed directly in registers:
+// argOffset is its position in the expanded parameter list, argTypes are
+// the (one or two) expanded parameter types, and valType is the
+// argument's original LLVM type.
+type directArgInfo struct {
+ argOffset int
+ argTypes []llvm.Type
+ valType llvm.Type
+}
+
+// directEncode stores the ABI encoding of val into args, reinterpreting
+// it through a stack slot as the one or two expanded argument types.
+// Zero argTypes means nothing is passed; more than two is a bug.
+func directEncode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder, argTypes []llvm.Type, args []llvm.Value, val llvm.Value) {
+ valType := val.Type()
+
+ switch len(argTypes) {
+ case 0:
+ // do nothing
+
+ case 1:
+ if argTypes[0].C == valType.C {
+ // Same LLVM type: pass through without a round-trip via memory.
+ args[0] = val
+ return
+ }
+ // Type-pun through a stack slot: store as valType, load as the
+ // expanded argument type.
+ alloca := allocaBuilder.CreateAlloca(valType, "")
+ bitcast := builder.CreateBitCast(alloca, llvm.PointerType(argTypes[0], 0), "")
+ builder.CreateStore(val, alloca)
+ args[0] = builder.CreateLoad(bitcast, "")
+
+ case 2:
+ // Split into two eightbytes via a temporary struct view of the slot.
+ encodeType := llvm.StructType(argTypes, false)
+ alloca := allocaBuilder.CreateAlloca(valType, "")
+ bitcast := builder.CreateBitCast(alloca, llvm.PointerType(encodeType, 0), "")
+ builder.CreateStore(val, alloca)
+ args[0] = builder.CreateLoad(builder.CreateStructGEP(bitcast, 0, ""), "")
+ args[1] = builder.CreateLoad(builder.CreateStructGEP(bitcast, 1, ""), "")
+
+ default:
+ panic("unexpected argTypes size")
+ }
+}
+
+// encode writes val's ABI encoding into the slice positions this
+// argument occupies within args.
+func (ai *directArgInfo) encode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder, args []llvm.Value, val llvm.Value) {
+ directEncode(ctx, allocaBuilder, builder, ai.argTypes, args[ai.argOffset:ai.argOffset+len(ai.argTypes)], val)
+}
+
+// directDecode reconstructs a value of valType from its one or two
+// ABI-expanded pieces in args, type-punning through a stack slot when the
+// types differ. Zero args yields a null empty struct.
+func directDecode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder, valType llvm.Type, args []llvm.Value) llvm.Value {
+ var alloca llvm.Value
+
+ switch len(args) {
+ case 0:
+ return llvm.ConstNull(ctx.StructType(nil, false))
+
+ case 1:
+ if args[0].Type().C == valType.C {
+ // Same LLVM type: no memory round-trip needed.
+ return args[0]
+ }
+ alloca = allocaBuilder.CreateAlloca(valType, "")
+ bitcast := builder.CreateBitCast(alloca, llvm.PointerType(args[0].Type(), 0), "")
+ builder.CreateStore(args[0], bitcast)
+
+ case 2:
+ // Store the two eightbytes through a struct view, then load as
+ // valType.
+ alloca = allocaBuilder.CreateAlloca(valType, "")
+ var argTypes []llvm.Type
+ for _, a := range args {
+ argTypes = append(argTypes, a.Type())
+ }
+ encodeType := ctx.StructType(argTypes, false)
+ bitcast := builder.CreateBitCast(alloca, llvm.PointerType(encodeType, 0), "")
+ builder.CreateStore(args[0], builder.CreateStructGEP(bitcast, 0, ""))
+ builder.CreateStore(args[1], builder.CreateStructGEP(bitcast, 1, ""))
+
+ default:
+ panic("unexpected argTypes size")
+ }
+
+ return builder.CreateLoad(alloca, "")
+}
+
+func (ai *directArgInfo) decode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder) llvm.Value {
+ var args []llvm.Value
+ fn := builder.GetInsertBlock().Parent()
+ for i, _ := range ai.argTypes {
+ args = append(args, fn.Param(ai.argOffset+i))
+ }
+ return directDecode(ctx, allocaBuilder, builder, ai.valType, args)
+}
+
+// indirectArgInfo describes an argument passed in memory: the caller
+// stores it to a stack slot and passes the slot's address at argOffset.
+type indirectArgInfo struct {
+ argOffset int
+}
+
+// encode spills val to an entry-block alloca and passes its address.
+func (ai *indirectArgInfo) encode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder, args []llvm.Value, val llvm.Value) {
+ alloca := allocaBuilder.CreateAlloca(val.Type(), "")
+ builder.CreateStore(val, alloca)
+ args[ai.argOffset] = alloca
+}
+
+// decode loads the argument value through the pointer parameter.
+func (ai *indirectArgInfo) decode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder) llvm.Value {
+ fn := builder.GetInsertBlock().Parent()
+ return builder.CreateLoad(fn.Param(ai.argOffset), "")
+}
+
+// directRetInfo describes results returned directly in registers.
+// numResults is the Go-level result count, retTypes the expanded return
+// types (at most two), and resultsType the aggregated Go-level result
+// type.
+type directRetInfo struct {
+ numResults int
+ retTypes []llvm.Type
+ resultsType llvm.Type
+}
+
+// prepare is a no-op: direct returns need no result slot in the call
+// arguments.
+func (ri *directRetInfo) prepare(ctx llvm.Context, allocaBuilder llvm.Builder, args []llvm.Value) {
+}
+
+// decode unpacks the call instruction's return value into the Go-level
+// result values: first undoing the ABI expansion into resultsType, then
+// splitting a multi-result aggregate into its components.
+func (ri *directRetInfo) decode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder, call llvm.Value) []llvm.Value {
+ var args []llvm.Value
+ switch len(ri.retTypes) {
+ case 0:
+ return nil
+ case 1:
+ args = []llvm.Value{call}
+ default:
+ // Two eightbytes were returned as a struct; pull them apart.
+ args = make([]llvm.Value, len(ri.retTypes))
+ for i := 0; i != len(ri.retTypes); i++ {
+ args[i] = builder.CreateExtractValue(call, i, "")
+ }
+ }
+
+ d := directDecode(ctx, allocaBuilder, builder, ri.resultsType, args)
+
+ if ri.numResults == 1 {
+ return []llvm.Value{d}
+ } else {
+ // Multiple Go results were aggregated into one struct; split them.
+ results := make([]llvm.Value, ri.numResults)
+ for i := 0; i != ri.numResults; i++ {
+ results[i] = builder.CreateExtractValue(d, i, "")
+ }
+ return results
+ }
+}
+
+// encode emits the callee-side return: it aggregates multiple Go results
+// into resultsType, applies the ABI expansion, and emits ret (or ret
+// void when nothing is returned).
+func (ri *directRetInfo) encode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder, vals []llvm.Value) {
+ if len(ri.retTypes) == 0 {
+ builder.CreateRetVoid()
+ return
+ }
+
+ var val llvm.Value
+ switch ri.numResults {
+ case 1:
+ val = vals[0]
+ default:
+ // Build the aggregated results struct value field by field.
+ val = llvm.Undef(ri.resultsType)
+ for i, v := range vals {
+ val = builder.CreateInsertValue(val, v, i, "")
+ }
+ }
+
+ args := make([]llvm.Value, len(ri.retTypes))
+ directEncode(ctx, allocaBuilder, builder, ri.retTypes, args, val)
+
+ var retval llvm.Value
+ switch len(ri.retTypes) {
+ case 1:
+ retval = args[0]
+ default:
+ // Two eightbytes are returned packed into a struct.
+ retval = llvm.Undef(ctx.StructType(ri.retTypes, false))
+ for i, a := range args {
+ retval = builder.CreateInsertValue(retval, a, i, "")
+ }
+ }
+ builder.CreateRet(retval)
+}
+
+// indirectRetInfo describes results returned in memory via a hidden
+// sret pointer passed as the first argument. sretSlot is filled in by
+// prepare at the call site.
+type indirectRetInfo struct {
+ numResults int
+ sretSlot llvm.Value
+ resultsType llvm.Type
+}
+
+// prepare allocates the result slot in the entry block and passes its
+// address as the first (sret) argument.
+func (ri *indirectRetInfo) prepare(ctx llvm.Context, allocaBuilder llvm.Builder, args []llvm.Value) {
+ ri.sretSlot = allocaBuilder.CreateAlloca(ri.resultsType, "")
+ args[0] = ri.sretSlot
+}
+
+func (ri *indirectRetInfo) decode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder, call llvm.Value) []llvm.Value {
+ if ri.numResults == 1 {
+ return []llvm.Value{builder.CreateLoad(ri.sretSlot, "")}
+ } else {
+ vals := make([]llvm.Value, ri.numResults)
+ for i, _ := range vals {
+ vals[i] = builder.CreateLoad(builder.CreateStructGEP(ri.sretSlot, i, ""), "")
+ }
+ return vals
+ }
+}
+
+// encode emits the callee-side return for an sret function: results are
+// stored through the hidden first parameter and the function returns
+// void.
+func (ri *indirectRetInfo) encode(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder, vals []llvm.Value) {
+ fn := builder.GetInsertBlock().Parent()
+ sretSlot := fn.Param(0)
+
+ if ri.numResults == 1 {
+ builder.CreateStore(vals[0], sretSlot)
+ } else {
+ // Multiple results: store each field of the aggregated struct.
+ for i, v := range vals {
+ builder.CreateStore(v, builder.CreateStructGEP(sretSlot, i, ""))
+ }
+ }
+ builder.CreateRetVoid()
+}
+
+// functionTypeInfo bundles everything needed to declare, call, and
+// implement a function under the C ABI: the lowered LLVM function type,
+// per-parameter and return attributes, and the encoders/decoders for
+// each argument and for the result(s).
+type functionTypeInfo struct {
+ functionType llvm.Type
+ argAttrs []llvm.Attribute
+ retAttr llvm.Attribute
+ argInfos []argInfo
+ retInf retInfo
+}
+
+// declare adds a function with the given name and this ABI signature to
+// module m, attaching the return attribute and any non-zero parameter
+// attributes.
+func (fi *functionTypeInfo) declare(m llvm.Module, name string) llvm.Value {
+ fn := llvm.AddFunction(m, name, fi.functionType)
+ fn.AddFunctionAttr(fi.retAttr)
+ for i, a := range fi.argAttrs {
+ if a != 0 {
+ fn.Param(i).AddAttribute(a)
+ }
+ }
+ return fn
+}
+
+// call emits an ABI-conforming call to callee with the given Go-level
+// argument values and returns the decoded Go-level results. The callee
+// is bitcast to the lowered function pointer type first.
+func (fi *functionTypeInfo) call(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder, callee llvm.Value, args []llvm.Value) []llvm.Value {
+ callArgs := make([]llvm.Value, len(fi.argAttrs))
+ for i, a := range args {
+ fi.argInfos[i].encode(ctx, allocaBuilder, builder, callArgs, a)
+ }
+ fi.retInf.prepare(ctx, allocaBuilder, callArgs)
+ typedCallee := builder.CreateBitCast(callee, llvm.PointerType(fi.functionType, 0), "")
+ call := builder.CreateCall(typedCallee, callArgs, "")
+ // Attribute index 0 is the return value; parameters start at 1.
+ call.AddInstrAttribute(0, fi.retAttr)
+ for i, a := range fi.argAttrs {
+ call.AddInstrAttribute(i+1, a)
+ }
+ return fi.retInf.decode(ctx, allocaBuilder, builder, call)
+}
+
+// invoke is the exception-aware variant of call: it emits an LLVM invoke
+// that continues at cont on normal return and unwinds to lpad, then
+// decodes the results with the builder positioned at the end of cont.
+func (fi *functionTypeInfo) invoke(ctx llvm.Context, allocaBuilder llvm.Builder, builder llvm.Builder, callee llvm.Value, args []llvm.Value, cont, lpad llvm.BasicBlock) []llvm.Value {
+ callArgs := make([]llvm.Value, len(fi.argAttrs))
+ for i, a := range args {
+ fi.argInfos[i].encode(ctx, allocaBuilder, builder, callArgs, a)
+ }
+ fi.retInf.prepare(ctx, allocaBuilder, callArgs)
+ typedCallee := builder.CreateBitCast(callee, llvm.PointerType(fi.functionType, 0), "")
+ call := builder.CreateInvoke(typedCallee, callArgs, cont, lpad, "")
+ // Attribute index 0 is the return value; parameters start at 1.
+ call.AddInstrAttribute(0, fi.retAttr)
+ for i, a := range fi.argAttrs {
+ call.AddInstrAttribute(i+1, a)
+ }
+ builder.SetInsertPointAtEnd(cont)
+ return fi.retInf.decode(ctx, allocaBuilder, builder, call)
+}
+
+// getFunctionTypeInfo lowers a Go function signature (argument and
+// result types) to its C ABI functionTypeInfo: it classifies the results
+// as direct or sret, then classifies each argument while tracking the
+// remaining INTEGER and SSE argument registers.
+func (tm *llvmTypeMap) getFunctionTypeInfo(args []types.Type, results []types.Type) (fi functionTypeInfo) {
+ var returnType llvm.Type
+ var argTypes []llvm.Type
+ if len(results) == 0 {
+ returnType = llvm.VoidType()
+ fi.retInf = &directRetInfo{}
+ } else {
+ aik := tm.classify(results...)
+
+ // Multiple results are aggregated into a single struct type.
+ var resultsType llvm.Type
+ if len(results) == 1 {
+ resultsType = tm.ToLLVM(results[0])
+ } else {
+ elements := make([]llvm.Type, len(results))
+ for i := range elements {
+ elements[i] = tm.ToLLVM(results[i])
+ }
+ resultsType = tm.ctx.StructType(elements, false)
+ }
+
+ switch aik {
+ case AIK_Direct:
+ var retFields []backendType
+ for _, t := range results {
+ retFields = append(retFields, tm.getBackendType(t))
+ }
+ bt := &structBType{retFields}
+
+ retTypes, retAttrs, _, _ := tm.expandType(nil, nil, bt)
+ switch len(retTypes) {
+ case 0: // e.g., empty struct
+ returnType = llvm.VoidType()
+ case 1:
+ returnType = retTypes[0]
+ fi.retAttr = retAttrs[0]
+ case 2:
+ returnType = llvm.StructType(retTypes, false)
+ default:
+ panic("unexpected expandType result")
+ }
+ fi.retInf = &directRetInfo{numResults: len(results), retTypes: retTypes, resultsType: resultsType}
+
+ case AIK_Indirect:
+ // Results too large for registers: return via a hidden sret
+ // pointer passed as the first parameter.
+ returnType = llvm.VoidType()
+ argTypes = []llvm.Type{llvm.PointerType(resultsType, 0)}
+ fi.argAttrs = []llvm.Attribute{llvm.StructRetAttribute}
+ fi.retInf = &indirectRetInfo{numResults: len(results), resultsType: resultsType}
+ }
+ }
+
+ // Keep track of the number of INTEGER/SSE class registers remaining.
+ remainingInt := 6
+ remainingSSE := 8
+
+ for _, arg := range args {
+ aik := tm.classify(arg)
+
+ isDirect := aik == AIK_Direct
+ if isDirect {
+ bt := tm.getBackendType(arg)
+ directArgTypes, directArgAttrs, numInt, numSSE := tm.expandType(argTypes, fi.argAttrs, bt)
+
+ // Check if the argument can fit into the remaining registers, or if
+ // it would just occupy one register (which pushes the whole argument
+ // onto the stack anyway).
+ if numInt <= remainingInt && numSSE <= remainingSSE || numInt+numSSE == 1 {
+ remainingInt -= numInt
+ remainingSSE -= numSSE
+ argInfo := &directArgInfo{argOffset: len(argTypes), valType: bt.ToLLVM(tm.ctx)}
+ fi.argInfos = append(fi.argInfos, argInfo)
+ argTypes = directArgTypes
+ fi.argAttrs = directArgAttrs
+ // The expanded pieces for this argument are the newly
+ // appended tail of argTypes.
+ argInfo.argTypes = argTypes[argInfo.argOffset:len(argTypes)]
+ } else {
+ // No remaining registers; pass on the stack.
+ isDirect = false
+ }
+ }
+
+ if !isDirect {
+ // Pass indirectly: a byval pointer parameter.
+ fi.argInfos = append(fi.argInfos, &indirectArgInfo{len(argTypes)})
+ argTypes = append(argTypes, llvm.PointerType(tm.ToLLVM(arg), 0))
+ fi.argAttrs = append(fi.argAttrs, llvm.ByValAttribute)
+ }
+ }
+
+ fi.functionType = llvm.FunctionType(returnType, argTypes, false)
+ return
+}
+
+// getSignatureInfo lowers a *types.Signature to its functionTypeInfo.
+// A method receiver becomes the first argument; a non-pointer receiver
+// (other than unsafe.Pointer) is wrapped in a pointer type first.
+func (tm *llvmTypeMap) getSignatureInfo(sig *types.Signature) functionTypeInfo {
+ var args, results []types.Type
+ if sig.Recv() != nil {
+ recvtype := sig.Recv().Type()
+ if _, ok := recvtype.Underlying().(*types.Pointer); !ok && recvtype != types.Typ[types.UnsafePointer] {
+ recvtype = types.NewPointer(recvtype)
+ }
+ args = []types.Type{recvtype}
+ }
+
+ for i := 0; i != sig.Params().Len(); i++ {
+ args = append(args, sig.Params().At(i).Type())
+ }
+ for i := 0; i != sig.Results().Len(); i++ {
+ results = append(results, sig.Results().At(i).Type())
+ }
+ return tm.getFunctionTypeInfo(args, results)
+}
diff --git a/irgen/call.go b/irgen/call.go
new file mode 100644
index 0000000..753a086
--- /dev/null
+++ b/irgen/call.go
@@ -0,0 +1,44 @@
+//===- call.go - IR generation for calls ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements IR generation for calls.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// createCall emits the code for a function call,
+// taking into account receivers, and panic/defer.
+// createCall emits the code for a function call,
+// taking into account receivers, and panic/defer.
+// When the frame has an unwind block (panic/defer in scope), an invoke
+// is emitted instead of a plain call so unwinding lands in that block.
+func (fr *frame) createCall(fn *govalue, argValues []*govalue) []*govalue {
+ fntyp := fn.Type().Underlying().(*types.Signature)
+ typinfo := fr.types.getSignatureInfo(fntyp)
+
+ args := make([]llvm.Value, len(argValues))
+ for i, arg := range argValues {
+ args[i] = arg.value
+ }
+ var results []llvm.Value
+ if fr.unwindBlock.IsNil() {
+ results = typinfo.call(fr.types.ctx, fr.allocaBuilder, fr.builder, fn.value, args)
+ } else {
+ contbb := llvm.AddBasicBlock(fr.function, "")
+ results = typinfo.invoke(fr.types.ctx, fr.allocaBuilder, fr.builder, fn.value, args, contbb, fr.unwindBlock)
+ }
+
+ // Wrap each raw result with its Go result type from the signature.
+ resultValues := make([]*govalue, len(results))
+ for i, res := range results {
+ resultValues[i] = newValue(res, fntyp.Results().At(i).Type())
+ }
+ return resultValues
+}
diff --git a/irgen/channels.go b/irgen/channels.go
new file mode 100644
index 0000000..dc8f2c0
--- /dev/null
+++ b/irgen/channels.go
@@ -0,0 +1,123 @@
+//===- channels.go - IR generation for channels ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements IR generation for channels.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// makeChan implements make(chantype[, size])
+func (fr *frame) makeChan(chantyp types.Type, size *govalue) *govalue {
+ // TODO(pcc): call __go_new_channel_big here if needed
+ dyntyp := fr.types.ToRuntime(chantyp)
+ size = fr.convert(size, types.Typ[types.Uintptr])
+ ch := fr.runtime.newChannel.call(fr, dyntyp, size.value)[0]
+ return newValue(ch, chantyp)
+}
+
+// chanSend implements ch<- x
+func (fr *frame) chanSend(ch *govalue, elem *govalue) {
+ elemtyp := ch.Type().Underlying().(*types.Chan).Elem()
+ elem = fr.convert(elem, elemtyp)
+ elemptr := fr.allocaBuilder.CreateAlloca(elem.value.Type(), "")
+ fr.builder.CreateStore(elem.value, elemptr)
+ elemptr = fr.builder.CreateBitCast(elemptr, llvm.PointerType(llvm.Int8Type(), 0), "")
+ chantyp := fr.types.ToRuntime(ch.Type())
+ fr.runtime.sendBig.call(fr, chantyp, ch.value, elemptr)
+}
+
+// chanRecv implements x[, ok] = <-ch
+func (fr *frame) chanRecv(ch *govalue, commaOk bool) (x, ok *govalue) {
+ elemtyp := ch.Type().Underlying().(*types.Chan).Elem()
+ ptr := fr.allocaBuilder.CreateAlloca(fr.types.ToLLVM(elemtyp), "")
+ ptri8 := fr.builder.CreateBitCast(ptr, llvm.PointerType(llvm.Int8Type(), 0), "")
+ chantyp := fr.types.ToRuntime(ch.Type())
+
+ if commaOk {
+ okval := fr.runtime.chanrecv2.call(fr, chantyp, ch.value, ptri8)[0]
+ ok = newValue(okval, types.Typ[types.Bool])
+ } else {
+ fr.runtime.receive.call(fr, chantyp, ch.value, ptri8)
+ }
+ x = newValue(fr.builder.CreateLoad(ptr, ""), elemtyp)
+ return
+}
+
+// chanClose implements close(ch)
+func (fr *frame) chanClose(ch *govalue) {
+ fr.runtime.builtinClose.call(fr, ch.value)
+}
+
+// selectState is equivalent to ssa.SelectState
+type selectState struct {
+ Dir types.ChanDir
+ Chan *govalue
+ Send *govalue
+}
+
+// chanSelect implements a select statement over the given cases. It
+// builds a runtime select object, registers one send or recv2 entry per
+// case (plus a default entry with index -1 when non-blocking), fires
+// selectgo, and returns the chosen case index, the received-ok flag (if
+// any case receives), and the received element values.
+func (fr *frame) chanSelect(states []selectState, blocking bool) (index, recvOk *govalue, recvElems []*govalue) {
+ n := uint64(len(states))
+ if !blocking {
+ // non-blocking means there's a default case
+ n++
+ }
+ size := llvm.ConstInt(llvm.Int32Type(), n, false)
+ selectp := fr.runtime.newSelect.call(fr, size)[0]
+
+ // Allocate stack for the values to send and receive.
+ //
+ // TODO(axw) check if received elements have any users, and
+ // elide stack allocation if not (pass nil to recv2 instead.)
+ ptrs := make([]llvm.Value, len(states))
+ for i, state := range states {
+ chantyp := state.Chan.Type().Underlying().(*types.Chan)
+ elemtyp := fr.types.ToLLVM(chantyp.Elem())
+ ptrs[i] = fr.allocaBuilder.CreateAlloca(elemtyp, "")
+ if state.Dir == types.SendOnly {
+ fr.builder.CreateStore(state.Send.value, ptrs[i])
+ } else {
+ // Temporarily hold the slot address; replaced by the loaded
+ // element after selectgo returns.
+ recvElems = append(recvElems, newValue(ptrs[i], chantyp.Elem()))
+ }
+ }
+
+ // Create select{send,recv2} calls.
+ var receivedp llvm.Value
+ if len(recvElems) > 0 {
+ receivedp = fr.allocaBuilder.CreateAlloca(fr.types.ToLLVM(types.Typ[types.Bool]), "")
+ }
+ if !blocking {
+ // If the default case is chosen, the index must be -1.
+ fr.runtime.selectdefault.call(fr, selectp, llvm.ConstAllOnes(llvm.Int32Type()))
+ }
+ for i, state := range states {
+ ch := state.Chan.value
+ index := llvm.ConstInt(llvm.Int32Type(), uint64(i), false)
+ if state.Dir == types.SendOnly {
+ fr.runtime.selectsend.call(fr, selectp, ch, ptrs[i], index)
+ } else {
+ fr.runtime.selectrecv2.call(fr, selectp, ch, ptrs[i], receivedp, index)
+ }
+ }
+
+ // Fire off the select.
+ index = newValue(fr.runtime.selectgo.call(fr, selectp)[0], types.Typ[types.Int])
+ if len(recvElems) > 0 {
+ recvOk = newValue(fr.builder.CreateLoad(receivedp, ""), types.Typ[types.Bool])
+ for _, recvElem := range recvElems {
+ // Replace each slot address with the element loaded from it.
+ recvElem.value = fr.builder.CreateLoad(recvElem.value, "")
+ }
+ }
+ return index, recvOk, recvElems
+}
diff --git a/irgen/closures.go b/irgen/closures.go
new file mode 100644
index 0000000..4ed3ab7
--- /dev/null
+++ b/irgen/closures.go
@@ -0,0 +1,37 @@
+//===- closures.go - IR generation for closures ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements IR generation for closures.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// makeClosure creates a closure from a function pointer and
+// a set of bindings. The bindings are addresses of captured
+// variables.
+func (fr *frame) makeClosure(fn *govalue, bindings []*govalue) *govalue {
+ govalues := append([]*govalue{fn}, bindings...)
+ fields := make([]*types.Var, len(govalues))
+ for i, v := range govalues {
+ field := types.NewField(0, nil, "_", v.Type(), false)
+ fields[i] = field
+ }
+ block := fr.createTypeMalloc(types.NewStruct(fields, nil))
+ for i, v := range govalues {
+ addressPtr := fr.builder.CreateStructGEP(block, i, "")
+ fr.builder.CreateStore(v.value, addressPtr)
+ }
+ closure := fr.builder.CreateBitCast(block, fn.value.Type(), "")
+ return newValue(closure, fn.Type())
+}
diff --git a/irgen/compiler.go b/irgen/compiler.go
new file mode 100644
index 0000000..17ea877
--- /dev/null
+++ b/irgen/compiler.go
@@ -0,0 +1,361 @@
+//===- compiler.go - IR generator entry point -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the main IR generator entry point, (*Compiler).Compile.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+ "log"
+ "sort"
+ "strconv"
+ "strings"
+
+ llgobuild "llvm.org/llgo/build"
+ "llvm.org/llgo/debug"
+ "llvm.org/llvm/bindings/go/llvm"
+
+ "llvm.org/llgo/third_party/go.tools/go/gccgoimporter"
+ "llvm.org/llgo/third_party/go.tools/go/importer"
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// Module wraps the LLVM module generated for one Go package, together
+// with its import path and, for non-main packages, the serialized
+// gccgo-style export data (see buildExportData).
+type Module struct {
+ llvm.Module
+ Path string
+ ExportData []byte
+ disposed bool // guards Dispose against double-free
+}
+
+// Dispose releases the underlying LLVM module. It is idempotent:
+// calls after the first are no-ops.
+func (m *Module) Dispose() {
+ if m.disposed {
+ return
+ }
+ m.Module.Dispose()
+ m.disposed = true
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// CompilerOptions contains the options controlling IR generation.
+// The zero value is usable; see the individual field comments.
+type CompilerOptions struct {
+ // TargetTriple is the LLVM triple for the target.
+ TargetTriple string
+
+ // GenerateDebug decides whether debug data is
+ // generated in the output module.
+ GenerateDebug bool
+
+ // DebugPrefixMaps is a list of mappings from source prefixes to
+ // replacement prefixes, to be applied in debug info.
+ DebugPrefixMaps []debug.PrefixMap
+
+ // Logger is a logger used for tracing compilation.
+ Logger *log.Logger
+
+ // DumpSSA is a debugging option that dumps each SSA function
+ // to stderr before generating code for it.
+ DumpSSA bool
+
+ // GccgoPath is the path to the gccgo binary whose libgo we read import
+ // data from. If blank, the caller is expected to supply an import
+ // path in ImportPaths.
+ GccgoPath string
+
+ // ImportPaths is the list of additional import paths
+ ImportPaths []string
+
+ // SanitizerAttribute is an attribute to apply to functions to enable
+ // dynamic instrumentation using a sanitizer.
+ SanitizerAttribute llvm.Attribute
+}
+
+// Compiler translates type-checked Go packages into LLVM modules.
+// Construct one with NewCompiler; its configuration is then fixed and
+// Compile may be called to compile a package.
+type Compiler struct {
+ opts CompilerOptions
+ dataLayout string
+ pnacl bool
+}
+
+// NewCompiler creates a Compiler for the target described by opts.
+// The pseudo-triple "pnacl" (case-insensitive) is rewritten to the
+// real PNaCl triple, and the pnacl flag is recorded for later use.
+// It fails if no data layout is known for the target triple.
+func NewCompiler(opts CompilerOptions) (*Compiler, error) {
+ compiler := &Compiler{opts: opts}
+ if strings.ToLower(compiler.opts.TargetTriple) == "pnacl" {
+ compiler.opts.TargetTriple = PNaClTriple
+ compiler.pnacl = true
+ }
+ dataLayout, err := llvmDataLayout(compiler.opts.TargetTriple)
+ if err != nil {
+ return nil, err
+ }
+ compiler.dataLayout = dataLayout
+ return compiler, nil
+}
+
+// Compile type-checks and compiles the Go source files in filenames
+// into one LLVM module. importpath names the package being compiled;
+// if empty, the package name from the first file is used (see compile).
+// Each call builds a fresh per-compilation state (compiler struct).
+func (c *Compiler) Compile(filenames []string, importpath string) (m *Module, err error) {
+ target := llvm.NewTargetData(c.dataLayout)
+ compiler := &compiler{
+ CompilerOptions: c.opts,
+ dataLayout: c.dataLayout,
+ target: target,
+ pnacl: c.pnacl,
+ llvmtypes: NewLLVMTypeMap(llvm.GlobalContext(), target),
+ }
+ return compiler.compile(filenames, importpath)
+}
+
+// compiler holds the state for a single compilation (one Compile
+// call): the module under construction, type maps, the runtime
+// function interface, and debug-info state.
+type compiler struct {
+ CompilerOptions
+
+ module *Module
+ dataLayout string
+ target llvm.TargetData
+ fileset *token.FileSet
+
+ runtime *runtimeInterface
+ llvmtypes *llvmTypeMap
+ types *TypeMap
+
+ // runtimetypespkg is the type-checked runtime/types.go file,
+ // which is used for evaluating the types of runtime functions.
+ runtimetypespkg *types.Package
+
+ // pnacl is set to true if the target triple was originally
+ // specified as "pnacl". This is necessary, as the TargetTriple
+ // field will have been updated to the true triple used to
+ // compile PNaCl modules.
+ pnacl bool
+
+ debug *debug.DIBuilder
+}
+
+// logf writes a trace message to the configured Logger, if any.
+func (c *compiler) logf(format string, v ...interface{}) {
+ if c.Logger != nil {
+ c.Logger.Printf(format, v...)
+ }
+}
+
+// addCommonFunctionAttrs applies the function attributes every
+// generated function needs: tail calls disabled, split stacks, and
+// (when configured) the sanitizer instrumentation attribute.
+func (c *compiler) addCommonFunctionAttrs(fn llvm.Value) {
+ fn.AddTargetDependentFunctionAttr("disable-tail-calls", "true")
+ fn.AddTargetDependentFunctionAttr("split-stack", "")
+ if attr := c.SanitizerAttribute; attr != 0 {
+ fn.AddFunctionAttr(attr)
+ }
+}
+
+// compile is the implementation behind Compile. It parses and
+// type-checks the input files, builds SSA for the package, and
+// translates it to LLVM IR. For package main it additionally emits
+// __go_init_main; for any other package it attaches export data to
+// the returned Module.
+func (compiler *compiler) compile(filenames []string, importpath string) (m *Module, err error) {
+ buildctx, err := llgobuild.ContextFromTriple(compiler.TargetTriple)
+ if err != nil {
+ return nil, err
+ }
+
+ // initmap records, per imported package, its init-function data;
+ // the importer fills it in as packages are imported.
+ initmap := make(map[*types.Package]gccgoimporter.InitData)
+ var importer types.Importer
+ if compiler.GccgoPath == "" {
+ // No gccgo driver: read import data from the supplied import
+ // paths plus the current directory.
+ paths := append(append([]string{}, compiler.ImportPaths...), ".")
+ importer = gccgoimporter.GetImporter(paths, initmap)
+ } else {
+ // Query the gccgo installation for its libgo search paths.
+ var inst gccgoimporter.GccgoInstallation
+ err = inst.InitFromDriver(compiler.GccgoPath)
+ if err != nil {
+ return nil, err
+ }
+ importer = inst.GetImporter(compiler.ImportPaths, initmap)
+ }
+
+ impcfg := &loader.Config{
+ Fset: token.NewFileSet(),
+ TypeChecker: types.Config{
+ Import: importer,
+ Sizes: compiler.llvmtypes,
+ },
+ Build: &buildctx.Context,
+ }
+ // Must use parseFiles, so we retain comments;
+ // this is important for annotation processing.
+ astFiles, err := parseFiles(impcfg.Fset, filenames)
+ if err != nil {
+ return nil, err
+ }
+ // If no import path is specified, then set the import
+ // path to be the same as the package's name.
+ if importpath == "" {
+ importpath = astFiles[0].Name.String()
+ }
+ impcfg.CreateFromFiles(importpath, astFiles...)
+ iprog, err := impcfg.Load()
+ if err != nil {
+ return nil, err
+ }
+ program := ssa.Create(iprog, ssa.BareInits)
+ mainPkginfo := iprog.InitialPackages()[0]
+ mainPkg := program.CreatePackage(mainPkginfo)
+
+ // Create a Module, which contains the LLVM module.
+ modulename := importpath
+ compiler.module = &Module{Module: llvm.NewModule(modulename), Path: modulename}
+ compiler.module.SetTarget(compiler.TargetTriple)
+ compiler.module.SetDataLayout(compiler.dataLayout)
+
+ // Create a new translation unit.
+ unit := newUnit(compiler, mainPkg)
+
+ // Create the runtime interface.
+ compiler.runtime, err = newRuntimeInterface(compiler.module.Module, compiler.llvmtypes)
+ if err != nil {
+ return nil, err
+ }
+
+ mainPkg.Build()
+
+ // Create a struct responsible for mapping static types to LLVM types,
+ // and to runtime/dynamic type values.
+ compiler.types = NewTypeMap(
+ mainPkg,
+ compiler.llvmtypes,
+ compiler.module.Module,
+ compiler.runtime,
+ MethodResolver(unit),
+ )
+
+ if compiler.GenerateDebug {
+ compiler.debug = debug.NewDIBuilder(
+ types.Sizes(compiler.llvmtypes),
+ compiler.module.Module,
+ impcfg.Fset,
+ compiler.DebugPrefixMaps,
+ )
+ // Deferred calls run LIFO, so Finalize runs before Destroy.
+ defer compiler.debug.Destroy()
+ defer compiler.debug.Finalize()
+ }
+
+ unit.translatePackage(mainPkg)
+ compiler.processAnnotations(unit, mainPkginfo)
+
+ if importpath == "main" {
+ if err = compiler.createInitMainFunction(mainPkg, initmap); err != nil {
+ return nil, fmt.Errorf("failed to create __go_init_main: %v", err)
+ }
+ } else {
+ compiler.module.ExportData = compiler.buildExportData(mainPkg, initmap)
+ }
+
+ return compiler.module, nil
+}
+
+// byPriorityThenFunc sorts package init entries by ascending priority,
+// breaking ties by init function name so the order is deterministic.
+type byPriorityThenFunc []gccgoimporter.PackageInit
+
+func (a byPriorityThenFunc) Len() int { return len(a) }
+func (a byPriorityThenFunc) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byPriorityThenFunc) Less(i, j int) bool {
+ switch {
+ case a[i].Priority < a[j].Priority:
+ return true
+ case a[i].Priority > a[j].Priority:
+ return false
+ case a[i].InitFunc < a[j].InitFunc:
+ return true
+ default:
+ return false
+ }
+}
+
+// buildPackageInitData gathers the init entries of every package
+// imported by mainPkg, deduplicates them, and appends an entry for
+// this package's own init function (if it has one) at a priority one
+// higher than any dependency's.
+func (c *compiler) buildPackageInitData(mainPkg *ssa.Package, initmap map[*types.Package]gccgoimporter.InitData) gccgoimporter.InitData {
+ var inits []gccgoimporter.PackageInit
+ for _, imp := range mainPkg.Object.Imports() {
+ inits = append(inits, initmap[imp].Inits...)
+ }
+ sort.Sort(byPriorityThenFunc(inits))
+
+ // Deduplicate init entries. We want to preserve the entry with the highest priority.
+ // Normally a package's priorities will be consistent among its dependencies, but it is
+ // possible for them to be different. For example, if a standard library test augments a
+ // package which is a dependency of 'regexp' (which is imported by every test main package)
+ // with additional dependencies, those dependencies may cause the package under test to
+ // receive a higher priority than indicated by its init clause in 'regexp'.
+ uniqinits := make([]gccgoimporter.PackageInit, len(inits))
+ uniqinitpos := len(inits)
+ uniqinitnames := make(map[string]struct{})
+ // Walk the sorted list in reverse, filling uniqinits back-to-front,
+ // so that among duplicates the highest-priority entry is kept and
+ // the ascending order is preserved.
+ for i, _ := range inits {
+ init := inits[len(inits)-1-i]
+ if _, ok := uniqinitnames[init.InitFunc]; !ok {
+ uniqinitnames[init.InitFunc] = struct{}{}
+ uniqinitpos--
+ uniqinits[uniqinitpos] = init
+ }
+ }
+ // Drop the unused prefix left over from back-to-front filling.
+ uniqinits = uniqinits[uniqinitpos:]
+
+ // Our own init runs after all dependencies': one more than the
+ // highest dependency priority (or 1 if there are none).
+ ourprio := 1
+ if len(uniqinits) != 0 {
+ ourprio = uniqinits[len(uniqinits)-1].Priority + 1
+ }
+
+ if imp := mainPkg.Func("init"); imp != nil {
+ impname := c.types.mc.mangleFunctionName(imp)
+ uniqinits = append(uniqinits, gccgoimporter.PackageInit{mainPkg.Object.Name(), impname, ourprio})
+ }
+
+ return gccgoimporter.InitData{ourprio, uniqinits}
+}
+
+// createInitMainFunction emits __go_init_main, which calls every
+// package init function in priority order. Init functions not defined
+// in this module are declared here so the linker resolves them.
+func (c *compiler) createInitMainFunction(mainPkg *ssa.Package, initmap map[*types.Package]gccgoimporter.InitData) error {
+ initdata := c.buildPackageInitData(mainPkg, initmap)
+
+ ftyp := llvm.FunctionType(llvm.VoidType(), nil, false)
+ initMain := llvm.AddFunction(c.module.Module, "__go_init_main", ftyp)
+ c.addCommonFunctionAttrs(initMain)
+ entry := llvm.AddBasicBlock(initMain, "entry")
+
+ builder := llvm.GlobalContext().NewBuilder()
+ defer builder.Dispose()
+ builder.SetInsertPointAtEnd(entry)
+
+ for _, init := range initdata.Inits {
+ initfn := c.module.Module.NamedFunction(init.InitFunc)
+ if initfn.IsNil() {
+ // Not defined in this module: declare it as external.
+ initfn = llvm.AddFunction(c.module.Module, init.InitFunc, ftyp)
+ }
+ builder.CreateCall(initfn, nil, "")
+ }
+
+ builder.CreateRetVoid()
+ return nil
+}
+
+// buildExportData serializes the package's export data, appending the
+// gccgo-style "priority" and "init" clauses that gccgoimporter reads
+// back when this package is imported.
+func (c *compiler) buildExportData(mainPkg *ssa.Package, initmap map[*types.Package]gccgoimporter.InitData) []byte {
+ exportData := importer.ExportData(mainPkg.Object)
+ b := bytes.NewBuffer(exportData)
+
+ initdata := c.buildPackageInitData(mainPkg, initmap)
+ b.WriteString("v1;\npriority ")
+ b.WriteString(strconv.Itoa(initdata.Priority))
+ b.WriteString(";\n")
+
+ if len(initdata.Inits) != 0 {
+ // "init" clause: space-separated (name, initfunc, priority)
+ // triples, terminated by ";\n".
+ b.WriteString("init")
+ for _, init := range initdata.Inits {
+ b.WriteRune(' ')
+ b.WriteString(init.Name)
+ b.WriteRune(' ')
+ b.WriteString(init.InitFunc)
+ b.WriteRune(' ')
+ b.WriteString(strconv.Itoa(init.Priority))
+ }
+ b.WriteString(";\n")
+ }
+
+ return b.Bytes()
+}
+
+// vim: set ft=go :
diff --git a/irgen/errors.go b/irgen/errors.go
new file mode 100644
index 0000000..73f498e
--- /dev/null
+++ b/irgen/errors.go
@@ -0,0 +1,72 @@
+//===- errors.go - IR generation for run-time panics ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements IR generation for triggering run-time panics.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// Error codes passed to the runtime's error-raising function; the
+// values must match those used by the gccgo runtime.
+const (
+ // From go-runtime-error.c
+ gccgoRuntimeErrorSLICE_INDEX_OUT_OF_BOUNDS = 0
+ gccgoRuntimeErrorARRAY_INDEX_OUT_OF_BOUNDS = 1
+ gccgoRuntimeErrorSTRING_INDEX_OUT_OF_BOUNDS = 2
+ gccgoRuntimeErrorSLICE_SLICE_OUT_OF_BOUNDS = 3
+ gccgoRuntimeErrorARRAY_SLICE_OUT_OF_BOUNDS = 4
+ gccgoRuntimeErrorSTRING_SLICE_OUT_OF_BOUNDS = 5
+ gccgoRuntimeErrorNIL_DEREFERENCE = 6
+ gccgoRuntimeErrorMAKE_SLICE_OUT_OF_BOUNDS = 7
+ gccgoRuntimeErrorMAKE_MAP_OUT_OF_BOUNDS = 8
+ gccgoRuntimeErrorMAKE_CHAN_OUT_OF_BOUNDS = 9
+ gccgoRuntimeErrorDIVISION_BY_ZERO = 10
+ gccgoRuntimeErrorCount = 11
+)
+
+// setBranchWeightMetadata attaches "branch_weights" profile metadata
+// to the conditional branch br, telling the optimizer the relative
+// likelihood of the true and false edges.
+func (fr *frame) setBranchWeightMetadata(br llvm.Value, trueweight, falseweight uint64) {
+ mdprof := llvm.MDKindID("prof")
+
+ mdnode := llvm.MDNode([]llvm.Value{
+ llvm.MDString("branch_weights"),
+ llvm.ConstInt(llvm.Int32Type(), trueweight, false),
+ llvm.ConstInt(llvm.Int32Type(), falseweight, false),
+ })
+
+ br.SetMetadata(mdprof, mdnode)
+}
+
+// condBrRuntimeError emits a branch to a block that raises the given
+// runtime error when cond is true, then continues emission in a new
+// block for the false path. Error blocks are created lazily and cached
+// in fr.runtimeErrorBlocks, so all checks for the same error code in a
+// function share one block.
+func (fr *frame) condBrRuntimeError(cond llvm.Value, errcode uint64) {
+ // A constant-false condition means the check statically never
+ // fails; emit nothing.
+ if cond.IsNull() {
+ return
+ }
+
+ errorbb := fr.runtimeErrorBlocks[errcode]
+ newbb := errorbb.C == nil
+ if newbb {
+ errorbb = llvm.AddBasicBlock(fr.function, "")
+ fr.runtimeErrorBlocks[errcode] = errorbb
+ }
+
+ contbb := llvm.AddBasicBlock(fr.function, "")
+
+ // Mark the error path as cold (1:1000).
+ br := fr.builder.CreateCondBr(cond, errorbb, contbb)
+ fr.setBranchWeightMetadata(br, 1, 1000)
+
+ if newbb {
+ // Populate the error block only on first use: call the runtime
+ // error function, which does not return.
+ fr.builder.SetInsertPointAtEnd(errorbb)
+ fr.runtime.runtimeError.call(fr, llvm.ConstInt(llvm.Int32Type(), errcode, false))
+ fr.builder.CreateUnreachable()
+ }
+
+ fr.builder.SetInsertPointAtEnd(contbb)
+}
diff --git a/irgen/indirect.go b/irgen/indirect.go
new file mode 100644
index 0000000..ea27007
--- /dev/null
+++ b/irgen/indirect.go
@@ -0,0 +1,125 @@
+//===- indirect.go - IR generation for thunks -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements IR generation for thunks required by the "defer" and
+// "go" builtins.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// createThunk creates a thunk from a
+// given function and arguments, suitable for use with
+// "defer" and "go".
+// createThunk creates a thunk from a
+// given function and arguments, suitable for use with
+// "defer" and "go". It returns the thunk function (as an i8*) and the
+// single i8* argument to pass to it: either a pointer to a heap block
+// holding the captured values, the encoded canRecover flag (for a
+// bare recover() call), or null.
+func (fr *frame) createThunk(call ssa.CallInstruction) (thunk llvm.Value, arg llvm.Value) {
+ seenarg := make(map[ssa.Value]bool)
+ var args []ssa.Value
+ var argtypes []*types.Var
+
+ // packArg records a value that must be captured in the argument
+ // block; constants, globals, functions and builtins are skipped as
+ // they can be rematerialized inside the thunk.
+ packArg := func(arg ssa.Value) {
+ switch arg.(type) {
+ case *ssa.Builtin, *ssa.Function, *ssa.Const, *ssa.Global:
+ // Do nothing: we can generate these in the thunk
+ default:
+ if !seenarg[arg] {
+ seenarg[arg] = true
+ args = append(args, arg)
+ field := types.NewField(0, nil, "_", arg.Type(), true)
+ argtypes = append(argtypes, field)
+ }
+ }
+ }
+
+ packArg(call.Common().Value)
+ for _, arg := range call.Common().Args {
+ packArg(arg)
+ }
+
+ var isRecoverCall bool
+ i8ptr := llvm.PointerType(llvm.Int8Type(), 0)
+ var structllptr llvm.Type
+ if len(args) == 0 {
+ if builtin, ok := call.Common().Value.(*ssa.Builtin); ok {
+ isRecoverCall = builtin.Name() == "recover"
+ }
+ if isRecoverCall {
+ // When creating a thunk for recover(), we must pass fr.canRecover.
+ // The i1 flag is widened and smuggled through the i8* argument.
+ arg = fr.builder.CreateZExt(fr.canRecover, fr.target.IntPtrType(), "")
+ arg = fr.builder.CreateIntToPtr(arg, i8ptr, "")
+ } else {
+ arg = llvm.ConstPointerNull(i8ptr)
+ }
+ } else {
+ // Capture the arguments in a heap-allocated struct; the thunk
+ // receives a pointer to it as its sole argument.
+ structtype := types.NewStruct(argtypes, nil)
+ arg = fr.createTypeMalloc(structtype)
+ structllptr = arg.Type()
+ for i, ssaarg := range args {
+ argptr := fr.builder.CreateStructGEP(arg, i, "")
+ fr.builder.CreateStore(fr.llvmvalue(ssaarg), argptr)
+ }
+ arg = fr.builder.CreateBitCast(arg, i8ptr, "")
+ }
+
+ // All thunks share the signature void(i8*).
+ thunkfntype := llvm.FunctionType(llvm.VoidType(), []llvm.Type{i8ptr}, false)
+ thunkfn := llvm.AddFunction(fr.module.Module, "", thunkfntype)
+ thunkfn.SetLinkage(llvm.InternalLinkage)
+ fr.addCommonFunctionAttrs(thunkfn)
+
+ thunkfr := newFrame(fr.unit, thunkfn)
+ defer thunkfr.dispose()
+
+ prologuebb := llvm.AddBasicBlock(thunkfn, "prologue")
+ thunkfr.builder.SetInsertPointAtEnd(prologuebb)
+
+ if isRecoverCall {
+ // Decode the canRecover flag smuggled through the pointer arg.
+ thunkarg := thunkfn.Param(0)
+ thunkarg = thunkfr.builder.CreatePtrToInt(thunkarg, fr.target.IntPtrType(), "")
+ thunkfr.canRecover = thunkfr.builder.CreateTrunc(thunkarg, llvm.Int1Type(), "")
+ } else if len(args) > 0 {
+ // Unpack the captured values and bind them to the SSA values
+ // the thunk body will reference.
+ thunkarg := thunkfn.Param(0)
+ thunkarg = thunkfr.builder.CreateBitCast(thunkarg, structllptr, "")
+ for i, ssaarg := range args {
+ thunkargptr := thunkfr.builder.CreateStructGEP(thunkarg, i, "")
+ thunkarg := thunkfr.builder.CreateLoad(thunkargptr, "")
+ thunkfr.env[ssaarg] = newValue(thunkarg, ssaarg.Type())
+ }
+ }
+
+ _, isDefer := call.(*ssa.Defer)
+
+ entrybb := llvm.AddBasicBlock(thunkfn, "entry")
+ br := thunkfr.builder.CreateBr(entrybb)
+ thunkfr.allocaBuilder.SetInsertPointBefore(br)
+
+ thunkfr.builder.SetInsertPointAtEnd(entrybb)
+ var exitbb llvm.BasicBlock
+ if isDefer {
+ // Register exitbb's address with the runtime so a deferred call
+ // can be resumed there (see setDeferRetaddr).
+ exitbb = llvm.AddBasicBlock(thunkfn, "exit")
+ thunkfr.runtime.setDeferRetaddr.call(thunkfr, llvm.BlockAddress(thunkfn, exitbb))
+ }
+ if isDefer && isRecoverCall {
+ thunkfr.callRecover(true)
+ } else {
+ thunkfr.callInstruction(call)
+ }
+ if isDefer {
+ thunkfr.builder.CreateBr(exitbb)
+ thunkfr.builder.SetInsertPointAtEnd(exitbb)
+ }
+ thunkfr.builder.CreateRetVoid()
+
+ thunk = fr.builder.CreateBitCast(thunkfn, i8ptr, "")
+ return
+}
diff --git a/irgen/interfaces.go b/irgen/interfaces.go
new file mode 100644
index 0000000..0c45182
--- /dev/null
+++ b/irgen/interfaces.go
@@ -0,0 +1,196 @@
+//===- interfaces.go - IR generation for interfaces -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements IR generation for dealing with interface values.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// interfaceMethod returns a function and receiver pointer for the specified
+// interface and method pair.
+// interfaceMethod returns a function and receiver pointer for the specified
+// interface and method pair. The function's signature has its receiver
+// replaced by unsafe.Pointer, matching the recv value returned.
+func (fr *frame) interfaceMethod(lliface llvm.Value, ifacety types.Type, method *types.Func) (fn, recv *govalue) {
+ llitab := fr.builder.CreateExtractValue(lliface, 0, "")
+ recv = newValue(fr.builder.CreateExtractValue(lliface, 1, ""), types.Typ[types.UnsafePointer])
+ methodset := fr.types.MethodSet(ifacety)
+ // TODO(axw) cache ordered method index
+ index := -1
+ for i, m := range orderedMethodSet(methodset) {
+ if m.Obj() == method {
+ index = i
+ break
+ }
+ }
+ if index == -1 {
+ panic("could not find method index")
+ }
+ // The itab is an array of pointers whose first slot is the runtime
+ // type pointer, followed by the method function pointers.
+ llitab = fr.builder.CreateBitCast(llitab, llvm.PointerType(llvm.PointerType(llvm.Int8Type(), 0), 0), "")
+ // Skip runtime type pointer.
+ llifnptr := fr.builder.CreateGEP(llitab, []llvm.Value{
+ llvm.ConstInt(llvm.Int32Type(), uint64(index+1), false),
+ }, "")
+
+ llifn := fr.builder.CreateLoad(llifnptr, "")
+ // Replace receiver type with unsafe.Pointer.
+ recvparam := types.NewParam(0, nil, "", types.Typ[types.UnsafePointer])
+ sig := method.Type().(*types.Signature)
+ sig = types.NewSignature(nil, recvparam, sig.Params(), sig.Results(), sig.Variadic())
+ fn = newValue(llifn, sig)
+ return
+}
+
+// compareInterfaces emits code to compare two interfaces for
+// equality.
+// compareInterfaces emits code to compare two interfaces for
+// equality, returning a Bool-typed govalue.
+func (fr *frame) compareInterfaces(a, b *govalue) *govalue {
+ aNull := a.value.IsNull()
+ bNull := b.value.IsNull()
+ // Both operands statically nil: trivially equal.
+ if aNull && bNull {
+ return newValue(boolLLVMValue(true), types.Typ[types.Bool])
+ }
+
+ // Pick the runtime comparison helper. If only one side is a
+ // non-empty interface, convert it to an empty interface first so
+ // both operands have the same representation.
+ compare := fr.runtime.emptyInterfaceCompare
+ aI := a.Type().Underlying().(*types.Interface).NumMethods() > 0
+ bI := b.Type().Underlying().(*types.Interface).NumMethods() > 0
+ switch {
+ case aI && bI:
+ compare = fr.runtime.interfaceCompare
+ case aI:
+ a = fr.convertI2E(a)
+ case bI:
+ b = fr.convertI2E(b)
+ }
+
+ // The runtime helper returns zero for equal operands; map that to
+ // a boolean (i8) value.
+ result := compare.call(fr, a.value, b.value)[0]
+ result = fr.builder.CreateIsNull(result, "")
+ result = fr.builder.CreateZExt(result, llvm.Int8Type(), "")
+ return newValue(result, types.Typ[types.Bool])
+}
+
+// makeInterface boxes the value llv of type vty into an interface
+// value of type iface. Non-pointer values are first copied to a fresh
+// heap allocation, since the interface's data word holds a pointer.
+func (fr *frame) makeInterface(llv llvm.Value, vty types.Type, iface types.Type) *govalue {
+ if _, ok := vty.Underlying().(*types.Pointer); !ok {
+ ptr := fr.createTypeMalloc(vty)
+ fr.builder.CreateStore(llv, ptr)
+ llv = ptr
+ }
+ return fr.makeInterfaceFromPointer(llv, vty, iface)
+}
+
+// makeInterfaceFromPointer builds the interface value {itab, data}
+// from a pointer vptr to (or representing) a value of type vty.
+func (fr *frame) makeInterfaceFromPointer(vptr llvm.Value, vty types.Type, iface types.Type) *govalue {
+ i8ptr := llvm.PointerType(llvm.Int8Type(), 0)
+ llv := fr.builder.CreateBitCast(vptr, i8ptr, "")
+ value := llvm.Undef(fr.types.ToLLVM(iface))
+ itab := fr.types.getItabPointer(vty, iface.Underlying().(*types.Interface))
+ value = fr.builder.CreateInsertValue(value, itab, 0, "")
+ value = fr.builder.CreateInsertValue(value, llv, 1, "")
+ return newValue(value, iface)
+}
+
+// Reads the type descriptor from the given interface type.
+// Reads the type descriptor from the given interface type.
+// For an empty interface the first word is the descriptor itself; for
+// a non-empty interface it is an itab pointer whose first slot holds
+// the descriptor, loaded only when the itab is non-nil.
+func (fr *frame) getInterfaceTypeDescriptor(v *govalue) llvm.Value {
+ isempty := v.Type().Underlying().(*types.Interface).NumMethods() == 0
+ itab := fr.builder.CreateExtractValue(v.value, 0, "")
+ if isempty {
+ return itab
+ } else {
+ itabnonnull := fr.builder.CreateIsNotNull(itab, "")
+ return fr.loadOrNull(itabnonnull, itab, types.Typ[types.UnsafePointer]).value
+ }
+}
+
+// Reads the value from the given interface type, assuming that the
+// interface holds a value of the correct type. Non-pointer values are
+// loaded through the data word; pointer values are the data word.
+func (fr *frame) getInterfaceValue(v *govalue, ty types.Type) *govalue {
+ val := fr.builder.CreateExtractValue(v.value, 1, "")
+ if _, ok := ty.Underlying().(*types.Pointer); !ok {
+ typedval := fr.builder.CreateBitCast(val, llvm.PointerType(fr.types.ToLLVM(ty), 0), "")
+ val = fr.builder.CreateLoad(typedval, "")
+ }
+ return newValue(val, ty)
+}
+
+// If cond is true, reads the value from the given interface type, otherwise
+// returns a nil value.
+func (fr *frame) getInterfaceValueOrNull(cond llvm.Value, v *govalue, ty types.Type) *govalue {
+ val := fr.builder.CreateExtractValue(v.value, 1, "")
+ if _, ok := ty.Underlying().(*types.Pointer); ok {
+ val = fr.builder.CreateSelect(cond, val, llvm.ConstNull(val.Type()), "")
+ } else {
+ val = fr.loadOrNull(cond, val, ty).value
+ }
+ return newValue(val, ty)
+}
+
+// interfaceTypeCheck implements the two-result type assertion
+// v, ok := val.(ty). Interface target types go through the runtime
+// conversion helpers; concrete target types compare type descriptors
+// and load the value only when they match.
+func (fr *frame) interfaceTypeCheck(val *govalue, ty types.Type) (v *govalue, okval *govalue) {
+ tytd := fr.types.ToRuntime(ty)
+ if _, ok := ty.Underlying().(*types.Interface); ok {
+ var result []llvm.Value
+ // Choose the helper by whether the source is a non-empty (I) or
+ // empty (E) interface.
+ if val.Type().Underlying().(*types.Interface).NumMethods() > 0 {
+ result = fr.runtime.ifaceI2I2.call(fr, tytd, val.value)
+ } else {
+ result = fr.runtime.ifaceE2I2.call(fr, tytd, val.value)
+ }
+ v = newValue(result[0], ty)
+ okval = newValue(result[1], types.Typ[types.Bool])
+ } else {
+ valtd := fr.getInterfaceTypeDescriptor(val)
+ tyequal := fr.runtime.typeDescriptorsEqual.call(fr, valtd, tytd)[0]
+ okval = newValue(tyequal, types.Typ[types.Bool])
+ tyequal = fr.builder.CreateTrunc(tyequal, llvm.Int1Type(), "")
+
+ v = fr.getInterfaceValueOrNull(tyequal, val, ty)
+ }
+ return
+}
+
+// interfaceTypeAssert implements the single-result (panicking) type
+// assertion val.(ty). For concrete targets, checkInterfaceType raises
+// a runtime panic on mismatch, so the following load is unconditional.
+func (fr *frame) interfaceTypeAssert(val *govalue, ty types.Type) *govalue {
+ if _, ok := ty.Underlying().(*types.Interface); ok {
+ return fr.changeInterface(val, ty, true)
+ } else {
+ valtytd := fr.types.ToRuntime(val.Type())
+ valtd := fr.getInterfaceTypeDescriptor(val)
+ tytd := fr.types.ToRuntime(ty)
+ fr.runtime.checkInterfaceType.call(fr, valtd, tytd, valtytd)
+
+ return fr.getInterfaceValue(val, ty)
+ }
+}
+
+// convertI2E converts a non-empty interface value to an empty interface.
+// convertI2E converts a non-empty interface value to an empty interface.
+// The data word is carried over and the itab is replaced with the bare
+// type descriptor.
+func (fr *frame) convertI2E(v *govalue) *govalue {
+ td := fr.getInterfaceTypeDescriptor(v)
+ val := fr.builder.CreateExtractValue(v.value, 1, "")
+
+ typ := types.NewInterface(nil, nil)
+ intf := llvm.Undef(fr.types.ToLLVM(typ))
+ intf = fr.builder.CreateInsertValue(intf, td, 0, "")
+ intf = fr.builder.CreateInsertValue(intf, val, 1, "")
+ return newValue(intf, typ)
+}
+
+// changeInterface converts the interface value v to interface type ty,
+// obtaining the new first word from the runtime. With assert set, the
+// assert helper is used (type assertion semantics); otherwise the
+// conversion helper is used. The data word is unchanged.
+func (fr *frame) changeInterface(v *govalue, ty types.Type, assert bool) *govalue {
+ td := fr.getInterfaceTypeDescriptor(v)
+ tytd := fr.types.ToRuntime(ty)
+ var itab llvm.Value
+ if assert {
+ itab = fr.runtime.assertInterface.call(fr, tytd, td)[0]
+ } else {
+ itab = fr.runtime.convertInterface.call(fr, tytd, td)[0]
+ }
+ val := fr.builder.CreateExtractValue(v.value, 1, "")
+
+ intf := llvm.Undef(fr.types.ToLLVM(ty))
+ intf = fr.builder.CreateInsertValue(intf, itab, 0, "")
+ intf = fr.builder.CreateInsertValue(intf, val, 1, "")
+ return newValue(intf, ty)
+}
diff --git a/irgen/maps.go b/irgen/maps.go
new file mode 100644
index 0000000..ada24e1
--- /dev/null
+++ b/irgen/maps.go
@@ -0,0 +1,153 @@
+//===- maps.go - IR generation for maps -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements IR generation for maps.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// makeMap implements make(maptype[, initial space])
+// makeMap implements make(maptype[, initial space])
+func (fr *frame) makeMap(typ types.Type, cap_ *govalue) *govalue {
+ // TODO(pcc): call __go_new_map_big here if needed
+ dyntyp := fr.types.getMapDescriptorPointer(typ)
+ dyntyp = fr.builder.CreateBitCast(dyntyp, llvm.PointerType(llvm.Int8Type(), 0), "")
+ var cap llvm.Value
+ if cap_ != nil {
+ cap = fr.convert(cap_, types.Typ[types.Uintptr]).value
+ } else {
+ // No capacity hint supplied: pass zero.
+ cap = llvm.ConstNull(fr.types.inttype)
+ }
+ m := fr.runtime.newMap.call(fr, dyntyp, cap)
+ return newValue(m[0], typ)
+}
+
+// mapLookup implements v[, ok] = m[k]
+// mapLookup implements v[, ok] = m[k]
+func (fr *frame) mapLookup(m, k *govalue) (v *govalue, ok *govalue) {
+ // The runtime takes the key by pointer, so spill it to a slot.
+ llk := k.value
+ pk := fr.allocaBuilder.CreateAlloca(llk.Type(), "")
+ fr.builder.CreateStore(llk, pk)
+ // Third argument false: look up without inserting. A nil result
+ // pointer means the key is absent.
+ valptr := fr.runtime.mapIndex.call(fr, m.value, pk, boolLLVMValue(false))[0]
+ valptr.AddInstrAttribute(2, llvm.NoCaptureAttribute)
+ valptr.AddInstrAttribute(2, llvm.ReadOnlyAttribute)
+ okbit := fr.builder.CreateIsNotNull(valptr, "")
+
+ elemtyp := m.Type().Underlying().(*types.Map).Elem()
+ ok = newValue(fr.builder.CreateZExt(okbit, llvm.Int8Type(), ""), types.Typ[types.Bool])
+ // Load the element only when present; otherwise produce the zero value.
+ v = fr.loadOrNull(okbit, valptr, elemtyp)
+ return
+}
+
+// mapUpdate implements m[k] = v
+func (fr *frame) mapUpdate(m, k, v *govalue) {
+ llk := k.value
+ pk := fr.allocaBuilder.CreateAlloca(llk.Type(), "")
+ fr.builder.CreateStore(llk, pk)
+ // Third argument true: insert the key if it is not present.
+ valptr := fr.runtime.mapIndex.call(fr, m.value, pk, boolLLVMValue(true))[0]
+ valptr.AddInstrAttribute(2, llvm.NoCaptureAttribute)
+ valptr.AddInstrAttribute(2, llvm.ReadOnlyAttribute)
+
+ elemtyp := m.Type().Underlying().(*types.Map).Elem()
+ llelemtyp := fr.types.ToLLVM(elemtyp)
+ typedvalptr := fr.builder.CreateBitCast(valptr, llvm.PointerType(llelemtyp, 0), "")
+ fr.builder.CreateStore(v.value, typedvalptr)
+}
+
+// mapDelete implements delete(m, k)
+func (fr *frame) mapDelete(m, k *govalue) {
+ llk := k.value
+ pk := fr.allocaBuilder.CreateAlloca(llk.Type(), "")
+ fr.builder.CreateStore(llk, pk)
+ fr.runtime.mapdelete.call(fr, m.value, pk)
+}
+
+// mapIterInit creates a map iterator
+// mapIterInit creates a map iterator
+func (fr *frame) mapIterInit(m *govalue) []*govalue {
+ // We represent an iterator as a tuple (map, *bool). The second element
+ // controls whether the code we generate for "next" (below) calls the
+ // runtime function for the first or the next element. We let the
+ // optimizer reorganize this into something more sensible.
+ isinit := fr.allocaBuilder.CreateAlloca(llvm.Int1Type(), "")
+ fr.builder.CreateStore(llvm.ConstNull(llvm.Int1Type()), isinit)
+
+ return []*govalue{m, newValue(isinit, types.NewPointer(types.Typ[types.Bool]))}
+}
+
+// mapIterNext advances the iterator, and returns the tuple (ok, k, v).
+// When ok is false (iteration finished), k and v are zero values.
+func (fr *frame) mapIterNext(iter []*govalue) []*govalue {
+ maptyp := iter[0].Type().Underlying().(*types.Map)
+ ktyp := maptyp.Key()
+ klltyp := fr.types.ToLLVM(ktyp)
+ vtyp := maptyp.Elem()
+ vlltyp := fr.types.ToLLVM(vtyp)
+
+ m, isinitptr := iter[0], iter[1]
+
+ // Stack buffer for the runtime's iterator state (4 pointer words),
+ // plus slots for the key and value copied out by mapiter2.
+ i8ptr := llvm.PointerType(llvm.Int8Type(), 0)
+ mapiterbufty := llvm.ArrayType(i8ptr, 4)
+ mapiterbuf := fr.allocaBuilder.CreateAlloca(mapiterbufty, "")
+ mapiterbufelem0ptr := fr.builder.CreateStructGEP(mapiterbuf, 0, "")
+
+ keybuf := fr.allocaBuilder.CreateAlloca(klltyp, "")
+ keyptr := fr.builder.CreateBitCast(keybuf, i8ptr, "")
+ valbuf := fr.allocaBuilder.CreateAlloca(vlltyp, "")
+ valptr := fr.builder.CreateBitCast(valbuf, i8ptr, "")
+
+ isinit := fr.builder.CreateLoad(isinitptr.value, "")
+
+ // First call initializes the iterator; later calls advance it.
+ initbb := llvm.AddBasicBlock(fr.function, "")
+ nextbb := llvm.AddBasicBlock(fr.function, "")
+ contbb := llvm.AddBasicBlock(fr.function, "")
+
+ fr.builder.CreateCondBr(isinit, nextbb, initbb)
+
+ fr.builder.SetInsertPointAtEnd(initbb)
+ fr.builder.CreateStore(llvm.ConstAllOnes(llvm.Int1Type()), isinitptr.value)
+ fr.runtime.mapiterinit.call(fr, m.value, mapiterbufelem0ptr)
+ fr.builder.CreateBr(contbb)
+
+ fr.builder.SetInsertPointAtEnd(nextbb)
+ fr.runtime.mapiternext.call(fr, mapiterbufelem0ptr)
+ fr.builder.CreateBr(contbb)
+
+ fr.builder.SetInsertPointAtEnd(contbb)
+ // A null first word in the iterator buffer signals end of iteration.
+ mapiterbufelem0 := fr.builder.CreateLoad(mapiterbufelem0ptr, "")
+ okbit := fr.builder.CreateIsNotNull(mapiterbufelem0, "")
+ ok := fr.builder.CreateZExt(okbit, llvm.Int8Type(), "")
+
+ // Fetch the key/value only when an element was produced.
+ loadbb := llvm.AddBasicBlock(fr.function, "")
+ cont2bb := llvm.AddBasicBlock(fr.function, "")
+ fr.builder.CreateCondBr(okbit, loadbb, cont2bb)
+
+ fr.builder.SetInsertPointAtEnd(loadbb)
+ fr.runtime.mapiter2.call(fr, mapiterbufelem0ptr, keyptr, valptr)
+ // The call may have split the block; use the current block as the
+ // phi predecessor.
+ loadbb = fr.builder.GetInsertBlock()
+ loadedkey := fr.builder.CreateLoad(keybuf, "")
+ loadedval := fr.builder.CreateLoad(valbuf, "")
+ fr.builder.CreateBr(cont2bb)
+
+ // Merge: zero values on the not-ok path, loaded values otherwise.
+ fr.builder.SetInsertPointAtEnd(cont2bb)
+ k := fr.builder.CreatePHI(klltyp, "")
+ k.AddIncoming(
+ []llvm.Value{llvm.ConstNull(klltyp), loadedkey},
+ []llvm.BasicBlock{contbb, loadbb},
+ )
+ v := fr.builder.CreatePHI(vlltyp, "")
+ v.AddIncoming(
+ []llvm.Value{llvm.ConstNull(vlltyp), loadedval},
+ []llvm.BasicBlock{contbb, loadbb},
+ )
+
+ return []*govalue{newValue(ok, types.Typ[types.Bool]), newValue(k, ktyp), newValue(v, vtyp)}
+}
diff --git a/irgen/parser.go b/irgen/parser.go
new file mode 100644
index 0000000..143134b
--- /dev/null
+++ b/irgen/parser.go
@@ -0,0 +1,42 @@
+//===- parser.go - parser wrapper -----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains functions for calling the parser in an appropriate way for
+// llgo.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+)
+
+// parseFile parses a single source file into fset, retaining comments
+// (required for annotation processing) and reporting declaration errors.
+func parseFile(fset *token.FileSet, filename string) (*ast.File, error) {
+ mode := parser.DeclarationErrors | parser.ParseComments
+ return parser.ParseFile(fset, filename, nil, mode)
+}
+
+// parseFiles parses all named files into fset. A scanner.ErrorList
+// already carries file positions, so it is returned unchanged; other
+// errors are wrapped with the offending filename.
+func parseFiles(fset *token.FileSet, filenames []string) ([]*ast.File, error) {
+ files := make([]*ast.File, len(filenames))
+ for i, filename := range filenames {
+ file, err := parseFile(fset, filename)
+ if _, ok := err.(scanner.ErrorList); ok {
+ return nil, err
+ } else if err != nil {
+ return nil, fmt.Errorf("%q: %v", filename, err)
+ }
+ files[i] = file
+ }
+ return files, nil
+}
diff --git a/irgen/predicates.go b/irgen/predicates.go
new file mode 100644
index 0000000..6607da8
--- /dev/null
+++ b/irgen/predicates.go
@@ -0,0 +1,58 @@
+//===- predicates.go - type predicates ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements commonly used type predicates.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// isBoolean reports whether typ's underlying type is a boolean basic type.
+func isBoolean(typ types.Type) bool {
+	t, ok := typ.Underlying().(*types.Basic)
+	return ok && t.Info()&types.IsBoolean != 0
+}
+
+// isInteger reports whether typ's underlying type is an integer basic type.
+func isInteger(typ types.Type) bool {
+	t, ok := typ.Underlying().(*types.Basic)
+	return ok && t.Info()&types.IsInteger != 0
+}
+
+// isUnsigned reports whether typ's underlying type is an unsigned integer
+// basic type.
+func isUnsigned(typ types.Type) bool {
+	t, ok := typ.Underlying().(*types.Basic)
+	return ok && t.Info()&types.IsUnsigned != 0
+}
+
+// isFloat reports whether typ's underlying type is a floating-point basic
+// type.
+func isFloat(typ types.Type) bool {
+	t, ok := typ.Underlying().(*types.Basic)
+	return ok && t.Info()&types.IsFloat != 0
+}
+
+// isComplex reports whether typ's underlying type is a complex basic type.
+func isComplex(typ types.Type) bool {
+	t, ok := typ.Underlying().(*types.Basic)
+	return ok && t.Info()&types.IsComplex != 0
+}
+
+// isString reports whether typ's underlying type is a string basic type.
+func isString(typ types.Type) bool {
+	t, ok := typ.Underlying().(*types.Basic)
+	return ok && t.Info()&types.IsString != 0
+}
+
+// isUntyped reports whether typ's underlying type is an untyped constant
+// type.
+func isUntyped(typ types.Type) bool {
+	t, ok := typ.Underlying().(*types.Basic)
+	return ok && t.Info()&types.IsUntyped != 0
+}
+
+// isSlice reports whether typ is a slice whose element's underlying type
+// is the basic kind bkind.
+func isSlice(typ types.Type, bkind types.BasicKind) bool {
+	t, ok := typ.Underlying().(*types.Slice)
+	return ok && types.Identical(t.Elem().Underlying(), types.Typ[bkind])
+}
diff --git a/irgen/println.go b/irgen/println.go
new file mode 100644
index 0000000..98ccedb
--- /dev/null
+++ b/irgen/println.go
@@ -0,0 +1,93 @@
+//===- println.go - IR generation for print and println -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements IR generation for the print and println built-in
+// functions.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "fmt"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// printValues emits IR implementing the print and println built-ins for
+// the given operands. println_ selects println semantics: a space is
+// printed between consecutive operands and a trailing newline is
+// appended. Each operand is dispatched to the matching runtime print
+// routine based on its underlying type; unhandled types panic, since
+// they indicate a compiler bug rather than a user error.
+func (fr *frame) printValues(println_ bool, values ...*govalue) {
+	for i, value := range values {
+		llvm_value := value.value
+
+		typ := value.Type().Underlying()
+		// NOTE(review): Underlying() should not itself yield a
+		// *types.Named, so this unwrap looks redundant; kept as a
+		// defensive measure — confirm against the forked go/types.
+		if name, isname := typ.(*types.Named); isname {
+			typ = name.Underlying()
+		}
+
+		if println_ && i > 0 {
+			fr.runtime.printSpace.call(fr)
+		}
+		switch typ := typ.(type) {
+		case *types.Basic:
+			switch typ.Kind() {
+			case types.Uint8, types.Uint16, types.Uint32, types.Uintptr, types.Uint, types.Uint64:
+				// Widen to i64 for the runtime's uint64 printer.
+				i64 := fr.llvmtypes.ctx.Int64Type()
+				zext := fr.builder.CreateZExt(llvm_value, i64, "")
+				fr.runtime.printUint64.call(fr, zext)
+
+			case types.Int, types.Int8, types.Int16, types.Int32, types.Int64:
+				i64 := fr.llvmtypes.ctx.Int64Type()
+				sext := fr.builder.CreateSExt(llvm_value, i64, "")
+				fr.runtime.printInt64.call(fr, sext)
+
+			case types.Float32:
+				// Promote to double and fall through to the
+				// float64 printer.
+				llvm_value = fr.builder.CreateFPExt(llvm_value, fr.llvmtypes.ctx.DoubleType(), "")
+				fallthrough
+			case types.Float64:
+				fr.runtime.printDouble.call(fr, llvm_value)
+
+			case types.Complex64:
+				// Promote to complex128 and fall through.
+				llvm_value = fr.convert(value, types.Typ[types.Complex128]).value
+				fallthrough
+			case types.Complex128:
+				fr.runtime.printComplex.call(fr, llvm_value)
+
+			case types.String, types.UntypedString:
+				fr.runtime.printString.call(fr, llvm_value)
+
+			case types.Bool:
+				fr.runtime.printBool.call(fr, llvm_value)
+
+			case types.UnsafePointer:
+				fr.runtime.printPointer.call(fr, llvm_value)
+
+			default:
+				// Bug fix: call Kind() — previously the method
+				// value itself was printed, producing a useless
+				// diagnostic.
+				panic(fmt.Sprint("Unhandled Basic Kind: ", typ.Kind()))
+			}
+
+		case *types.Interface:
+			if typ.Empty() {
+				fr.runtime.printEmptyInterface.call(fr, llvm_value)
+			} else {
+				fr.runtime.printInterface.call(fr, llvm_value)
+			}
+
+		case *types.Slice:
+			fr.runtime.printSlice.call(fr, llvm_value)
+
+		case *types.Pointer, *types.Map, *types.Chan, *types.Signature:
+			// All of these are represented as pointers at runtime.
+			fr.runtime.printPointer.call(fr, llvm_value)
+
+		default:
+			panic(fmt.Sprintf("Unhandled type kind: %s (%T)", typ, typ))
+		}
+	}
+	if println_ {
+		fr.runtime.printNl.call(fr)
+	}
+}
diff --git a/irgen/runtime.go b/irgen/runtime.go
new file mode 100644
index 0000000..e14a026
--- /dev/null
+++ b/irgen/runtime.go
@@ -0,0 +1,614 @@
+//===- runtime.go - IR generation for runtime calls -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements IR generation for calls to the runtime library.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "strconv"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// runtimeFnInfo bundles a runtime function's ABI/type information with
+// its LLVM declaration.
+type runtimeFnInfo struct {
+	fi *functionTypeInfo
+	fn llvm.Value
+}
+
+// init computes the function type info for the given argument and result
+// types, and declares the function in module m under the given name.
+func (rfi *runtimeFnInfo) init(tm *llvmTypeMap, m llvm.Module, name string, args []types.Type, results []types.Type) {
+	rfi.fi = new(functionTypeInfo)
+	*rfi.fi = tm.getFunctionTypeInfo(args, results)
+	rfi.fn = rfi.fi.declare(m, name)
+}
+
+// call emits a call to the runtime function. If the frame has an unwind
+// block, an invoke routed to that landing pad is emitted instead of a
+// plain call.
+func (rfi *runtimeFnInfo) call(f *frame, args ...llvm.Value) []llvm.Value {
+	if f.unwindBlock.IsNil() {
+		return rfi.callOnly(f, args...)
+	} else {
+		return rfi.invoke(f, f.unwindBlock, args...)
+	}
+}
+
+// callOnly emits an unconditional plain call, bypassing any unwind block.
+func (rfi *runtimeFnInfo) callOnly(f *frame, args ...llvm.Value) []llvm.Value {
+	return rfi.fi.call(f.llvmtypes.ctx, f.allocaBuilder, f.builder, rfi.fn, args)
+}
+
+// invoke emits an invoke whose unwind destination is lpad; normal control
+// flow resumes in a freshly added continuation block.
+func (rfi *runtimeFnInfo) invoke(f *frame, lpad llvm.BasicBlock, args ...llvm.Value) []llvm.Value {
+	contbb := llvm.AddBasicBlock(f.function, "")
+	return rfi.fi.invoke(f.llvmtypes.ctx, f.allocaBuilder, f.builder, rfi.fn, args, contbb, lpad)
+}
+
+// runtimeInterface is a struct containing references to
+// runtime types and intrinsic function declarations.
+type runtimeInterface struct {
+	// LLVM intrinsics
+	memcpy,
+	memset,
+	returnaddress llvm.Value
+
+	// Exception handling support
+	gccgoPersonality   llvm.Value
+	gccgoExceptionType llvm.Type
+
+	// Runtime intrinsics.
+	//
+	// Fields whose natural name is a Go keyword (defer, go, new) are
+	// capitalized instead, since keywords cannot be used as identifiers.
+	append,
+	assertInterface,
+	canRecover,
+	chanCap,
+	chanLen,
+	chanrecv2,
+	checkDefer,
+	checkInterfaceType,
+	builtinClose,
+	convertInterface,
+	copy,
+	Defer,
+	deferredRecover,
+	emptyInterfaceCompare,
+	getClosure,
+	Go,
+	ifaceE2I2,
+	ifaceI2I2,
+	intArrayToString,
+	interfaceCompare,
+	intToString,
+	makeSlice,
+	mapdelete,
+	mapiter2,
+	mapiterinit,
+	mapiternext,
+	mapIndex,
+	mapLen,
+	New,
+	newChannel,
+	newMap,
+	NewNopointers,
+	newSelect,
+	panic,
+	printBool,
+	printComplex,
+	printDouble,
+	printEmptyInterface,
+	printInterface,
+	printInt64,
+	printNl,
+	printPointer,
+	printSlice,
+	printSpace,
+	printString,
+	printUint64,
+	receive,
+	recover,
+	registerGcRoots,
+	runtimeError,
+	selectdefault,
+	selectrecv2,
+	selectsend,
+	selectgo,
+	sendBig,
+	setClosure,
+	setDeferRetaddr,
+	strcmp,
+	stringiter2,
+	stringPlus,
+	stringSlice,
+	stringToIntArray,
+	typeDescriptorsEqual,
+	undefer runtimeFnInfo
+}
+
+// newRuntimeInterface declares, in module, every libgo runtime function
+// the compiler may call (from the table below), the LLVM memset/memcpy/
+// returnaddress intrinsics, and the gccgo exception-handling personality
+// function and exception type, returning references to all of them.
+func newRuntimeInterface(module llvm.Module, tm *llvmTypeMap) (*runtimeInterface, error) {
+	var ri runtimeInterface
+
+	// Shorthands for the Go types used in the signature table below.
+	Bool := types.Typ[types.Bool]
+	Complex128 := types.Typ[types.Complex128]
+	Float64 := types.Typ[types.Float64]
+	Int32 := types.Typ[types.Int32]
+	Int64 := types.Typ[types.Int64]
+	Int := types.Typ[types.Int]
+	Rune := types.Typ[types.Rune]
+	String := types.Typ[types.String]
+	Uintptr := types.Typ[types.Uintptr]
+	UnsafePointer := types.Typ[types.UnsafePointer]
+
+	EmptyInterface := types.NewInterface(nil, nil)
+	IntSlice := types.NewSlice(types.Typ[types.Int])
+
+	// Declare each runtime function and apply its attributes (e.g.
+	// noreturn on panic/runtimeError).
+	for _, rt := range [...]struct {
+		name string
+		rfi *runtimeFnInfo
+		args, res []types.Type
+		attrs []llvm.Attribute
+	}{
+		{
+			name: "__go_append",
+			rfi: &ri.append,
+			args: []types.Type{IntSlice, UnsafePointer, Uintptr, Uintptr},
+			res: []types.Type{IntSlice},
+		},
+		{
+			name: "__go_assert_interface",
+			rfi: &ri.assertInterface,
+			args: []types.Type{UnsafePointer, UnsafePointer},
+			res: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_can_recover",
+			rfi: &ri.canRecover,
+			args: []types.Type{UnsafePointer},
+			res: []types.Type{Bool},
+		},
+		{
+			name: "__go_chan_cap",
+			rfi: &ri.chanCap,
+			args: []types.Type{UnsafePointer},
+			res: []types.Type{Int},
+		},
+		{
+			name: "__go_chan_len",
+			rfi: &ri.chanLen,
+			args: []types.Type{UnsafePointer},
+			res: []types.Type{Int},
+		},
+		{
+			name: "runtime.chanrecv2",
+			rfi: &ri.chanrecv2,
+			args: []types.Type{UnsafePointer, UnsafePointer, UnsafePointer},
+			res: []types.Type{Bool},
+		},
+		{
+			name: "__go_check_defer",
+			rfi: &ri.checkDefer,
+			args: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_check_interface_type",
+			rfi: &ri.checkInterfaceType,
+			args: []types.Type{UnsafePointer, UnsafePointer, UnsafePointer},
+		},
+		{
+			name: "__go_builtin_close",
+			rfi: &ri.builtinClose,
+			args: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_convert_interface",
+			rfi: &ri.convertInterface,
+			args: []types.Type{UnsafePointer, UnsafePointer},
+			res: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_copy",
+			rfi: &ri.copy,
+			args: []types.Type{UnsafePointer, UnsafePointer, Uintptr},
+		},
+		{
+			name: "__go_defer",
+			rfi: &ri.Defer,
+			args: []types.Type{UnsafePointer, UnsafePointer, UnsafePointer},
+		},
+		{
+			name: "__go_deferred_recover",
+			rfi: &ri.deferredRecover,
+			res: []types.Type{EmptyInterface},
+		},
+		{
+			name: "__go_empty_interface_compare",
+			rfi: &ri.emptyInterfaceCompare,
+			args: []types.Type{EmptyInterface, EmptyInterface},
+			res: []types.Type{Int},
+		},
+		{
+			name: "__go_get_closure",
+			rfi: &ri.getClosure,
+			res: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_go",
+			rfi: &ri.Go,
+			args: []types.Type{UnsafePointer, UnsafePointer},
+		},
+		{
+			name: "runtime.ifaceE2I2",
+			rfi: &ri.ifaceE2I2,
+			args: []types.Type{UnsafePointer, EmptyInterface},
+			res: []types.Type{EmptyInterface, Bool},
+		},
+		{
+			name: "runtime.ifaceI2I2",
+			rfi: &ri.ifaceI2I2,
+			args: []types.Type{UnsafePointer, EmptyInterface},
+			res: []types.Type{EmptyInterface, Bool},
+		},
+		{
+			name: "__go_int_array_to_string",
+			rfi: &ri.intArrayToString,
+			args: []types.Type{UnsafePointer, Int},
+			res: []types.Type{String},
+		},
+		{
+			name: "__go_int_to_string",
+			rfi: &ri.intToString,
+			args: []types.Type{Int},
+			res: []types.Type{String},
+		},
+		{
+			name: "__go_interface_compare",
+			rfi: &ri.interfaceCompare,
+			args: []types.Type{EmptyInterface, EmptyInterface},
+			res: []types.Type{Int},
+		},
+		{
+			name: "__go_make_slice2",
+			rfi: &ri.makeSlice,
+			args: []types.Type{UnsafePointer, Uintptr, Uintptr},
+			res: []types.Type{IntSlice},
+		},
+		{
+			name: "runtime.mapdelete",
+			rfi: &ri.mapdelete,
+			args: []types.Type{UnsafePointer, UnsafePointer},
+		},
+		{
+			name: "runtime.mapiter2",
+			rfi: &ri.mapiter2,
+			args: []types.Type{UnsafePointer, UnsafePointer, UnsafePointer},
+		},
+		{
+			name: "runtime.mapiterinit",
+			rfi: &ri.mapiterinit,
+			args: []types.Type{UnsafePointer, UnsafePointer},
+		},
+		{
+			name: "runtime.mapiternext",
+			rfi: &ri.mapiternext,
+			args: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_map_index",
+			rfi: &ri.mapIndex,
+			args: []types.Type{UnsafePointer, UnsafePointer, Bool},
+			res: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_map_len",
+			rfi: &ri.mapLen,
+			args: []types.Type{UnsafePointer},
+			res: []types.Type{Int},
+		},
+		{
+			name: "__go_new",
+			rfi: &ri.New,
+			args: []types.Type{UnsafePointer, Uintptr},
+			res: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_new_channel",
+			rfi: &ri.newChannel,
+			args: []types.Type{UnsafePointer, Uintptr},
+			res: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_new_map",
+			rfi: &ri.newMap,
+			args: []types.Type{UnsafePointer, Uintptr},
+			res: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_new_nopointers",
+			rfi: &ri.NewNopointers,
+			args: []types.Type{UnsafePointer, Uintptr},
+			res: []types.Type{UnsafePointer},
+		},
+		{
+			name: "runtime.newselect",
+			rfi: &ri.newSelect,
+			args: []types.Type{Int32},
+			res: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_panic",
+			rfi: &ri.panic,
+			args: []types.Type{EmptyInterface},
+			attrs: []llvm.Attribute{llvm.NoReturnAttribute},
+		},
+		{
+			name: "__go_print_bool",
+			rfi: &ri.printBool,
+			args: []types.Type{Bool},
+		},
+		{
+			name: "__go_print_complex",
+			rfi: &ri.printComplex,
+			args: []types.Type{Complex128},
+		},
+		{
+			name: "__go_print_double",
+			rfi: &ri.printDouble,
+			args: []types.Type{Float64},
+		},
+		{
+			name: "__go_print_empty_interface",
+			rfi: &ri.printEmptyInterface,
+			args: []types.Type{EmptyInterface},
+		},
+		{
+			name: "__go_print_interface",
+			rfi: &ri.printInterface,
+			args: []types.Type{EmptyInterface},
+		},
+		{
+			name: "__go_print_int64",
+			rfi: &ri.printInt64,
+			args: []types.Type{Int64},
+		},
+		{
+			name: "__go_print_nl",
+			rfi: &ri.printNl,
+		},
+		{
+			name: "__go_print_pointer",
+			rfi: &ri.printPointer,
+			args: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_print_slice",
+			rfi: &ri.printSlice,
+			args: []types.Type{IntSlice},
+		},
+		{
+			name: "__go_print_space",
+			rfi: &ri.printSpace,
+		},
+		{
+			name: "__go_print_string",
+			rfi: &ri.printString,
+			args: []types.Type{String},
+		},
+		{
+			// NOTE(review): declared with Int64 even though it prints
+			// a uint64; the LLVM-level representation is the same
+			// width — confirm this is intentional.
+			name: "__go_print_uint64",
+			rfi: &ri.printUint64,
+			args: []types.Type{Int64},
+		},
+		{
+			name: "__go_receive",
+			rfi: &ri.receive,
+			args: []types.Type{UnsafePointer, UnsafePointer, UnsafePointer},
+		},
+		{
+			name: "__go_recover",
+			rfi: &ri.recover,
+			res: []types.Type{EmptyInterface},
+		},
+		{
+			name: "__go_register_gc_roots",
+			rfi: &ri.registerGcRoots,
+			args: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_runtime_error",
+			rfi: &ri.runtimeError,
+			args: []types.Type{Int32},
+			attrs: []llvm.Attribute{llvm.NoReturnAttribute},
+		},
+		{
+			name: "runtime.selectdefault",
+			rfi: &ri.selectdefault,
+			args: []types.Type{UnsafePointer, Int32},
+		},
+		{
+			name: "runtime.selectgo",
+			rfi: &ri.selectgo,
+			args: []types.Type{UnsafePointer},
+			res: []types.Type{Int},
+		},
+		{
+			name: "runtime.selectrecv2",
+			rfi: &ri.selectrecv2,
+			args: []types.Type{UnsafePointer, UnsafePointer, UnsafePointer, UnsafePointer, Int32},
+		},
+		{
+			name: "runtime.selectsend",
+			rfi: &ri.selectsend,
+			args: []types.Type{UnsafePointer, UnsafePointer, UnsafePointer, Int32},
+		},
+		{
+			name: "__go_send_big",
+			rfi: &ri.sendBig,
+			args: []types.Type{UnsafePointer, UnsafePointer, UnsafePointer},
+		},
+		{
+			name: "__go_set_closure",
+			rfi: &ri.setClosure,
+			args: []types.Type{UnsafePointer},
+		},
+		{
+			name: "__go_set_defer_retaddr",
+			rfi: &ri.setDeferRetaddr,
+			args: []types.Type{UnsafePointer},
+			res: []types.Type{Bool},
+		},
+		{
+			name: "__go_strcmp",
+			rfi: &ri.strcmp,
+			args: []types.Type{String, String},
+			res: []types.Type{Int},
+		},
+		{
+			name: "__go_string_plus",
+			rfi: &ri.stringPlus,
+			args: []types.Type{String, String},
+			res: []types.Type{String},
+		},
+		{
+			name: "__go_string_slice",
+			rfi: &ri.stringSlice,
+			args: []types.Type{String, Int, Int},
+			res: []types.Type{String},
+		},
+		{
+			name: "__go_string_to_int_array",
+			rfi: &ri.stringToIntArray,
+			args: []types.Type{String},
+			res: []types.Type{IntSlice},
+		},
+		{
+			name: "runtime.stringiter2",
+			rfi: &ri.stringiter2,
+			args: []types.Type{String, Int},
+			res: []types.Type{Int, Rune},
+		},
+		{
+			name: "__go_type_descriptors_equal",
+			rfi: &ri.typeDescriptorsEqual,
+			args: []types.Type{UnsafePointer, UnsafePointer},
+			res: []types.Type{Bool},
+		},
+		{
+			name: "__go_undefer",
+			rfi: &ri.undefer,
+			args: []types.Type{UnsafePointer},
+		},
+	} {
+		rt.rfi.init(tm, module, rt.name, rt.args, rt.res)
+		for _, attr := range rt.attrs {
+			rt.rfi.fn.AddFunctionAttr(attr)
+		}
+	}
+
+	// The memset/memcpy intrinsic names are suffixed with the target's
+	// pointer-sized integer width (e.g. llvm.memset.p0i8.i64).
+	memsetName := "llvm.memset.p0i8.i" + strconv.Itoa(tm.target.IntPtrType().IntTypeWidth())
+	memsetType := llvm.FunctionType(
+		llvm.VoidType(),
+		[]llvm.Type{
+			llvm.PointerType(llvm.Int8Type(), 0),
+			llvm.Int8Type(),
+			tm.target.IntPtrType(),
+			llvm.Int32Type(),
+			llvm.Int1Type(),
+		},
+		false,
+	)
+	ri.memset = llvm.AddFunction(module, memsetName, memsetType)
+
+	memcpyName := "llvm.memcpy.p0i8.p0i8.i" + strconv.Itoa(tm.target.IntPtrType().IntTypeWidth())
+	memcpyType := llvm.FunctionType(
+		llvm.VoidType(),
+		[]llvm.Type{
+			llvm.PointerType(llvm.Int8Type(), 0),
+			llvm.PointerType(llvm.Int8Type(), 0),
+			tm.target.IntPtrType(),
+			llvm.Int32Type(),
+			llvm.Int1Type(),
+		},
+		false,
+	)
+	ri.memcpy = llvm.AddFunction(module, memcpyName, memcpyType)
+
+	returnaddressType := llvm.FunctionType(
+		llvm.PointerType(llvm.Int8Type(), 0),
+		[]llvm.Type{llvm.Int32Type()},
+		false,
+	)
+	ri.returnaddress = llvm.AddFunction(module, "llvm.returnaddress", returnaddressType)
+
+	// gccgo-compatible exception handling: personality routine and the
+	// {i8*, i32} landing pad result type.
+	gccgoPersonalityType := llvm.FunctionType(
+		llvm.Int32Type(),
+		[]llvm.Type{
+			llvm.Int32Type(),
+			llvm.Int64Type(),
+			llvm.PointerType(llvm.Int8Type(), 0),
+			llvm.PointerType(llvm.Int8Type(), 0),
+		},
+		false,
+	)
+	ri.gccgoPersonality = llvm.AddFunction(module, "__gccgo_personality_v0", gccgoPersonalityType)
+
+	ri.gccgoExceptionType = llvm.StructType(
+		[]llvm.Type{
+			llvm.PointerType(llvm.Int8Type(), 0),
+			llvm.Int32Type(),
+		},
+		false,
+	)
+
+	return &ri, nil
+}
+
+// createZExtOrTrunc returns v adjusted to the width of the integer type
+// t: zero-extended if narrower, truncated if wider, unchanged if equal.
+func (fr *frame) createZExtOrTrunc(v llvm.Value, t llvm.Type, name string) llvm.Value {
+	switch n := v.Type().IntTypeWidth() - t.IntTypeWidth(); {
+	case n < 0:
+		// Bug fix: convert to the requested type t rather than to
+		// fr.target.IntPtrType(). The parameter was being ignored;
+		// the two happen to coincide for the current callers, but the
+		// width comparison above is against t, so converting to any
+		// other type would be inconsistent.
+		v = fr.builder.CreateZExt(v, t, name)
+	case n > 0:
+		v = fr.builder.CreateTrunc(v, t, name)
+	}
+	return v
+}
+
+// createMalloc allocates size bytes of pointer-free memory via
+// __go_new_nopointers, returning the raw allocation.
+func (fr *frame) createMalloc(size llvm.Value) llvm.Value {
+	return fr.runtime.NewNopointers.callOnly(fr,
+		llvm.ConstNull(llvm.PointerType(llvm.Int8Type(), 0)),
+		fr.createZExtOrTrunc(size, fr.target.IntPtrType(), ""))[0]
+}
+
+// createTypeMalloc allocates zeroed storage for a value of type t via
+// __go_new (passing t's runtime type descriptor), and returns the result
+// cast to a pointer to t's LLVM type.
+func (fr *frame) createTypeMalloc(t types.Type) llvm.Value {
+	size := llvm.ConstInt(fr.target.IntPtrType(), uint64(fr.llvmtypes.Sizeof(t)), false)
+	malloc := fr.runtime.New.callOnly(fr, fr.types.ToRuntime(t), size)[0]
+	return fr.builder.CreateBitCast(malloc, llvm.PointerType(fr.types.ToLLVM(t), 0), "")
+}
+
+// memsetZero zeroes size bytes at ptr using the llvm.memset intrinsic
+// (alignment 1, non-volatile).
+func (fr *frame) memsetZero(ptr llvm.Value, size llvm.Value) {
+	memset := fr.runtime.memset
+	ptr = fr.builder.CreateBitCast(ptr, llvm.PointerType(llvm.Int8Type(), 0), "")
+	fill := llvm.ConstNull(llvm.Int8Type())
+	size = fr.createZExtOrTrunc(size, fr.target.IntPtrType(), "")
+	align := llvm.ConstInt(llvm.Int32Type(), 1, false)
+	isvolatile := llvm.ConstNull(llvm.Int1Type())
+	fr.builder.CreateCall(memset, []llvm.Value{ptr, fill, size, align, isvolatile}, "")
+}
+
+// memcpy copies size bytes from src to dest using the llvm.memcpy
+// intrinsic (alignment 1, non-volatile).
+func (fr *frame) memcpy(dest llvm.Value, src llvm.Value, size llvm.Value) {
+	memcpy := fr.runtime.memcpy
+	dest = fr.builder.CreateBitCast(dest, llvm.PointerType(llvm.Int8Type(), 0), "")
+	src = fr.builder.CreateBitCast(src, llvm.PointerType(llvm.Int8Type(), 0), "")
+	size = fr.createZExtOrTrunc(size, fr.target.IntPtrType(), "")
+	align := llvm.ConstInt(llvm.Int32Type(), 1, false)
+	isvolatile := llvm.ConstNull(llvm.Int1Type())
+	fr.builder.CreateCall(memcpy, []llvm.Value{dest, src, size, align, isvolatile}, "")
+}
+
+// returnAddress emits a call to llvm.returnaddress for the given call
+// stack level (0 = current function's return address).
+func (fr *frame) returnAddress(level uint64) llvm.Value {
+	returnaddress := fr.runtime.returnaddress
+	levelValue := llvm.ConstInt(llvm.Int32Type(), level, false)
+	return fr.builder.CreateCall(returnaddress, []llvm.Value{levelValue}, "")
+}
diff --git a/irgen/slice.go b/irgen/slice.go
new file mode 100644
index 0000000..ee267a8
--- /dev/null
+++ b/irgen/slice.go
@@ -0,0 +1,106 @@
+//===- slice.go - IR generation for slices --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements IR generation for slices.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// makeSlice allocates a new slice with the optional length and capacity,
+// initialising its contents to their zero values. Length and capacity are
+// converted to uintptr before the runtime (__go_make_slice2) call, which
+// also performs the allocation and zeroing.
+func (fr *frame) makeSlice(sliceType types.Type, length, capacity *govalue) *govalue {
+	length = fr.convert(length, types.Typ[types.Uintptr])
+	capacity = fr.convert(capacity, types.Typ[types.Uintptr])
+	runtimeType := fr.types.ToRuntime(sliceType)
+	llslice := fr.runtime.makeSlice.call(fr, runtimeType, length.value, capacity.value)
+	return newValue(llslice[0], sliceType)
+}
+
+// slice emits IR for a slice expression x[low:high:max], where x has type
+// xtyp (a *array, slice or string). Any of low/high/max may be a nil
+// llvm.Value, meaning the corresponding index was omitted. Strings are
+// delegated to the runtime's __go_string_slice; for arrays and slices a
+// full 0 <= low <= high <= max <= cap bounds check is emitted, branching
+// to a runtime error on violation, and a new slice header is built.
+func (fr *frame) slice(x llvm.Value, xtyp types.Type, low, high, max llvm.Value) llvm.Value {
+	// Normalise present indices to the target int type; a missing low
+	// defaults to 0 here, missing high/max are resolved per case below.
+	if !low.IsNil() {
+		low = fr.createZExtOrTrunc(low, fr.types.inttype, "")
+	} else {
+		low = llvm.ConstNull(fr.types.inttype)
+	}
+	if !high.IsNil() {
+		high = fr.createZExtOrTrunc(high, fr.types.inttype, "")
+	}
+	if !max.IsNil() {
+		max = fr.createZExtOrTrunc(max, fr.types.inttype, "")
+	}
+
+	var arrayptr, arraylen, arraycap llvm.Value
+	var elemtyp types.Type
+	var errcode uint64
+	switch typ := xtyp.Underlying().(type) {
+	case *types.Pointer: // *array
+		errcode = gccgoRuntimeErrorARRAY_SLICE_OUT_OF_BOUNDS
+		arraytyp := typ.Elem().Underlying().(*types.Array)
+		elemtyp = arraytyp.Elem()
+		arrayptr = x
+		arrayptr = fr.builder.CreateBitCast(arrayptr, llvm.PointerType(llvm.Int8Type(), 0), "")
+		arraylen = llvm.ConstInt(fr.llvmtypes.inttype, uint64(arraytyp.Len()), false)
+		arraycap = arraylen
+	case *types.Slice:
+		errcode = gccgoRuntimeErrorSLICE_SLICE_OUT_OF_BOUNDS
+		elemtyp = typ.Elem()
+		arrayptr = fr.builder.CreateExtractValue(x, 0, "")
+		arraylen = fr.builder.CreateExtractValue(x, 1, "")
+		arraycap = fr.builder.CreateExtractValue(x, 2, "")
+	case *types.Basic:
+		// String slicing: the runtime treats a high index of -1 as
+		// "to the end of the string".
+		if high.IsNil() {
+			high = llvm.ConstAllOnes(fr.types.inttype) // -1
+		}
+		result := fr.runtime.stringSlice.call(fr, x, low, high)
+		return result[0]
+	default:
+		panic("unimplemented")
+	}
+	if high.IsNil() {
+		high = arraylen
+	}
+	if max.IsNil() {
+		max = arraycap
+	}
+
+	// Bounds checking: 0 <= low <= high <= max <= cap
+	zero := llvm.ConstNull(fr.types.inttype)
+	l0 := fr.builder.CreateICmp(llvm.IntSLT, low, zero, "")
+	hl := fr.builder.CreateICmp(llvm.IntSLT, high, low, "")
+	mh := fr.builder.CreateICmp(llvm.IntSLT, max, high, "")
+	cm := fr.builder.CreateICmp(llvm.IntSLT, arraycap, max, "")
+
+	cond := fr.builder.CreateOr(l0, hl, "")
+	cond = fr.builder.CreateOr(cond, mh, "")
+	cond = fr.builder.CreateOr(cond, cm, "")
+
+	fr.condBrRuntimeError(cond, errcode)
+
+	slicelen := fr.builder.CreateSub(high, low, "")
+	slicecap := fr.builder.CreateSub(max, low, "")
+
+	// Advance the base pointer by low elements (byte offset, since the
+	// pointer was cast to i8* above for the array case).
+	elemsize := llvm.ConstInt(fr.llvmtypes.inttype, uint64(fr.llvmtypes.Sizeof(elemtyp)), false)
+	offset := fr.builder.CreateMul(low, elemsize, "")
+
+	sliceptr := fr.builder.CreateInBoundsGEP(arrayptr, []llvm.Value{offset}, "")
+
+	// Assemble the {ptr, len, cap} slice header.
+	llslicetyp := fr.llvmtypes.sliceBackendType().ToLLVM(fr.llvmtypes.ctx)
+	sliceValue := llvm.Undef(llslicetyp)
+	sliceValue = fr.builder.CreateInsertValue(sliceValue, sliceptr, 0, "")
+	sliceValue = fr.builder.CreateInsertValue(sliceValue, slicelen, 1, "")
+	sliceValue = fr.builder.CreateInsertValue(sliceValue, slicecap, 2, "")
+
+	return sliceValue
+}
diff --git a/irgen/ssa.go b/irgen/ssa.go
new file mode 100644
index 0000000..b49a1bb
--- /dev/null
+++ b/irgen/ssa.go
@@ -0,0 +1,1303 @@
+//===- ssa.go - IR generation from go/ssa ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the top-level LLVM IR generation from go/ssa form.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "os"
+ "sort"
+
+ "llvm.org/llgo/ssaopt"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/ssa/ssautil"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// A globalInit is used to temporarily store a global's initializer until
+// we are ready to build it. It forms a tree mirroring the global's LLVM
+// aggregate type: val, when set, is a constant for this whole subtree;
+// elems holds per-field/per-element initializers otherwise.
+type globalInit struct {
+	val llvm.Value
+	elems []globalInit
+}
+
+// update records val as the initializer for the element of typ addressed
+// by indices (empty indices = the whole value). If a whole-subtree
+// constant already exists it is updated in place with ConstInsertValue;
+// the element tree is maintained as well, and is allocated lazily to
+// match typ's struct/array shape.
+func (gi *globalInit) update(typ llvm.Type, indices []uint32, val llvm.Value) {
+	if len(indices) == 0 {
+		gi.val = val
+		return
+	}
+
+	if gi.val.C != nil {
+		gi.val = llvm.ConstInsertValue(gi.val, val, indices)
+	}
+
+	tk := typ.TypeKind()
+
+	if len(gi.elems) == 0 {
+		switch tk {
+		case llvm.StructTypeKind:
+			gi.elems = make([]globalInit, typ.StructElementTypesCount())
+		case llvm.ArrayTypeKind:
+			gi.elems = make([]globalInit, typ.ArrayLength())
+		default:
+			panic("unexpected type")
+		}
+	}
+
+	var eltyp llvm.Type
+	switch tk {
+	case llvm.StructTypeKind:
+		eltyp = typ.StructElementTypes()[indices[0]]
+	case llvm.ArrayTypeKind:
+		eltyp = typ.ElementType()
+	default:
+		panic("unexpected type")
+	}
+
+	gi.elems[indices[0]].update(eltyp, indices[1:], val)
+}
+
+// build materialises the recorded initializer as an LLVM constant of typ.
+// A whole-subtree constant wins; an untouched subtree becomes a zero
+// initializer; otherwise the struct/array is built recursively from elems.
+func (gi *globalInit) build(typ llvm.Type) llvm.Value {
+	if gi.val.C != nil {
+		return gi.val
+	}
+	if len(gi.elems) == 0 {
+		return llvm.ConstNull(typ)
+	}
+
+	switch typ.TypeKind() {
+	case llvm.StructTypeKind:
+		eltypes := typ.StructElementTypes()
+		elems := make([]llvm.Value, len(eltypes))
+		for i, eltyp := range eltypes {
+			elems[i] = gi.elems[i].build(eltyp)
+		}
+		return llvm.ConstStruct(elems, false)
+	case llvm.ArrayTypeKind:
+		eltyp := typ.ElementType()
+		elems := make([]llvm.Value, len(gi.elems))
+		for i := range gi.elems {
+			elems[i] = gi.elems[i].build(eltyp)
+		}
+		return llvm.ConstArray(eltyp, elems)
+	default:
+		panic("unexpected type")
+	}
+}
+
+// unit holds the per-package state accumulated while translating one
+// *ssa.Package into the LLVM module.
+type unit struct {
+	*compiler
+	pkg *ssa.Package
+	// globals maps SSA values (globals and functions) to their LLVM
+	// counterparts.
+	globals map[ssa.Value]llvm.Value
+	// globalInits maps each LLVM global to its pending initializer tree.
+	globalInits map[llvm.Value]*globalInit
+
+	// funcDescriptors maps *ssa.Functions to function descriptors,
+	// the first-class representation of functions.
+	funcDescriptors map[*ssa.Function]llvm.Value
+
+	// undefinedFuncs contains functions that have been resolved
+	// (declared) but not defined.
+	undefinedFuncs map[*ssa.Function]bool
+
+	// gcRoots collects {pointer, size} records for globals containing
+	// pointers, for registration with the garbage collector.
+	gcRoots []llvm.Value
+}
+
+// newUnit returns a unit for translating pkg with compiler c, with all
+// lookup maps initialised.
+func newUnit(c *compiler, pkg *ssa.Package) *unit {
+	u := &unit{
+		compiler: c,
+		pkg: pkg,
+		globals: make(map[ssa.Value]llvm.Value),
+		globalInits: make(map[llvm.Value]*globalInit),
+		funcDescriptors: make(map[*ssa.Function]llvm.Value),
+		undefinedFuncs: make(map[*ssa.Function]bool),
+	}
+	return u
+}
+
+// byMemberName implements sort.Interface, ordering package members by name
+// so that emission order is deterministic.
+type byMemberName []ssa.Member
+
+func (ms byMemberName) Len() int { return len(ms) }
+func (ms byMemberName) Swap(i, j int) {
+	ms[i], ms[j] = ms[j], ms[i]
+}
+func (ms byMemberName) Less(i, j int) bool {
+	return ms[i].Name() < ms[j].Name()
+}
+
+// byFunctionString implements sort.Interface, ordering functions by their
+// fully qualified String() form for deterministic emission.
+type byFunctionString []*ssa.Function
+
+func (fns byFunctionString) Len() int { return len(fns) }
+func (fns byFunctionString) Swap(i, j int) {
+	fns[i], fns[j] = fns[j], fns[i]
+}
+func (fns byFunctionString) Less(i, j int) bool {
+	return fns[i].String() < fns[j].String()
+}
+
+// Emit functions in order of their fully qualified names. This is so that a
+// bootstrap build can be verified by comparing the stage2 and stage3 binaries.
+// (Map iteration order is randomised, so the set is copied to a slice and
+// sorted first.)
+func (u *unit) defineFunctionsInOrder(functions map[*ssa.Function]bool) {
+	fns := []*ssa.Function{}
+	for f, _ := range functions {
+		fns = append(fns, f)
+	}
+	sort.Sort(byFunctionString(fns))
+	for _, f := range fns {
+		u.defineFunction(f)
+	}
+}
+
+// translatePackage translates an *ssa.Package into an LLVM module, and returns
+// the translation unit information. Members are processed in sorted order
+// for deterministic output (see defineFunctionsInOrder).
+func (u *unit) translatePackage(pkg *ssa.Package) {
+	ms := make([]ssa.Member, len(pkg.Members))
+	i := 0
+	for _, m := range pkg.Members {
+		ms[i] = m
+		i++
+	}
+
+	sort.Sort(byMemberName(ms))
+
+	// Initialize global storage and type descriptors for this package.
+	// We must create globals regardless of whether they're referenced,
+	// hence the duplication in frame.value.
+	for _, m := range ms {
+		switch v := m.(type) {
+		case *ssa.Global:
+			elemtyp := deref(v.Type())
+			llelemtyp := u.llvmtypes.ToLLVM(elemtyp)
+			vname := u.types.mc.mangleGlobalName(v)
+			global := llvm.AddGlobal(u.module.Module, llelemtyp, vname)
+			if !v.Object().Exported() {
+				global.SetLinkage(llvm.InternalLinkage)
+			}
+			u.addGlobal(global, elemtyp)
+			// Cast the element-typed global to the SSA value's
+			// pointer type for use at reference sites.
+			global = llvm.ConstBitCast(global, u.llvmtypes.ToLLVM(v.Type()))
+			u.globals[v] = global
+		case *ssa.Type:
+			u.types.getTypeDescriptorPointer(v.Type())
+		}
+	}
+
+	// Define functions.
+	u.defineFunctionsInOrder(ssautil.AllFunctions(pkg.Prog))
+
+	// Emit initializers for type descriptors, which may trigger
+	// the resolution of additional functions.
+	u.types.emitTypeDescInitializers()
+
+	// Define remaining functions that were resolved during
+	// runtime type mapping, but not defined.
+	u.defineFunctionsInOrder(u.undefinedFuncs)
+
+	// Set initializers for globals.
+	for global, init := range u.globalInits {
+		initval := init.build(global.Type().ElementType())
+		global.SetInitializer(initval)
+	}
+}
+
+// addGlobal allocates a pending-initializer slot for global and, if its
+// Go type contains pointers, records a {pointer, size} GC root entry for
+// later registration with the collector.
+func (u *unit) addGlobal(global llvm.Value, ty types.Type) {
+	u.globalInits[global] = new(globalInit)
+
+	if hasPointers(ty) {
+		global = llvm.ConstBitCast(global, llvm.PointerType(llvm.Int8Type(), 0))
+		size := llvm.ConstInt(u.types.inttype, uint64(u.types.Sizeof(ty)), false)
+		root := llvm.ConstStruct([]llvm.Value{global, size}, false)
+		u.gcRoots = append(u.gcRoots, root)
+	}
+}
+
+// ResolveMethod implements MethodResolver.ResolveMethod. It resolves the
+// selected method's function global and returns it as an i8*-typed value
+// carrying the method's signature.
+func (u *unit) ResolveMethod(s *types.Selection) *govalue {
+	m := u.pkg.Prog.Method(s)
+	llfn := u.resolveFunctionGlobal(m)
+	llfn = llvm.ConstBitCast(llfn, llvm.PointerType(llvm.Int8Type(), 0))
+	return newValue(llfn, m.Signature)
+}
+
+// resolveFunctionDescriptorGlobal returns a reference to the LLVM global
+// storing the function's descriptor (an i8* constant global named
+// "<mangled>$descriptor"), creating and caching it on first use.
+func (u *unit) resolveFunctionDescriptorGlobal(f *ssa.Function) llvm.Value {
+	llfd, ok := u.funcDescriptors[f]
+	if !ok {
+		name := u.types.mc.mangleFunctionName(f) + "$descriptor"
+		llfd = llvm.AddGlobal(u.module.Module, llvm.PointerType(llvm.Int8Type(), 0), name)
+		llfd.SetGlobalConstant(true)
+		u.funcDescriptors[f] = llfd
+	}
+	return llfd
+}
+
+// resolveFunctionDescriptor returns a function's
+// first-class value representation: the descriptor global cast to i8*,
+// paired with the function's signature.
+func (u *unit) resolveFunctionDescriptor(f *ssa.Function) *govalue {
+	llfd := u.resolveFunctionDescriptorGlobal(f)
+	llfd = llvm.ConstBitCast(llfd, llvm.PointerType(llvm.Int8Type(), 0))
+	return newValue(llfd, f.Signature)
+}
+
+// resolveFunctionGlobal returns an llvm.Value for a function global,
+// declaring it (and marking it undefined, pending a body) on first use.
+func (u *unit) resolveFunctionGlobal(f *ssa.Function) llvm.Value {
+	if v, ok := u.globals[f]; ok {
+		return v
+	}
+	name := u.types.mc.mangleFunctionName(f)
+	// It's possible that the function already exists in the module;
+	// for example, if it's a runtime intrinsic that the compiler
+	// has already referenced.
+	llvmFunction := u.module.Module.NamedFunction(name)
+	if llvmFunction.IsNil() {
+		fti := u.llvmtypes.getSignatureInfo(f.Signature)
+		llvmFunction = fti.declare(u.module.Module, name)
+		u.undefinedFuncs[f] = true
+	}
+	u.globals[f] = llvmFunction
+	return llvmFunction
+}
+
+// getFunctionLinkage chooses the LLVM linkage for f: link-once ODR for
+// package-less synthetic wrappers (which may be emitted by several
+// packages), internal for anonymous functions and most unexported
+// package-level functions, and external otherwise (including main.main
+// and init, which must be visible to the runtime/linker).
+func (u *unit) getFunctionLinkage(f *ssa.Function) llvm.Linkage {
+	switch {
+	case f.Pkg == nil:
+		// Synthetic functions outside packages may appear in multiple packages.
+		return llvm.LinkOnceODRLinkage
+
+	case f.Parent() != nil:
+		// Anonymous.
+		return llvm.InternalLinkage
+
+	case f.Signature.Recv() == nil && !ast.IsExported(f.Name()) &&
+		!(f.Name() == "main" && f.Pkg.Object.Path() == "main") &&
+		f.Name() != "init":
+		// Unexported methods may be referenced as part of an interface method
+		// table in another package. TODO(pcc): detect when this cannot happen.
+		return llvm.InternalLinkage
+
+	default:
+		return llvm.ExternalLinkage
+	}
+}
+
+// defineFunction generates code for f: it sets up the function's
+// descriptor and linkage, creates basic blocks, binds parameters,
+// free variables and locals, then translates each SSA block.
+func (u *unit) defineFunction(f *ssa.Function) {
+	// Only define functions from this package, or synthetic
+	// wrappers (which do not have a package).
+	if f.Pkg != nil && f.Pkg != u.pkg {
+		return
+	}
+
+	llfn := u.resolveFunctionGlobal(f)
+	linkage := u.getFunctionLinkage(f)
+
+	isMethod := f.Signature.Recv() != nil
+
+	// Methods cannot be referred to via a descriptor.
+	if !isMethod {
+		llfd := u.resolveFunctionDescriptorGlobal(f)
+		llfd.SetInitializer(llvm.ConstBitCast(llfn, llvm.PointerType(llvm.Int8Type(), 0)))
+		llfd.SetLinkage(linkage)
+	}
+
+	// We only need to emit a descriptor for functions without bodies.
+	if len(f.Blocks) == 0 {
+		return
+	}
+
+	ssaopt.LowerAllocsToStack(f)
+
+	if u.DumpSSA {
+		f.WriteTo(os.Stderr)
+	}
+
+	fr := newFrame(u, llfn)
+	defer fr.dispose()
+	fr.addCommonFunctionAttrs(fr.function)
+	fr.function.SetLinkage(linkage)
+
+	fr.logf("Define function: %s", f.String())
+	fti := u.llvmtypes.getSignatureInfo(f.Signature)
+	delete(u.undefinedFuncs, f)
+	fr.retInf = fti.retInf
+
+	// Push the compile unit and function onto the debug context.
+	if u.GenerateDebug {
+		u.debug.PushFunction(fr.function, f.Signature, f.Pos())
+		defer u.debug.PopFunction()
+		u.debug.SetLocation(fr.builder, f.Pos())
+	}
+
+	// If a function calls recover, we create a separate function to
+	// hold the real function, and this function calls __go_can_recover
+	// and bridges to it.
+	if callsRecover(f) {
+		fr = fr.bridgeRecoverFunc(fr.function, fti)
+	}
+
+	// One LLVM basic block per SSA block, named after its index/comment.
+	fr.blocks = make([]llvm.BasicBlock, len(f.Blocks))
+	fr.lastBlocks = make([]llvm.BasicBlock, len(f.Blocks))
+	for i, block := range f.Blocks {
+		fr.blocks[i] = llvm.AddBasicBlock(fr.function, fmt.Sprintf(".%d.%s", i, block.Comment))
+	}
+	fr.builder.SetInsertPointAtEnd(fr.blocks[0])
+
+	// Entry setup (parameters, free vars, locals) goes into a separate
+	// prologue block inserted before the first SSA block.
+	prologueBlock := llvm.InsertBasicBlock(fr.blocks[0], "prologue")
+	fr.builder.SetInsertPointAtEnd(prologueBlock)
+
+	// Map parameter positions to indices. We use this
+	// when processing locals to map back to parameters
+	// when generating debug metadata.
+	paramPos := make(map[token.Pos]int)
+	for i, param := range f.Params {
+		paramPos[param.Pos()] = i
+		llparam := fti.argInfos[i].decode(llvm.GlobalContext(), fr.builder, fr.builder)
+		if isMethod && i == 0 {
+			if _, ok := param.Type().Underlying().(*types.Pointer); !ok {
+				llparam = fr.builder.CreateBitCast(llparam, llvm.PointerType(fr.types.ToLLVM(param.Type()), 0), "")
+				llparam = fr.builder.CreateLoad(llparam, "")
+			}
+		}
+		fr.env[param] = newValue(llparam, param.Type())
+	}
+
+	// Load closure, extract free vars.
+	if len(f.FreeVars) > 0 {
+		for _, fv := range f.FreeVars {
+			fr.env[fv] = newValue(llvm.ConstNull(u.llvmtypes.ToLLVM(fv.Type())), fv.Type())
+		}
+		elemTypes := make([]llvm.Type, len(f.FreeVars)+1)
+		elemTypes[0] = llvm.PointerType(llvm.Int8Type(), 0) // function pointer
+		for i, fv := range f.FreeVars {
+			elemTypes[i+1] = u.llvmtypes.ToLLVM(fv.Type())
+		}
+		structType := llvm.StructType(elemTypes, false)
+		closure := fr.runtime.getClosure.call(fr)[0]
+		closure = fr.builder.CreateBitCast(closure, llvm.PointerType(structType, 0), "")
+		for i, fv := range f.FreeVars {
+			ptr := fr.builder.CreateStructGEP(closure, i+1, "")
+			ptr = fr.builder.CreateLoad(ptr, "")
+			fr.env[fv] = newValue(ptr, fv.Type())
+		}
+	}
+
+	// Allocate stack space for locals in the prologue block.
+	for _, local := range f.Locals {
+		typ := fr.llvmtypes.ToLLVM(deref(local.Type()))
+		alloca := fr.builder.CreateAlloca(typ, local.Comment)
+		fr.memsetZero(alloca, llvm.SizeOf(typ))
+		bcalloca := fr.builder.CreateBitCast(alloca, llvm.PointerType(llvm.Int8Type(), 0), "")
+		value := newValue(bcalloca, local.Type())
+		fr.env[local] = value
+		if fr.GenerateDebug {
+			paramIndex, ok := paramPos[local.Pos()]
+			if !ok {
+				paramIndex = -1
+			}
+			fr.debug.Declare(fr.builder, local, alloca, paramIndex)
+		}
+	}
+
+	// If this is the "init" function, enable init-specific optimizations.
+	if !isMethod && f.Name() == "init" {
+		fr.isInit = true
+	}
+
+	// If the function contains any defers, we must first create
+	// an unwind block. We can short-circuit the check for defers with
+	// f.Recover != nil.
+	if f.Recover != nil || hasDefer(f) {
+		fr.unwindBlock = llvm.AddBasicBlock(fr.function, "")
+		fr.frameptr = fr.builder.CreateAlloca(llvm.Int8Type(), "")
+	}
+
+	// Branch from the prologue into the first SSA block; allocas are
+	// inserted before this terminator.
+	term := fr.builder.CreateBr(fr.blocks[0])
+	fr.allocaBuilder.SetInsertPointBefore(term)
+
+	for _, block := range f.DomPreorder() {
+		fr.translateBlock(block, fr.blocks[block.Index])
+	}
+
+	fr.fixupPhis()
+
+	if !fr.unwindBlock.IsNil() {
+		fr.setupUnwindBlock(f.Recover, f.Signature.Results())
+	}
+
+	// The init function needs to register the GC roots first. We do this
+	// after generating code for it because allocations may have caused
+	// additional GC roots to be created.
+	if fr.isInit {
+		fr.builder.SetInsertPointBefore(prologueBlock.FirstInstruction())
+		fr.registerGcRoots()
+	}
+}
+
+// pendingPhi pairs an SSA phi with its LLVM phi node; incoming edges
+// are filled in later by fixupPhis, once all blocks are translated.
+type pendingPhi struct {
+	ssa  *ssa.Phi
+	llvm llvm.Value
+}
+
+// frame holds the per-function state used while generating code for a
+// single function body.
+type frame struct {
+	*unit
+	// function is the LLVM function being generated.
+	function llvm.Value
+	// builder emits ordinary instructions; allocaBuilder is positioned
+	// in the prologue for stack allocations.
+	builder, allocaBuilder llvm.Builder
+	retInf                 retInfo
+	// blocks maps SSA block index to its LLVM block; lastBlocks records
+	// the final insertion block after translating each SSA block.
+	blocks             []llvm.BasicBlock
+	lastBlocks         []llvm.BasicBlock
+	runtimeErrorBlocks [gccgoRuntimeErrorCount]llvm.BasicBlock
+	// unwindBlock/frameptr are set only when the function has defers
+	// (or a recover block); see defineFunction.
+	unwindBlock llvm.BasicBlock
+	frameptr    llvm.Value
+	// env maps SSA values to generated values; ptr holds elided-load
+	// pointers (see canAvoidLoad); tuples holds multi-result values.
+	env    map[ssa.Value]*govalue
+	ptr    map[ssa.Value]llvm.Value
+	tuples map[ssa.Value][]*govalue
+	phis   []pendingPhi
+	// canRecover is the $recover function's extra parameter, if any.
+	canRecover llvm.Value
+	// isInit is true while compiling a package init function.
+	isInit bool
+}
+
+// newFrame creates a frame for generating code into fn.
+func newFrame(u *unit, fn llvm.Value) *frame {
+	ctx := llvm.GlobalContext()
+	fr := &frame{
+		unit:          u,
+		function:      fn,
+		builder:       ctx.NewBuilder(),
+		allocaBuilder: ctx.NewBuilder(),
+	}
+	fr.env = make(map[ssa.Value]*govalue)
+	fr.ptr = make(map[ssa.Value]llvm.Value)
+	fr.tuples = make(map[ssa.Value][]*govalue)
+	return fr
+}
+
+// dispose releases the LLVM builders owned by this frame.
+func (fr *frame) dispose() {
+	for _, b := range []llvm.Builder{fr.builder, fr.allocaBuilder} {
+		b.Dispose()
+	}
+}
+
+// bridgeRecoverFunc creates a function that may call recover(), and creates
+// a call to it from the current frame. The created function will be called
+// with a boolean parameter that indicates whether it may call recover().
+//
+// The created function will have the same name as the current frame's function
+// with "$recover" appended, having the same return types and parameters with
+// an additional boolean parameter appended.
+//
+// A new frame will be returned for the newly created function.
+func (fr *frame) bridgeRecoverFunc(llfn llvm.Value, fti functionTypeInfo) *frame {
+	// The bridging function must not be inlined, or the return address
+	// may not correspond to the source function.
+	llfn.AddFunctionAttr(llvm.NoInlineAttribute)
+
+	// Call __go_can_recover, passing in the function's return address.
+	entry := llvm.AddBasicBlock(llfn, "entry")
+	fr.builder.SetInsertPointAtEnd(entry)
+	canRecover := fr.runtime.canRecover.call(fr, fr.returnAddress(0))[0]
+	returnType := fti.functionType.ReturnType()
+	argTypes := fti.functionType.ParamTypes()
+	argTypes = append(argTypes, canRecover.Type())
+
+	// Create and call the $recover function.
+	ftiRecover := fti
+	ftiRecover.functionType = llvm.FunctionType(returnType, argTypes, false)
+	llfnRecover := ftiRecover.declare(fr.module.Module, llfn.Name()+"$recover")
+	fr.addCommonFunctionAttrs(llfnRecover)
+	llfnRecover.SetLinkage(llvm.InternalLinkage)
+	// Forward all of the bridge's parameters, plus the canRecover flag.
+	args := make([]llvm.Value, len(argTypes)-1, len(argTypes))
+	for i := range args {
+		args[i] = llfn.Param(i)
+	}
+	args = append(args, canRecover)
+	result := fr.builder.CreateCall(llfnRecover, args, "")
+	if returnType.TypeKind() == llvm.VoidTypeKind {
+		fr.builder.CreateRetVoid()
+	} else {
+		fr.builder.CreateRet(result)
+	}
+
+	// The $recover function must condition calls to __go_recover on
+	// the result of __go_can_recover passed in as an argument.
+	fr = newFrame(fr.unit, llfnRecover)
+	fr.retInf = ftiRecover.retInf
+	fr.canRecover = fr.function.Param(len(argTypes) - 1)
+	return fr
+}
+
+// registerGcRoots emits a global containing the collected GC roots
+// (null-terminated) and registers it with the runtime. No-op when
+// there are no roots.
+func (fr *frame) registerGcRoots() {
+	if len(fr.gcRoots) == 0 {
+		return
+	}
+	i8ptr := llvm.PointerType(llvm.Int8Type(), 0)
+	rootType := fr.gcRoots[0].Type()
+	rootList := append(fr.gcRoots, llvm.ConstNull(rootType))
+	rootArray := llvm.ConstArray(rootType, rootList)
+	rootStruct := llvm.ConstStruct([]llvm.Value{llvm.ConstNull(i8ptr), rootArray}, false)
+
+	global := llvm.AddGlobal(fr.module.Module, rootStruct.Type(), "")
+	global.SetInitializer(rootStruct)
+	global.SetLinkage(llvm.InternalLinkage)
+	fr.runtime.registerGcRoots.callOnly(fr, llvm.ConstBitCast(global, i8ptr))
+}
+
+// fixupPhis attaches the incoming (value, predecessor-block) pairs to
+// every phi recorded during block translation.
+func (fr *frame) fixupPhis() {
+	for _, pending := range fr.phis {
+		edges := pending.ssa.Edges
+		preds := pending.ssa.Block().Preds
+		incomingValues := make([]llvm.Value, len(edges))
+		incomingBlocks := make([]llvm.BasicBlock, len(edges))
+		for i, edge := range edges {
+			incomingValues[i] = fr.llvmvalue(edge)
+			incomingBlocks[i] = fr.lastBlock(preds[i])
+		}
+		pending.llvm.AddIncoming(incomingValues, incomingBlocks)
+	}
+}
+
+// createLandingPad emits a landing pad: a cleanup pad when cleanup is
+// true, otherwise one with a catch-all (null) clause.
+func (fr *frame) createLandingPad(cleanup bool) llvm.Value {
+	pad := fr.builder.CreateLandingPad(fr.runtime.gccgoExceptionType, fr.runtime.gccgoPersonality, 0, "")
+	if !cleanup {
+		pad.AddClause(llvm.ConstNull(llvm.PointerType(llvm.Int8Type(), 0)))
+		return pad
+	}
+	pad.SetCleanup(true)
+	return pad
+}
+
+// Runs defers. If a defer panics, check for recovers in later defers.
+// The emitted loop invokes __go_undefer; if that unwinds, the retry
+// landing pad calls __go_check_defer and loops back.
+func (fr *frame) runDefers() {
+	loopbb := llvm.AddBasicBlock(fr.function, "")
+	fr.builder.CreateBr(loopbb)
+
+	retrylpad := llvm.AddBasicBlock(fr.function, "")
+	fr.builder.SetInsertPointAtEnd(retrylpad)
+	fr.createLandingPad(false)
+	fr.runtime.checkDefer.callOnly(fr, fr.frameptr)
+	fr.builder.CreateBr(loopbb)
+
+	fr.builder.SetInsertPointAtEnd(loopbb)
+	fr.runtime.undefer.invoke(fr, retrylpad, fr.frameptr)
+}
+
+// setupUnwindBlock populates the frame's unwind block: it runs defers,
+// then either resumes unwinding or branches to the recover block
+// (translated from rec when present, otherwise synthesized).
+func (fr *frame) setupUnwindBlock(rec *ssa.BasicBlock, results *types.Tuple) {
+	recoverbb := llvm.AddBasicBlock(fr.function, "")
+	if rec != nil {
+		fr.translateBlock(rec, recoverbb)
+	} else if results.Len() == 0 || results.At(0).Anonymous() {
+		// TODO(pcc): Remove this code after https://codereview.appspot.com/87210044/ lands
+		fr.builder.SetInsertPointAtEnd(recoverbb)
+		values := make([]llvm.Value, results.Len())
+		for i := range values {
+			values[i] = llvm.ConstNull(fr.llvmtypes.ToLLVM(results.At(i).Type()))
+		}
+		fr.retInf.encode(llvm.GlobalContext(), fr.allocaBuilder, fr.builder, values)
+	} else {
+		fr.builder.SetInsertPointAtEnd(recoverbb)
+		fr.builder.CreateUnreachable()
+	}
+
+	checkunwindbb := llvm.AddBasicBlock(fr.function, "")
+	fr.builder.SetInsertPointAtEnd(checkunwindbb)
+	exc := fr.createLandingPad(true)
+	fr.runDefers()
+
+	// A null frame pointer means no recover happened: resume unwinding.
+	frame := fr.builder.CreateLoad(fr.frameptr, "")
+	shouldresume := fr.builder.CreateIsNull(frame, "")
+
+	resumebb := llvm.AddBasicBlock(fr.function, "")
+	fr.builder.CreateCondBr(shouldresume, resumebb, recoverbb)
+
+	fr.builder.SetInsertPointAtEnd(resumebb)
+	fr.builder.CreateResume(exc)
+
+	fr.builder.SetInsertPointAtEnd(fr.unwindBlock)
+	fr.createLandingPad(false)
+	fr.runtime.checkDefer.invoke(fr, checkunwindbb, fr.frameptr)
+	fr.runDefers()
+	fr.builder.CreateBr(recoverbb)
+}
+
+// translateBlock emits code for every instruction of b into llb, then
+// records the builder's final block (instructions may create blocks).
+func (fr *frame) translateBlock(b *ssa.BasicBlock, llb llvm.BasicBlock) {
+	fr.builder.SetInsertPointAtEnd(llb)
+	for _, inst := range b.Instrs {
+		fr.instruction(inst)
+	}
+	fr.lastBlocks[b.Index] = fr.builder.GetInsertBlock()
+}
+
+// block returns the LLVM basic block created for SSA block b.
+func (fr *frame) block(b *ssa.BasicBlock) llvm.BasicBlock {
+	return fr.blocks[b.Index]
+}
+
+// lastBlock returns the final LLVM block produced while translating b
+// (differs from block(b) when translation created extra blocks).
+func (fr *frame) lastBlock(b *ssa.BasicBlock) llvm.BasicBlock {
+	return fr.lastBlocks[b.Index]
+}
+
+// value returns the govalue for v, materializing functions as
+// descriptors, constants directly, and external globals on demand.
+// Panics for an instruction whose value has not been generated yet.
+func (fr *frame) value(v ssa.Value) (result *govalue) {
+	switch v := v.(type) {
+	case nil:
+		return nil
+	case *ssa.Function:
+		return fr.resolveFunctionDescriptor(v)
+	case *ssa.Const:
+		return fr.newValueFromConst(v.Value, v.Type())
+	case *ssa.Global:
+		if g, ok := fr.globals[v]; ok {
+			return newValue(g, v.Type())
+		}
+		// Create an external global. Globals for this package are defined
+		// on entry to translatePackage, and have initialisers.
+		llelemtyp := fr.llvmtypes.ToLLVM(deref(v.Type()))
+		vname := fr.types.mc.mangleGlobalName(v)
+		llglobal := llvm.AddGlobal(fr.module.Module, llelemtyp, vname)
+		llglobal = llvm.ConstBitCast(llglobal, fr.llvmtypes.ToLLVM(v.Type()))
+		fr.globals[v] = llglobal
+		return newValue(llglobal, v.Type())
+	}
+	if value, ok := fr.env[v]; ok {
+		return value
+	}
+
+	panic("Instruction not visited yet")
+}
+
+// llvmvalue returns the raw llvm.Value for v, or a nil llvm.Value
+// when v itself is nil.
+func (fr *frame) llvmvalue(v ssa.Value) llvm.Value {
+	gv := fr.value(v)
+	if gv == nil {
+		return llvm.Value{nil}
+	}
+	return gv.value
+}
+
+// isNonNull reports whether v is statically known to be non-nil, so a
+// runtime nil check can be skipped.
+func (fr *frame) isNonNull(v ssa.Value) bool {
+	switch v.(type) {
+	case *ssa.Global:
+		// Globals have a fixed (non-nil) address.
+		return true
+	case *ssa.Alloc:
+		// The language does not specify what happens if an allocation fails.
+		return true
+	case *ssa.FieldAddr, *ssa.IndexAddr:
+		// These have already been nil checked.
+		return true
+	}
+	return false
+}
+
+// nilCheck emits a nil-dereference runtime check for llptr, unless v
+// is statically known to be non-nil.
+func (fr *frame) nilCheck(v ssa.Value, llptr llvm.Value) {
+	if fr.isNonNull(v) {
+		return
+	}
+	isNull := fr.builder.CreateIsNull(llptr, "")
+	fr.condBrRuntimeError(isNull, gccgoRuntimeErrorNIL_DEREFERENCE)
+}
+
+// canAvoidElementLoad reports whether every referrer is a Field or
+// Index instruction, meaning the element load itself can be elided.
+func (fr *frame) canAvoidElementLoad(refs []ssa.Instruction) bool {
+	for _, ref := range refs {
+		switch ref.(type) {
+		case *ssa.Field:
+			// ok
+		case *ssa.Index:
+			// ok
+		default:
+			return false
+		}
+	}
+	return true
+}
+
+// If this value is sufficiently large, look through referrers to see if we can
+// avoid a load. On success, records a pointer copy in fr.ptr[instr] and
+// returns true; the caller then skips emitting the load.
+func (fr *frame) canAvoidLoad(instr *ssa.UnOp, op llvm.Value) bool {
+	if fr.types.Sizeof(instr.Type()) < 16 {
+		// Don't bother with small values.
+		return false
+	}
+
+	// Keep track of whether our pointer may escape. We conservatively assume
+	// that MakeInterfaces will escape.
+	esc := false
+
+	// We only know how to avoid loads if they are used to create an interface
+	// or read an element of the structure. If we see any other referrer, abort.
+	for _, ref := range *instr.Referrers() {
+		switch ref.(type) {
+		case *ssa.MakeInterface:
+			esc = true
+		case *ssa.Field, *ssa.Index:
+			// ok
+		default:
+			return false
+		}
+	}
+
+	// Copy the pointee: heap-allocate if it may escape, else use a
+	// stack slot from the alloca builder.
+	var opcopy llvm.Value
+	if esc {
+		opcopy = fr.createTypeMalloc(instr.Type())
+	} else {
+		opcopy = fr.allocaBuilder.CreateAlloca(fr.types.ToLLVM(instr.Type()), "")
+	}
+	fr.memcpy(opcopy, op, llvm.ConstInt(fr.types.inttype, uint64(fr.types.Sizeof(instr.Type())), false))
+
+	fr.ptr[instr] = opcopy
+	return true
+}
+
+// Return true iff we think it might be beneficial to turn this alloc instruction
+// into a statically allocated global.
+// Precondition: we are compiling the init function.
+func (fr *frame) shouldStaticallyAllocate(alloc *ssa.Alloc) bool {
+	// Determine the element count for arrays and structs; any other
+	// type is statically allocated unconditionally.
+	var elemCount int64
+	switch ty := deref(alloc.Type()).Underlying().(type) {
+	case *types.Array:
+		elemCount = ty.Len()
+	case *types.Struct:
+		elemCount = int64(ty.NumFields())
+	default:
+		return true
+	}
+
+	// We treat the number of referrers to the alloc instruction as a rough
+	// proxy for the number of elements initialized. If the data structure
+	// is densely initialized (> 1/4 elements initialized), enable the
+	// optimization.
+	refCount := int64(len(*alloc.Referrers()))
+	return refCount*4 > elemCount
+}
+
+// If val is a constant and addr refers to a global variable which is defined in
+// this module or an element thereof, simulate the effect of storing val at addr
+// in the global variable's initializer and return true, otherwise return false.
+// Precondition: we are compiling the init function.
+func (fr *frame) maybeStoreInInitializer(val, addr llvm.Value) bool {
+	if val.IsAConstant().IsNil() {
+		return false
+	}
+
+	// Case 1: addr is a constant GEP into a global — operand 0 is the
+	// global, operand 1 must be the zero index, the rest select elements.
+	if !addr.IsAConstantExpr().IsNil() && addr.OperandsCount() >= 2 &&
+		// TODO(pcc): Explicitly check that this is a constant GEP.
+		// I don't think there are any other kinds of constantexpr which
+		// satisfy the conditions we test for here, so this is probably safe.
+		!addr.Operand(0).IsAGlobalVariable().IsNil() &&
+		addr.Operand(1).IsNull() {
+		gv := addr.Operand(0)
+		globalInit, ok := fr.globalInits[gv]
+		if !ok {
+			return false
+		}
+		indices := make([]uint32, addr.OperandsCount()-2)
+		for i := range indices {
+			op := addr.Operand(i + 2)
+			if op.IsAConstantInt().IsNil() {
+				return false
+			}
+			indices[i] = uint32(op.ZExtValue())
+		}
+		globalInit.update(gv.Type().ElementType(), indices, val)
+		return true
+	} else if !addr.IsAGlobalVariable().IsNil() {
+		// Case 2: addr is the global itself — replace the whole initializer.
+		if globalInit, ok := fr.globalInits[addr]; ok {
+			globalInit.update(addr.Type().ElementType(), nil, val)
+			return true
+		}
+		return false
+	} else {
+		return false
+	}
+}
+
+// instruction emits LLVM IR for a single SSA instruction, dispatching
+// on the concrete instruction type. Results are recorded in fr.env
+// (single values), fr.tuples (multi-results) or fr.ptr (elided loads).
+func (fr *frame) instruction(instr ssa.Instruction) {
+	fr.logf("[%T] %v @ %s\n", instr, instr, fr.pkg.Prog.Fset.Position(instr.Pos()))
+	if fr.GenerateDebug {
+		fr.debug.SetLocation(fr.builder, instr.Pos())
+	}
+
+	switch instr := instr.(type) {
+	case *ssa.Alloc:
+		typ := deref(instr.Type())
+		llvmtyp := fr.llvmtypes.ToLLVM(typ)
+		var value llvm.Value
+		if !instr.Heap {
+			// Stack alloc: the slot was created in the prologue; just zero it.
+			value = fr.env[instr].value
+			fr.memsetZero(value, llvm.SizeOf(llvmtyp))
+		} else if fr.isInit && fr.shouldStaticallyAllocate(instr) {
+			// If this is the init function and we think it may be beneficial,
+			// allocate memory statically in the object file rather than on the
+			// heap. This allows us to optimize constant stores into such
+			// variables as static initializations.
+			global := llvm.AddGlobal(fr.module.Module, llvmtyp, "")
+			global.SetLinkage(llvm.InternalLinkage)
+			fr.addGlobal(global, typ)
+			ptr := llvm.ConstBitCast(global, llvm.PointerType(llvm.Int8Type(), 0))
+			fr.env[instr] = newValue(ptr, instr.Type())
+		} else {
+			value = fr.createTypeMalloc(typ)
+			value.SetName(instr.Comment)
+			value = fr.builder.CreateBitCast(value, llvm.PointerType(llvm.Int8Type(), 0), "")
+			fr.env[instr] = newValue(value, instr.Type())
+		}
+
+	case *ssa.BinOp:
+		lhs, rhs := fr.value(instr.X), fr.value(instr.Y)
+		fr.env[instr] = fr.binaryOp(lhs, instr.Op, rhs)
+
+	case *ssa.Call:
+		tuple := fr.callInstruction(instr)
+		if len(tuple) == 1 {
+			fr.env[instr] = tuple[0]
+		} else {
+			fr.tuples[instr] = tuple
+		}
+
+	case *ssa.ChangeInterface:
+		x := fr.value(instr.X)
+		// The source type must be a non-empty interface,
+		// as ChangeInterface cannot fail (E2I may fail).
+		if instr.Type().Underlying().(*types.Interface).NumMethods() > 0 {
+			x = fr.changeInterface(x, instr.Type(), false)
+		} else {
+			x = fr.convertI2E(x)
+		}
+		fr.env[instr] = x
+
+	case *ssa.ChangeType:
+		value := fr.llvmvalue(instr.X)
+		if _, ok := instr.Type().Underlying().(*types.Pointer); ok {
+			value = fr.builder.CreateBitCast(value, fr.llvmtypes.ToLLVM(instr.Type()), "")
+		}
+		fr.env[instr] = newValue(value, instr.Type())
+
+	case *ssa.Convert:
+		v := fr.value(instr.X)
+		fr.env[instr] = fr.convert(v, instr.Type())
+
+	case *ssa.Defer:
+		fn, arg := fr.createThunk(instr)
+		fr.runtime.Defer.call(fr, fr.frameptr, fn, arg)
+
+	case *ssa.Extract:
+		var elem llvm.Value
+		if t, ok := fr.tuples[instr.Tuple]; ok {
+			elem = t[instr.Index].value
+		} else {
+			tuple := fr.llvmvalue(instr.Tuple)
+			elem = fr.builder.CreateExtractValue(tuple, instr.Index, instr.Name())
+		}
+		elemtyp := instr.Type()
+		fr.env[instr] = newValue(elem, elemtyp)
+
+	case *ssa.Field:
+		fieldtyp := instr.Type()
+		if p, ok := fr.ptr[instr.X]; ok {
+			// The load of instr.X was elided; address the field directly.
+			field := fr.builder.CreateStructGEP(p, instr.Field, instr.Name())
+			if fr.canAvoidElementLoad(*instr.Referrers()) {
+				fr.ptr[instr] = field
+			} else {
+				fr.env[instr] = newValue(fr.builder.CreateLoad(field, ""), fieldtyp)
+			}
+		} else {
+			value := fr.llvmvalue(instr.X)
+			field := fr.builder.CreateExtractValue(value, instr.Field, instr.Name())
+			fr.env[instr] = newValue(field, fieldtyp)
+		}
+
+	case *ssa.FieldAddr:
+		ptr := fr.llvmvalue(instr.X)
+		fr.nilCheck(instr.X, ptr)
+		xtyp := instr.X.Type().Underlying().(*types.Pointer).Elem()
+		ptrtyp := llvm.PointerType(fr.llvmtypes.ToLLVM(xtyp), 0)
+		ptr = fr.builder.CreateBitCast(ptr, ptrtyp, "")
+		fieldptr := fr.builder.CreateStructGEP(ptr, instr.Field, instr.Name())
+		fieldptr = fr.builder.CreateBitCast(fieldptr, llvm.PointerType(llvm.Int8Type(), 0), "")
+		fieldptrtyp := instr.Type()
+		fr.env[instr] = newValue(fieldptr, fieldptrtyp)
+
+	case *ssa.Go:
+		fn, arg := fr.createThunk(instr)
+		fr.runtime.Go.call(fr, fn, arg)
+
+	case *ssa.If:
+		cond := fr.llvmvalue(instr.Cond)
+		block := instr.Block()
+		trueBlock := fr.block(block.Succs[0])
+		falseBlock := fr.block(block.Succs[1])
+		cond = fr.builder.CreateTrunc(cond, llvm.Int1Type(), "")
+		fr.builder.CreateCondBr(cond, trueBlock, falseBlock)
+
+	case *ssa.Index:
+		var arrayptr llvm.Value
+
+		if ptr, ok := fr.ptr[instr.X]; ok {
+			arrayptr = ptr
+		} else {
+			// Spill the array value so it can be indexed by GEP.
+			array := fr.llvmvalue(instr.X)
+			arrayptr = fr.allocaBuilder.CreateAlloca(array.Type(), "")
+
+			fr.builder.CreateStore(array, arrayptr)
+		}
+		index := fr.llvmvalue(instr.Index)
+
+		arraytyp := instr.X.Type().Underlying().(*types.Array)
+		arraylen := llvm.ConstInt(fr.llvmtypes.inttype, uint64(arraytyp.Len()), false)
+
+		// The index may not have been promoted to int (for example, if it
+		// came from a composite literal).
+		index = fr.createZExtOrTrunc(index, fr.types.inttype, "")
+
+		// Bounds checking: 0 <= index < len
+		zero := llvm.ConstNull(fr.types.inttype)
+		i0 := fr.builder.CreateICmp(llvm.IntSLT, index, zero, "")
+		li := fr.builder.CreateICmp(llvm.IntSLE, arraylen, index, "")
+
+		cond := fr.builder.CreateOr(i0, li, "")
+
+		fr.condBrRuntimeError(cond, gccgoRuntimeErrorARRAY_INDEX_OUT_OF_BOUNDS)
+
+		addr := fr.builder.CreateGEP(arrayptr, []llvm.Value{zero, index}, "")
+		if fr.canAvoidElementLoad(*instr.Referrers()) {
+			fr.ptr[instr] = addr
+		} else {
+			fr.env[instr] = newValue(fr.builder.CreateLoad(addr, ""), instr.Type())
+		}
+
+	case *ssa.IndexAddr:
+		x := fr.llvmvalue(instr.X)
+		index := fr.llvmvalue(instr.Index)
+		var arrayptr, arraylen llvm.Value
+		var elemtyp types.Type
+		var errcode uint64
+		switch typ := instr.X.Type().Underlying().(type) {
+		case *types.Slice:
+			elemtyp = typ.Elem()
+			arrayptr = fr.builder.CreateExtractValue(x, 0, "")
+			arraylen = fr.builder.CreateExtractValue(x, 1, "")
+			errcode = gccgoRuntimeErrorSLICE_INDEX_OUT_OF_BOUNDS
+		case *types.Pointer: // *array
+			arraytyp := typ.Elem().Underlying().(*types.Array)
+			elemtyp = arraytyp.Elem()
+			fr.nilCheck(instr.X, x)
+			arrayptr = x
+			arraylen = llvm.ConstInt(fr.llvmtypes.inttype, uint64(arraytyp.Len()), false)
+			errcode = gccgoRuntimeErrorARRAY_INDEX_OUT_OF_BOUNDS
+		}
+
+		// The index may not have been promoted to int (for example, if it
+		// came from a composite literal).
+		index = fr.createZExtOrTrunc(index, fr.types.inttype, "")
+
+		// Bounds checking: 0 <= index < len
+		zero := llvm.ConstNull(fr.types.inttype)
+		i0 := fr.builder.CreateICmp(llvm.IntSLT, index, zero, "")
+		li := fr.builder.CreateICmp(llvm.IntSLE, arraylen, index, "")
+
+		cond := fr.builder.CreateOr(i0, li, "")
+
+		fr.condBrRuntimeError(cond, errcode)
+
+		ptrtyp := llvm.PointerType(fr.llvmtypes.ToLLVM(elemtyp), 0)
+		arrayptr = fr.builder.CreateBitCast(arrayptr, ptrtyp, "")
+		addr := fr.builder.CreateGEP(arrayptr, []llvm.Value{index}, "")
+		addr = fr.builder.CreateBitCast(addr, llvm.PointerType(llvm.Int8Type(), 0), "")
+		fr.env[instr] = newValue(addr, types.NewPointer(elemtyp))
+
+	case *ssa.Jump:
+		succ := instr.Block().Succs[0]
+		fr.builder.CreateBr(fr.block(succ))
+
+	case *ssa.Lookup:
+		x := fr.value(instr.X)
+		index := fr.value(instr.Index)
+		if isString(x.Type().Underlying()) {
+			fr.env[instr] = fr.stringIndex(x, index)
+		} else {
+			v, ok := fr.mapLookup(x, index)
+			if instr.CommaOk {
+				fr.tuples[instr] = []*govalue{v, ok}
+			} else {
+				fr.env[instr] = v
+			}
+		}
+
+	case *ssa.MakeChan:
+		fr.env[instr] = fr.makeChan(instr.Type(), fr.value(instr.Size))
+
+	case *ssa.MakeClosure:
+		llfn := fr.resolveFunctionGlobal(instr.Fn.(*ssa.Function))
+		llfn = llvm.ConstBitCast(llfn, llvm.PointerType(llvm.Int8Type(), 0))
+		fn := newValue(llfn, instr.Fn.(*ssa.Function).Signature)
+		bindings := make([]*govalue, len(instr.Bindings))
+		for i, binding := range instr.Bindings {
+			bindings[i] = fr.value(binding)
+		}
+		fr.env[instr] = fr.makeClosure(fn, bindings)
+
+	case *ssa.MakeInterface:
+		// fr.ptr[instr.X] will be set if a pointer load was elided by canAvoidLoad
+		if ptr, ok := fr.ptr[instr.X]; ok {
+			fr.env[instr] = fr.makeInterfaceFromPointer(ptr, instr.X.Type(), instr.Type())
+		} else {
+			receiver := fr.llvmvalue(instr.X)
+			fr.env[instr] = fr.makeInterface(receiver, instr.X.Type(), instr.Type())
+		}
+
+	case *ssa.MakeMap:
+		fr.env[instr] = fr.makeMap(instr.Type(), fr.value(instr.Reserve))
+
+	case *ssa.MakeSlice:
+		length := fr.value(instr.Len)
+		capacity := fr.value(instr.Cap)
+		fr.env[instr] = fr.makeSlice(instr.Type(), length, capacity)
+
+	case *ssa.MapUpdate:
+		m := fr.value(instr.Map)
+		k := fr.value(instr.Key)
+		v := fr.value(instr.Value)
+		fr.mapUpdate(m, k, v)
+
+	case *ssa.Next:
+		iter := fr.tuples[instr.Iter]
+		if instr.IsString {
+			fr.tuples[instr] = fr.stringIterNext(iter)
+		} else {
+			fr.tuples[instr] = fr.mapIterNext(iter)
+		}
+
+	case *ssa.Panic:
+		arg := fr.value(instr.X)
+		fr.callPanic(arg)
+
+	case *ssa.Phi:
+		typ := instr.Type()
+		phi := fr.builder.CreatePHI(fr.llvmtypes.ToLLVM(typ), instr.Comment)
+		fr.env[instr] = newValue(phi, typ)
+		// Incoming edges are added later by fixupPhis.
+		fr.phis = append(fr.phis, pendingPhi{instr, phi})
+
+	case *ssa.Range:
+		x := fr.value(instr.X)
+		switch x.Type().Underlying().(type) {
+		case *types.Map:
+			fr.tuples[instr] = fr.mapIterInit(x)
+		case *types.Basic: // string
+			fr.tuples[instr] = fr.stringIterInit(x)
+		default:
+			panic(fmt.Sprintf("unhandled range for type %T", x.Type()))
+		}
+
+	case *ssa.Return:
+		vals := make([]llvm.Value, len(instr.Results))
+		for i, res := range instr.Results {
+			vals[i] = fr.llvmvalue(res)
+		}
+		fr.retInf.encode(llvm.GlobalContext(), fr.allocaBuilder, fr.builder, vals)
+
+	case *ssa.RunDefers:
+		fr.runDefers()
+
+	case *ssa.Select:
+		states := make([]selectState, len(instr.States))
+		for i, state := range instr.States {
+			states[i] = selectState{
+				Dir:  state.Dir,
+				Chan: fr.value(state.Chan),
+				Send: fr.value(state.Send),
+			}
+		}
+		index, recvOk, recvElems := fr.chanSelect(states, instr.Blocking)
+		tuple := append([]*govalue{index, recvOk}, recvElems...)
+		fr.tuples[instr] = tuple
+
+	case *ssa.Send:
+		fr.chanSend(fr.value(instr.Chan), fr.value(instr.X))
+
+	case *ssa.Slice:
+		x := fr.llvmvalue(instr.X)
+		low := fr.llvmvalue(instr.Low)
+		high := fr.llvmvalue(instr.High)
+		max := fr.llvmvalue(instr.Max)
+		slice := fr.slice(x, instr.X.Type(), low, high, max)
+		fr.env[instr] = newValue(slice, instr.Type())
+
+	case *ssa.Store:
+		addr := fr.llvmvalue(instr.Addr)
+		value := fr.llvmvalue(instr.Val)
+		addr = fr.builder.CreateBitCast(addr, llvm.PointerType(value.Type(), 0), "")
+		// If this is the init function, see if we can simulate the effect
+		// of the store in a global's initializer, in which case we can avoid
+		// generating code for it.
+		if !fr.isInit || !fr.maybeStoreInInitializer(value, addr) {
+			fr.nilCheck(instr.Addr, addr)
+			fr.builder.CreateStore(value, addr)
+		}
+
+	case *ssa.TypeAssert:
+		x := fr.value(instr.X)
+		if instr.CommaOk {
+			v, ok := fr.interfaceTypeCheck(x, instr.AssertedType)
+			fr.tuples[instr] = []*govalue{v, ok}
+		} else {
+			fr.env[instr] = fr.interfaceTypeAssert(x, instr.AssertedType)
+		}
+
+	case *ssa.UnOp:
+		operand := fr.value(instr.X)
+		switch instr.Op {
+		case token.ARROW:
+			x, ok := fr.chanRecv(operand, instr.CommaOk)
+			if instr.CommaOk {
+				fr.tuples[instr] = []*govalue{x, ok}
+			} else {
+				fr.env[instr] = x
+			}
+		case token.MUL:
+			fr.nilCheck(instr.X, operand.value)
+			if !fr.canAvoidLoad(instr, operand.value) {
+				// The bitcast is necessary to handle recursive pointer loads.
+				llptr := fr.builder.CreateBitCast(operand.value, llvm.PointerType(fr.llvmtypes.ToLLVM(instr.Type()), 0), "")
+				fr.env[instr] = newValue(fr.builder.CreateLoad(llptr, ""), instr.Type())
+			}
+		default:
+			fr.env[instr] = fr.unaryOp(operand, instr.Op)
+		}
+
+	default:
+		panic(fmt.Sprintf("unhandled: %v", instr))
+	}
+}
+
+// callBuiltin translates a call to a Go builtin; typ is the call's
+// result type (nil for builtins with no result). Returns the result
+// values, or nil for builtins that produce none.
+func (fr *frame) callBuiltin(typ types.Type, builtin *ssa.Builtin, args []ssa.Value) []*govalue {
+	switch builtin.Name() {
+	case "print", "println":
+		llargs := make([]*govalue, len(args))
+		for i, arg := range args {
+			llargs[i] = fr.value(arg)
+		}
+		fr.printValues(builtin.Name() == "println", llargs...)
+		return nil
+
+	case "panic":
+		fr.callPanic(fr.value(args[0]))
+		return nil
+
+	case "recover":
+		return []*govalue{fr.callRecover(false)}
+
+	case "append":
+		return []*govalue{fr.callAppend(fr.value(args[0]), fr.value(args[1]))}
+
+	case "close":
+		fr.chanClose(fr.value(args[0]))
+		return nil
+
+	case "cap":
+		return []*govalue{fr.callCap(fr.value(args[0]))}
+
+	case "len":
+		return []*govalue{fr.callLen(fr.value(args[0]))}
+
+	case "copy":
+		return []*govalue{fr.callCopy(fr.value(args[0]), fr.value(args[1]))}
+
+	case "delete":
+		fr.mapDelete(fr.value(args[0]), fr.value(args[1]))
+		return nil
+
+	case "real":
+		return []*govalue{fr.extractRealValue(fr.value(args[0]))}
+
+	case "imag":
+		return []*govalue{fr.extractImagValue(fr.value(args[0]))}
+
+	case "complex":
+		// Build the complex value from its two components.
+		r := fr.llvmvalue(args[0])
+		i := fr.llvmvalue(args[1])
+		cmplx := llvm.Undef(fr.llvmtypes.ToLLVM(typ))
+		cmplx = fr.builder.CreateInsertValue(cmplx, r, 0, "")
+		cmplx = fr.builder.CreateInsertValue(cmplx, i, 1, "")
+		return []*govalue{newValue(cmplx, typ)}
+
+	case "ssa:wrapnilchk":
+		ptr := fr.value(args[0])
+		fr.nilCheck(args[0], ptr.value)
+		return []*govalue{ptr}
+
+	default:
+		panic("unimplemented: " + builtin.Name())
+	}
+}
+
+// callInstruction translates function call instructions.
+func (fr *frame) callInstruction(instr ssa.CallInstruction) []*govalue {
+	call := instr.Common()
+	if builtin, ok := call.Value.(*ssa.Builtin); ok {
+		var typ types.Type
+		if v := instr.Value(); v != nil {
+			typ = v.Type()
+		}
+		return fr.callBuiltin(typ, builtin, call.Args)
+	}
+
+	args := make([]*govalue, len(call.Args))
+	for i, arg := range call.Args {
+		args[i] = fr.value(arg)
+	}
+
+	var fn *govalue
+	if call.IsInvoke() {
+		// Interface method call: look up the method and prepend the receiver.
+		var recv *govalue
+		fn, recv = fr.interfaceMethod(fr.llvmvalue(call.Value), call.Value.Type(), call.Method)
+		args = append([]*govalue{recv}, args...)
+	} else {
+		if ssafn, ok := call.Value.(*ssa.Function); ok {
+			// Direct call to a known function.
+			llfn := fr.resolveFunctionGlobal(ssafn)
+			llfn = llvm.ConstBitCast(llfn, llvm.PointerType(llvm.Int8Type(), 0))
+			fn = newValue(llfn, ssafn.Type())
+		} else {
+			// First-class function values are stored as *{*fnptr}, so
+			// we must extract the function pointer. We must also
+			// call __go_set_closure, in case the function is a closure.
+			fn = fr.value(call.Value)
+			fr.runtime.setClosure.call(fr, fn.value)
+			fnptr := fr.builder.CreateBitCast(fn.value, llvm.PointerType(fn.value.Type(), 0), "")
+			fnptr = fr.builder.CreateLoad(fnptr, "")
+			fn = newValue(fnptr, fn.Type())
+		}
+		if recv := call.Signature().Recv(); recv != nil {
+			// Value receivers are passed by pointer: spill to a stack slot.
+			if _, ok := recv.Type().Underlying().(*types.Pointer); !ok {
+				recvalloca := fr.allocaBuilder.CreateAlloca(args[0].value.Type(), "")
+				fr.builder.CreateStore(args[0].value, recvalloca)
+				args[0] = newValue(recvalloca, types.NewPointer(args[0].Type()))
+			}
+		}
+	}
+	return fr.createCall(fn, args)
+}
+
+func hasDefer(f *ssa.Function) bool {
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ if _, ok := instr.(*ssa.Defer); ok {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// callsRecover reports whether f contains a call to the recover builtin.
+func callsRecover(f *ssa.Function) bool {
+	for _, block := range f.Blocks {
+		for _, inst := range block.Instrs {
+			call, ok := inst.(ssa.CallInstruction)
+			if !ok {
+				continue
+			}
+			if builtin, ok := call.Common().Value.(*ssa.Builtin); ok && builtin.Name() == "recover" {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/irgen/strings.go b/irgen/strings.go
new file mode 100644
index 0000000..4a6554b
--- /dev/null
+++ b/irgen/strings.go
@@ -0,0 +1,97 @@
+//===- strings.go - IR generation for string ops --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements IR generation for string operations.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "go/token"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// concatenateStrings emits a call to the runtime string-concatenation
+// helper (stringPlus) and wraps the result as a Go string value.
+func (fr *frame) concatenateStrings(lhs, rhs *govalue) *govalue {
+ result := fr.runtime.stringPlus.call(fr, lhs.value, rhs.value)
+ return newValue(result[0], types.Typ[types.String])
+}
+
+// compareStrings lowers a string comparison for op by calling the
+// runtime strcmp helper and comparing its result against zero with the
+// matching integer predicate. The i1 comparison result is zero-extended
+// to i8, which is how this frontend represents Go bool values.
+func (fr *frame) compareStrings(lhs, rhs *govalue, op token.Token) *govalue {
+ result := fr.runtime.strcmp.call(fr, lhs.value, rhs.value)[0]
+ zero := llvm.ConstNull(fr.types.inttype)
+ var pred llvm.IntPredicate
+ switch op {
+ case token.EQL:
+ pred = llvm.IntEQ
+ case token.LSS:
+ pred = llvm.IntSLT
+ case token.GTR:
+ pred = llvm.IntSGT
+ case token.LEQ:
+ pred = llvm.IntSLE
+ case token.GEQ:
+ pred = llvm.IntSGE
+ case token.NEQ:
+ // != is rewritten as !(==) before reaching this function.
+ panic("NEQ is handled in govalue.BinaryOp")
+ default:
+ panic("unreachable")
+ }
+ result = fr.builder.CreateICmp(pred, result, zero, "")
+ result = fr.builder.CreateZExt(result, llvm.Int8Type(), "")
+ return newValue(result, types.Typ[types.Bool])
+}
+
+// stringIndex implements v = s[i]: it extracts the data pointer from
+// the string header, offsets it by i, and loads the byte at that address.
+func (fr *frame) stringIndex(s, i *govalue) *govalue {
+ ptr := fr.builder.CreateExtractValue(s.value, 0, "")
+ ptr = fr.builder.CreateGEP(ptr, []llvm.Value{i.value}, "")
+ return newValue(fr.builder.CreateLoad(ptr, ""), types.Typ[types.Byte])
+}
+
+// stringIterInit creates the state for a range-over-string loop:
+// the string itself plus a stack slot holding the byte index,
+// initialized to zero.
+func (fr *frame) stringIterInit(str *govalue) []*govalue {
+ indexptr := fr.allocaBuilder.CreateAlloca(fr.types.inttype, "")
+ fr.builder.CreateStore(llvm.ConstNull(fr.types.inttype), indexptr)
+ return []*govalue{str, newValue(indexptr, types.Typ[types.Int])}
+}
+
+// stringIterNext advances the iterator, and returns the tuple (ok, k, v).
+// It calls the runtime stringiter2 helper with the current index, stores
+// the next index back into the iterator's stack slot, and derives ok from
+// whether the next index is non-zero (zero signals end of string).
+func (fr *frame) stringIterNext(iter []*govalue) []*govalue {
+ str, indexptr := iter[0], iter[1]
+ k := fr.builder.CreateLoad(indexptr.value, "")
+
+ result := fr.runtime.stringiter2.call(fr, str.value, k)
+ fr.builder.CreateStore(result[0], indexptr.value)
+ ok := fr.builder.CreateIsNotNull(result[0], "")
+ ok = fr.builder.CreateZExt(ok, llvm.Int8Type(), "")
+ v := result[1]
+
+ return []*govalue{newValue(ok, types.Typ[types.Bool]), newValue(k, types.Typ[types.Int]), newValue(v, types.Typ[types.Rune])}
+}
+
+// runeToString converts a single rune to a string via the runtime
+// intToString helper, first normalizing the operand to int.
+func (fr *frame) runeToString(v *govalue) *govalue {
+ v = fr.convert(v, types.Typ[types.Int])
+ result := fr.runtime.intToString.call(fr, v.value)
+ return newValue(result[0], types.Typ[types.String])
+}
+
+// stringToRuneSlice implements []rune(s) via the runtime
+// stringToIntArray helper.
+func (fr *frame) stringToRuneSlice(v *govalue) *govalue {
+ result := fr.runtime.stringToIntArray.call(fr, v.value)
+ runeslice := types.NewSlice(types.Typ[types.Rune])
+ return newValue(result[0], runeslice)
+}
+
+// runeSliceToString implements string(r) for r []rune by passing the
+// slice's data pointer and length to the runtime intArrayToString helper.
+func (fr *frame) runeSliceToString(v *govalue) *govalue {
+ llv := v.value
+ ptr := fr.builder.CreateExtractValue(llv, 0, "")
+ len := fr.builder.CreateExtractValue(llv, 1, "")
+ result := fr.runtime.intArrayToString.call(fr, ptr, len)
+ return newValue(result[0], types.Typ[types.String])
+}
diff --git a/irgen/targets.go b/irgen/targets.go
new file mode 100644
index 0000000..7715ab1
--- /dev/null
+++ b/irgen/targets.go
@@ -0,0 +1,102 @@
+//===- targets.go - target data -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains functions for retrieving target-specific data.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "fmt"
+ "strings"
+
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// PNaClTriple is the LLVM target triple that should be used to compile
+// modules to be compatible with PNaCl (Portable Native Client).
+// NOTE(review): the value is an ARM Linux triple rather than a generic
+// PNaCl/le32 triple — presumably deliberate, but worth confirming.
+const PNaClTriple = "armv7-none-linux-gnueabi"
+
+// Below are the target data representation constants generated by clang.
+// For unknown targets, we enumerate all targets known to LLVM and use
+// the first one with a matching architecture.
+const (
+ // NOTE(review): despite the name, this layout string is returned for
+ // the "x86-64" architecture (64-bit pointers) — see llvmDataLayout.
+ x86TargetData = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+)
+
+// llvmDataLayout returns the data layout string
+// representation for the specified LLVM triple.
+func llvmDataLayout(triple string) (string, error) {
+ // Triples are several fields separated by '-' characters.
+ // The first field is the architecture. The architecture's
+ // canonical form may include a '-' character, which would
+ // have been translated to '_' for inclusion in a triple.
+ // NOTE(review): assumes triple contains at least one '-';
+ // IndexRune returns -1 otherwise and the slice below would panic.
+ arch := parseArch(triple[:strings.IndexRune(triple, '-')])
+ switch arch {
+ case "x86-64":
+ return x86TargetData, nil
+ }
+ // Fall back to asking LLVM: find a registered target whose name
+ // matches the parsed architecture and read its TargetData string.
+ for target := llvm.FirstTarget(); target.C != nil; target = target.NextTarget() {
+ if arch == target.Name() {
+ machine := target.CreateTargetMachine(
+ triple, "", "",
+ llvm.CodeGenLevelDefault,
+ llvm.RelocDefault,
+ llvm.CodeModelDefault,
+ )
+ // target is reused here to hold the layout string, not the target.
+ target := machine.TargetData().String()
+ machine.Dispose()
+ return target, nil
+ }
+ }
+ return "", fmt.Errorf("Invalid target triple: %s", triple)
+}
+
+// Based on parseArch from LLVM's lib/Support/Triple.cpp.
+// This is used to match the target machine type.
+// parseArch maps the architecture field of a target triple to the
+// canonical LLVM target name; unrecognized inputs yield "unknown".
+func parseArch(arch string) string {
+ switch arch {
+ case "i386", "i486", "i586", "i686", "i786", "i886", "i986":
+ return "x86"
+ case "amd64", "x86_64":
+ return "x86-64"
+ case "powerpc":
+ return "ppc"
+ case "powerpc64", "ppu":
+ return "ppc64"
+ case "mblaze":
+ return "mblaze"
+ case "arm", "xscale":
+ return "arm"
+ case "thumb":
+ return "thumb"
+ case "spu", "cellspu":
+ return "cellspu"
+ case "msp430":
+ return "msp430"
+ case "mips", "mipseb", "mipsallegrex":
+ return "mips"
+ case "mipsel", "mipsallegrexel":
+ return "mipsel"
+ case "mips64", "mips64eb":
+ return "mips64"
+ case "mipsel64":
+ return "mipsel64"
+ case "r600", "hexagon", "sparc", "sparcv9", "tce",
+ "xcore", "nvptx", "nvptx64", "le32", "amdil":
+ return arch
+ }
+ // Versioned ARM/Thumb triples (e.g. "armv7", "thumbv6") collapse
+ // to their base architecture.
+ if strings.HasPrefix(arch, "armv") {
+ return "arm"
+ } else if strings.HasPrefix(arch, "thumbv") {
+ return "thumb"
+ }
+ return "unknown"
+}
diff --git a/irgen/typemap.go b/irgen/typemap.go
new file mode 100644
index 0000000..29cca05
--- /dev/null
+++ b/irgen/typemap.go
@@ -0,0 +1,2033 @@
+//===- typemap.go - type and type descriptor mapping ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the mapping from go/types types to LLVM types and to
+// type descriptors.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/ssa/ssautil"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llgo/third_party/go.tools/go/types/typeutil"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// MethodResolver resolves a method selection to the llgo value
+// representing that method.
+type MethodResolver interface {
+ ResolveMethod(*types.Selection) *govalue
+}
+
+// llvmTypeMap provides a means of mapping from go/types types
+// to llgo's corresponding LLVM type representation.
+type llvmTypeMap struct {
+ sizes *types.StdSizes
+ ctx llvm.Context
+ target llvm.TargetData
+ inttype llvm.Type
+ stringType llvm.Type
+
+ // types caches the types.Type -> llvm.Type mapping (see toLLVM).
+ types typeutil.Map
+}
+
+// typeDescInfo holds the LLVM globals generated for one type's
+// runtime type descriptor.
+type typeDescInfo struct {
+ global llvm.Value
+ commonTypePtr llvm.Value
+ mapDescPtr llvm.Value
+ gc, gcPtr llvm.Value
+
+ interfaceMethodTables typeutil.Map
+}
+
+// TypeMap extends llvmTypeMap with everything needed to emit gccgo-style
+// runtime type descriptors for a package: the name mangler, descriptor
+// struct types, and references to the runtime hash/equal functions.
+type TypeMap struct {
+ *llvmTypeMap
+ mc manglerContext
+
+ module llvm.Module
+ pkgpath string
+ types, algs typeutil.Map
+ runtime *runtimeInterface
+ methodResolver MethodResolver
+ types.MethodSetCache
+
+ commonTypeType, uncommonTypeType, ptrTypeType, funcTypeType, arrayTypeType, sliceTypeType, mapTypeType, chanTypeType, interfaceTypeType, structTypeType llvm.Type
+ mapDescType llvm.Type
+
+ methodType, imethodType, structFieldType llvm.Type
+
+ typeSliceType, methodSliceType, imethodSliceType, structFieldSliceType llvm.Type
+
+ hashFnType, equalFnType llvm.Type
+
+ hashFnEmptyInterface, hashFnInterface, hashFnFloat, hashFnComplex, hashFnString, hashFnIdentity, hashFnError llvm.Value
+ equalFnEmptyInterface, equalFnInterface, equalFnFloat, equalFnComplex, equalFnString, equalFnIdentity, equalFnError llvm.Value
+
+ zeroType llvm.Type
+ zeroValue llvm.Value
+}
+
+// NewLLVMTypeMap constructs an llvmTypeMap for the given context and
+// target, deriving the int type from the target's pointer size and
+// pre-building the two-word {i8*, int} string header type.
+func NewLLVMTypeMap(ctx llvm.Context, target llvm.TargetData) *llvmTypeMap {
+ // spec says int is either 32-bit or 64-bit.
+ // ABI currently requires sizeof(int) == sizeof(uint) == sizeof(uintptr).
+ inttype := ctx.IntType(8 * target.PointerSize())
+
+ i8ptr := llvm.PointerType(llvm.Int8Type(), 0)
+ elements := []llvm.Type{i8ptr, inttype}
+ stringType := llvm.StructType(elements, false)
+
+ return &llvmTypeMap{
+ ctx: ctx,
+ sizes: &types.StdSizes{
+ WordSize: int64(target.PointerSize()),
+ MaxAlign: 8,
+ },
+ target: target,
+ inttype: inttype,
+ stringType: stringType,
+ }
+}
+
+// NewTypeMap constructs a TypeMap for pkg. It declares the runtime
+// hash/equal functions and builds all of the named LLVM struct types
+// used to emit gccgo-compatible runtime type descriptors
+// (commonType, uncommonType, per-kind descriptor types, and mapDesc).
+func NewTypeMap(pkg *ssa.Package, llvmtm *llvmTypeMap, module llvm.Module, r *runtimeInterface, mr MethodResolver) *TypeMap {
+ tm := &TypeMap{
+ llvmTypeMap: llvmtm,
+ module: module,
+ pkgpath: pkg.Object.Path(),
+ runtime: r,
+ methodResolver: mr,
+ }
+
+ tm.mc.init(pkg.Prog, &tm.MethodSetCache)
+
+ uintptrType := tm.inttype
+ voidPtrType := llvm.PointerType(tm.ctx.Int8Type(), 0)
+ boolType := llvm.Int8Type()
+ stringPtrType := llvm.PointerType(tm.stringType, 0)
+
+ // Create runtime algorithm function types.
+ // hash: (void*, uintptr) -> uintptr; equal: (void*, void*, uintptr) -> bool.
+ params := []llvm.Type{voidPtrType, uintptrType}
+ tm.hashFnType = llvm.FunctionType(uintptrType, params, false)
+ params = []llvm.Type{voidPtrType, voidPtrType, uintptrType}
+ tm.equalFnType = llvm.FunctionType(boolType, params, false)
+
+ // Declare the runtime-provided hash implementations.
+ tm.hashFnEmptyInterface = llvm.AddFunction(tm.module, "__go_type_hash_empty_interface", tm.hashFnType)
+ tm.hashFnInterface = llvm.AddFunction(tm.module, "__go_type_hash_interface", tm.hashFnType)
+ tm.hashFnFloat = llvm.AddFunction(tm.module, "__go_type_hash_float", tm.hashFnType)
+ tm.hashFnComplex = llvm.AddFunction(tm.module, "__go_type_hash_complex", tm.hashFnType)
+ tm.hashFnString = llvm.AddFunction(tm.module, "__go_type_hash_string", tm.hashFnType)
+ tm.hashFnIdentity = llvm.AddFunction(tm.module, "__go_type_hash_identity", tm.hashFnType)
+ tm.hashFnError = llvm.AddFunction(tm.module, "__go_type_hash_error", tm.hashFnType)
+
+ // Declare the runtime-provided equality implementations.
+ tm.equalFnEmptyInterface = llvm.AddFunction(tm.module, "__go_type_equal_empty_interface", tm.equalFnType)
+ tm.equalFnInterface = llvm.AddFunction(tm.module, "__go_type_equal_interface", tm.equalFnType)
+ tm.equalFnFloat = llvm.AddFunction(tm.module, "__go_type_equal_float", tm.equalFnType)
+ tm.equalFnComplex = llvm.AddFunction(tm.module, "__go_type_equal_complex", tm.equalFnType)
+ tm.equalFnString = llvm.AddFunction(tm.module, "__go_type_equal_string", tm.equalFnType)
+ tm.equalFnIdentity = llvm.AddFunction(tm.module, "__go_type_equal_identity", tm.equalFnType)
+ tm.equalFnError = llvm.AddFunction(tm.module, "__go_type_equal_error", tm.equalFnType)
+
+ // The body of this type is set in emitTypeDescInitializers once we have scanned
+ // every type, as it needs to be as large and well aligned as the
+ // largest/most aligned type.
+ tm.zeroType = tm.ctx.StructCreateNamed("zero")
+ tm.zeroValue = llvm.AddGlobal(tm.module, tm.zeroType, "go$zerovalue")
+ tm.zeroValue.SetLinkage(llvm.CommonLinkage)
+ tm.zeroValue.SetInitializer(llvm.ConstNull(tm.zeroType))
+
+ // commonType is created first (opaque) because nearly every other
+ // descriptor type refers to it via pointer; its body is set below.
+ tm.commonTypeType = tm.ctx.StructCreateNamed("commonType")
+ commonTypeTypePtr := llvm.PointerType(tm.commonTypeType, 0)
+
+ tm.methodType = tm.ctx.StructCreateNamed("method")
+ tm.methodType.StructSetBody([]llvm.Type{
+ stringPtrType, // name
+ stringPtrType, // pkgPath
+ commonTypeTypePtr, // mtype (without receiver)
+ commonTypeTypePtr, // type (with receiver)
+ voidPtrType, // function
+ }, false)
+
+ tm.methodSliceType = tm.makeNamedSliceType("methodSlice", tm.methodType)
+
+ tm.uncommonTypeType = tm.ctx.StructCreateNamed("uncommonType")
+ tm.uncommonTypeType.StructSetBody([]llvm.Type{
+ stringPtrType, // name
+ stringPtrType, // pkgPath
+ tm.methodSliceType, // methods
+ }, false)
+
+ tm.commonTypeType.StructSetBody([]llvm.Type{
+ tm.ctx.Int8Type(), // Kind
+ tm.ctx.Int8Type(), // align
+ tm.ctx.Int8Type(), // fieldAlign
+ uintptrType, // size
+ tm.ctx.Int32Type(), // hash
+ llvm.PointerType(tm.hashFnType, 0), // hashfn
+ llvm.PointerType(tm.equalFnType, 0), // equalfn
+ voidPtrType, // gc
+ stringPtrType, // string
+ llvm.PointerType(tm.uncommonTypeType, 0), // uncommonType
+ commonTypeTypePtr, // ptrToThis
+ llvm.PointerType(tm.zeroType, 0), // zero
+ }, false)
+
+ tm.typeSliceType = tm.makeNamedSliceType("typeSlice", commonTypeTypePtr)
+
+ tm.ptrTypeType = tm.ctx.StructCreateNamed("ptrType")
+ tm.ptrTypeType.StructSetBody([]llvm.Type{
+ tm.commonTypeType,
+ commonTypeTypePtr,
+ }, false)
+
+ tm.funcTypeType = tm.ctx.StructCreateNamed("funcType")
+ tm.funcTypeType.StructSetBody([]llvm.Type{
+ tm.commonTypeType,
+ tm.ctx.Int8Type(), // dotdotdot
+ tm.typeSliceType, // in
+ tm.typeSliceType, // out
+ }, false)
+
+ tm.arrayTypeType = tm.ctx.StructCreateNamed("arrayType")
+ tm.arrayTypeType.StructSetBody([]llvm.Type{
+ tm.commonTypeType,
+ commonTypeTypePtr, // elem
+ commonTypeTypePtr, // slice
+ tm.inttype, // len
+ }, false)
+
+ tm.sliceTypeType = tm.ctx.StructCreateNamed("sliceType")
+ tm.sliceTypeType.StructSetBody([]llvm.Type{
+ tm.commonTypeType,
+ commonTypeTypePtr, // elem
+ }, false)
+
+ tm.mapTypeType = tm.ctx.StructCreateNamed("mapType")
+ tm.mapTypeType.StructSetBody([]llvm.Type{
+ tm.commonTypeType,
+ commonTypeTypePtr, // key
+ commonTypeTypePtr, // elem
+ }, false)
+
+ tm.chanTypeType = tm.ctx.StructCreateNamed("chanType")
+ tm.chanTypeType.StructSetBody([]llvm.Type{
+ tm.commonTypeType,
+ commonTypeTypePtr, // elem
+ tm.inttype, // dir
+ }, false)
+
+ tm.imethodType = tm.ctx.StructCreateNamed("imethod")
+ tm.imethodType.StructSetBody([]llvm.Type{
+ stringPtrType, // name
+ stringPtrType, // pkgPath
+ commonTypeTypePtr, // typ
+ }, false)
+
+ tm.imethodSliceType = tm.makeNamedSliceType("imethodSlice", tm.imethodType)
+
+ tm.interfaceTypeType = tm.ctx.StructCreateNamed("interfaceType")
+ tm.interfaceTypeType.StructSetBody([]llvm.Type{
+ tm.commonTypeType,
+ tm.imethodSliceType,
+ }, false)
+
+ tm.structFieldType = tm.ctx.StructCreateNamed("structField")
+ tm.structFieldType.StructSetBody([]llvm.Type{
+ stringPtrType, // name
+ stringPtrType, // pkgPath
+ commonTypeTypePtr, // typ
+ stringPtrType, // tag
+ tm.inttype, // offset
+ }, false)
+
+ tm.structFieldSliceType = tm.makeNamedSliceType("structFieldSlice", tm.structFieldType)
+
+ tm.structTypeType = tm.ctx.StructCreateNamed("structType")
+ tm.structTypeType.StructSetBody([]llvm.Type{
+ tm.commonTypeType,
+ tm.structFieldSliceType, // fields
+ }, false)
+
+ tm.mapDescType = tm.ctx.StructCreateNamed("mapDesc")
+ tm.mapDescType.StructSetBody([]llvm.Type{
+ commonTypeTypePtr, // map_descriptor
+ tm.inttype, // entry_size
+ tm.inttype, // key_offset
+ tm.inttype, // value_offset
+ }, false)
+
+ return tm
+}
+
+// ToLLVM returns the LLVM type corresponding to the Go type t.
+func (tm *llvmTypeMap) ToLLVM(t types.Type) llvm.Type {
+ return tm.toLLVM(t, "")
+}
+
+// toLLVM returns the cached LLVM type for t, creating and caching it
+// on first use. It panics if the type cannot be constructed.
+func (tm *llvmTypeMap) toLLVM(t types.Type, name string) llvm.Type {
+ lt, ok := tm.types.At(t).(llvm.Type)
+ if !ok {
+ lt = tm.makeLLVMType(t, name)
+ if lt.IsNil() {
+ panic(fmt.Sprint("Failed to create LLVM type for: ", t))
+ }
+ tm.types.Set(t, lt)
+ }
+ return lt
+}
+
+// makeLLVMType builds the LLVM representation of t via its backend type.
+// The name parameter is currently unused here.
+func (tm *llvmTypeMap) makeLLVMType(t types.Type, name string) llvm.Type {
+ return tm.getBackendType(t).ToLLVM(tm.ctx)
+}
+
+// Offsetsof returns the byte offset of each field within a struct,
+// aligning each field to its required alignment (types.Sizes interface).
+func (tm *llvmTypeMap) Offsetsof(fields []*types.Var) []int64 {
+ offsets := make([]int64, len(fields))
+ var o int64
+ for i, f := range fields {
+ a := tm.Alignof(f.Type())
+ o = align(o, a)
+ offsets[i] = o
+ o += tm.Sizeof(f.Type())
+ }
+ return offsets
+}
+
+// basicSizes maps basic type kinds to their size in bytes; a zero entry
+// means the kind is not covered here (handled specially in Sizeof).
+var basicSizes = [...]byte{
+ types.Bool: 1,
+ types.Int8: 1,
+ types.Int16: 2,
+ types.Int32: 4,
+ types.Int64: 8,
+ types.Uint8: 1,
+ types.Uint16: 2,
+ types.Uint32: 4,
+ types.Uint64: 8,
+ types.Float32: 4,
+ types.Float64: 8,
+ types.Complex64: 8,
+ types.Complex128: 16,
+}
+
+// Sizeof returns the size in bytes of T's representation
+// (types.Sizes interface): fixed sizes for basic kinds, two words for
+// strings and interfaces, three words for slices, element-aligned
+// products for arrays, field layout for structs, and one word for
+// everything else (pointers, maps, channels, funcs).
+func (tm *llvmTypeMap) Sizeof(T types.Type) int64 {
+ switch t := T.Underlying().(type) {
+ case *types.Basic:
+ k := t.Kind()
+ if int(k) < len(basicSizes) {
+ if s := basicSizes[k]; s > 0 {
+ return int64(s)
+ }
+ }
+ if k == types.String {
+ return tm.sizes.WordSize * 2
+ }
+ case *types.Array:
+ a := tm.Alignof(t.Elem())
+ z := tm.Sizeof(t.Elem())
+ return align(z, a) * t.Len() // may be 0
+ case *types.Slice:
+ return tm.sizes.WordSize * 3
+ case *types.Struct:
+ n := t.NumFields()
+ if n == 0 {
+ return 0
+ }
+ fields := make([]*types.Var, t.NumFields())
+ for i := 0; i != t.NumFields(); i++ {
+ fields[i] = t.Field(i)
+ }
+ offsets := tm.Offsetsof(fields)
+ return align(offsets[n-1]+tm.Sizeof(t.Field(n-1).Type()), tm.Alignof(t))
+ case *types.Interface:
+ return tm.sizes.WordSize * 2
+ }
+ return tm.sizes.WordSize // catch-all
+}
+
+// Alignof returns the required alignment of t, delegating to StdSizes.
+func (tm *llvmTypeMap) Alignof(t types.Type) int64 {
+ return tm.sizes.Alignof(t)
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// ToRuntime returns t's type descriptor pointer cast to i8*, the form
+// expected by runtime library calls.
+func (tm *TypeMap) ToRuntime(t types.Type) llvm.Value {
+ return llvm.ConstBitCast(tm.getTypeDescriptorPointer(t), llvm.PointerType(llvm.Int8Type(), 0))
+}
+
+// localNamedTypeInfo identifies a named type declared inside a function
+// body: the enclosing function's name and a per-function scope number.
+type localNamedTypeInfo struct {
+ functionName string
+ scopeNum int
+}
+
+// namedTypeInfo carries everything needed to mangle a named type:
+// package name/path, type name, and (for local types) its function scope.
+type namedTypeInfo struct {
+ pkgname, pkgpath string
+ name string
+ localNamedTypeInfo
+}
+
+// manglerContext holds state shared by the name-mangling routines:
+// the local-named-type table built by init and a method set cache.
+type manglerContext struct {
+ ti map[*types.Named]localNamedTypeInfo
+ msc *types.MethodSetCache
+}
+
+// Assembles the method set into the order that gccgo uses (unexported methods first).
+// Within each group the original MethodSet order is preserved.
+// TODO(pcc): cache this.
+func orderedMethodSet(ms *types.MethodSet) []*types.Selection {
+ oms := make([]*types.Selection, ms.Len())
+ omsi := 0
+ // First pass: unexported methods.
+ for i := 0; i != ms.Len(); i++ {
+ if sel := ms.At(i); !sel.Obj().Exported() {
+ oms[omsi] = sel
+ omsi++
+ }
+ }
+ // Second pass: exported methods.
+ for i := 0; i != ms.Len(); i++ {
+ if sel := ms.At(i); sel.Obj().Exported() {
+ oms[omsi] = sel
+ omsi++
+ }
+ }
+ return oms
+}
+
+// init populates ctx.ti by walking the scopes of every function in the
+// program and recording, for each function-local named type, the name of
+// its enclosing function and a scope number. The scope number increments
+// once per scope that declares at least one named type, so distinct
+// same-named local types mangle differently.
+func (ctx *manglerContext) init(prog *ssa.Program, msc *types.MethodSetCache) {
+ ctx.msc = msc
+ ctx.ti = make(map[*types.Named]localNamedTypeInfo)
+ for f, _ := range ssautil.AllFunctions(prog) {
+ scopeNum := 0
+ var addNamedTypesToMap func(*types.Scope)
+ addNamedTypesToMap = func(scope *types.Scope) {
+ hasNamedTypes := false
+ for _, n := range scope.Names() {
+ if tn, ok := scope.Lookup(n).(*types.TypeName); ok {
+ hasNamedTypes = true
+ ctx.ti[tn.Type().(*types.Named)] = localNamedTypeInfo{f.Name(), scopeNum}
+ }
+ }
+ if hasNamedTypes {
+ scopeNum++
+ }
+ // Recurse into child scopes (blocks, nested functions' bodies).
+ for i := 0; i != scope.NumChildren(); i++ {
+ addNamedTypesToMap(scope.Child(i))
+ }
+ }
+ if fobj, ok := f.Object().(*types.Func); ok && fobj.Scope() != nil {
+ addNamedTypesToMap(fobj.Scope())
+ }
+ }
+}
+
+// getNamedTypeInfo returns the mangling information for a basic or
+// named type. Byte/rune are canonicalized to uint8/int32, and
+// unsafe.Pointer is treated as a named type in package unsafe.
+// Panics if t is neither *types.Basic nor *types.Named.
+func (ctx *manglerContext) getNamedTypeInfo(t types.Type) (nti namedTypeInfo) {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch t.Kind() {
+ case types.Byte:
+ nti.name = "uint8"
+ case types.Rune:
+ nti.name = "int32"
+ case types.UnsafePointer:
+ nti.pkgname = "unsafe"
+ nti.pkgpath = "unsafe"
+ nti.name = "Pointer"
+ default:
+ nti.name = t.Name()
+ }
+
+ case *types.Named:
+ obj := t.Obj()
+ if pkg := obj.Pkg(); pkg != nil {
+ nti.pkgname = obj.Pkg().Name()
+ nti.pkgpath = obj.Pkg().Path()
+ }
+ nti.name = obj.Name()
+ // Zero value when t is not function-local.
+ nti.localNamedTypeInfo = ctx.ti[t]
+
+ default:
+ panic("not a named type")
+ }
+
+ return
+}
+
+// mangleSignature appends the mangled form of a function signature to b:
+// 'F', optional receiver ('m' + type), parameters ('p' ... ['V'] 'e'),
+// results ('r' ... 'e'), terminated by 'e'. 'V' marks variadic.
+func (ctx *manglerContext) mangleSignature(s *types.Signature, recv *types.Var, b *bytes.Buffer) {
+ b.WriteRune('F')
+ if recv != nil {
+ b.WriteRune('m')
+ ctx.mangleType(recv.Type(), b)
+ }
+
+ if p := s.Params(); p.Len() != 0 {
+ b.WriteRune('p')
+ for i := 0; i != p.Len(); i++ {
+ ctx.mangleType(p.At(i).Type(), b)
+ }
+ if s.Variadic() {
+ b.WriteRune('V')
+ }
+ b.WriteRune('e')
+ }
+
+ if r := s.Results(); r.Len() != 0 {
+ b.WriteRune('r')
+ for i := 0; i != r.Len(); i++ {
+ ctx.mangleType(r.At(i).Type(), b)
+ }
+ b.WriteRune('e')
+ }
+
+ b.WriteRune('e')
+}
+
+// manglePackagePath appends pkgpath to b with '/' and '.' replaced by
+// '_' so the result is a valid symbol fragment.
+func (ctx *manglerContext) manglePackagePath(pkgpath string, b *bytes.Buffer) {
+ pkgpath = strings.Replace(pkgpath, "/", "_", -1)
+ pkgpath = strings.Replace(pkgpath, ".", "_", -1)
+ b.WriteString(pkgpath)
+}
+
+// mangleType appends the gccgo-style mangled encoding of t to b.
+// Each type kind gets a distinguishing leading rune (N: named/basic,
+// p: pointer, M: map, C: chan, A: array/slice, S: struct, I: interface,
+// F via mangleSignature for funcs); composite encodings end with 'e'.
+// Panics on type kinds it does not handle.
+func (ctx *manglerContext) mangleType(t types.Type, b *bytes.Buffer) {
+ switch t := t.(type) {
+ case *types.Basic, *types.Named:
+ // Named types encode as 'N' <len> '_' <qualified name>, where the
+ // qualified name includes package path and, for local types, the
+ // enclosing function and scope number.
+ var nb bytes.Buffer
+ ti := ctx.getNamedTypeInfo(t)
+ if ti.pkgpath != "" {
+ ctx.manglePackagePath(ti.pkgpath, &nb)
+ nb.WriteRune('.')
+ }
+ if ti.functionName != "" {
+ nb.WriteString(ti.functionName)
+ nb.WriteRune('$')
+ if ti.scopeNum != 0 {
+ nb.WriteString(strconv.Itoa(ti.scopeNum))
+ nb.WriteRune('$')
+ }
+ }
+ nb.WriteString(ti.name)
+
+ b.WriteRune('N')
+ b.WriteString(strconv.Itoa(nb.Len()))
+ b.WriteRune('_')
+ b.WriteString(nb.String())
+
+ case *types.Pointer:
+ b.WriteRune('p')
+ ctx.mangleType(t.Elem(), b)
+
+ case *types.Map:
+ b.WriteRune('M')
+ ctx.mangleType(t.Key(), b)
+ b.WriteString("__")
+ ctx.mangleType(t.Elem(), b)
+
+ case *types.Chan:
+ b.WriteRune('C')
+ ctx.mangleType(t.Elem(), b)
+ switch t.Dir() {
+ case types.SendOnly:
+ b.WriteRune('s')
+ case types.RecvOnly:
+ b.WriteRune('r')
+ case types.SendRecv:
+ b.WriteString("sr")
+ }
+ b.WriteRune('e')
+
+ case *types.Signature:
+ ctx.mangleSignature(t, t.Recv(), b)
+
+ case *types.Array:
+ b.WriteRune('A')
+ ctx.mangleType(t.Elem(), b)
+ b.WriteString(strconv.FormatInt(t.Len(), 10))
+ b.WriteRune('e')
+
+ case *types.Slice:
+ // Slices use 'A' like arrays but omit the length.
+ b.WriteRune('A')
+ ctx.mangleType(t.Elem(), b)
+ b.WriteRune('e')
+
+ case *types.Struct:
+ b.WriteRune('S')
+ for i := 0; i != t.NumFields(); i++ {
+ f := t.Field(i)
+ if f.Anonymous() {
+ b.WriteString("0_")
+ } else {
+ b.WriteString(strconv.Itoa(len(f.Name())))
+ b.WriteRune('_')
+ b.WriteString(f.Name())
+ }
+ ctx.mangleType(f.Type(), b)
+ // TODO: tags are mangled here
+ }
+ b.WriteRune('e')
+
+ case *types.Interface:
+ b.WriteRune('I')
+ methodset := ctx.msc.MethodSet(t)
+ for _, m := range orderedMethodSet(methodset) {
+ method := m.Obj()
+ var nb bytes.Buffer
+ // Unexported methods are qualified with their package path.
+ if !method.Exported() {
+ nb.WriteRune('.')
+ nb.WriteString(method.Pkg().Path())
+ nb.WriteRune('.')
+ }
+ nb.WriteString(method.Name())
+
+ b.WriteString(strconv.Itoa(nb.Len()))
+ b.WriteRune('_')
+ b.WriteString(nb.String())
+
+ ctx.mangleSignature(method.Type().(*types.Signature), nil, b)
+ }
+ b.WriteRune('e')
+
+ default:
+ panic(fmt.Sprintf("unhandled type: %#v", t))
+ }
+}
+
+// mangleTypeDescriptorName appends the symbol name of t's type
+// descriptor global: "__go_tdn_" + qualified name for named/basic
+// types, "__go_td_" + mangled encoding for all other types.
+func (ctx *manglerContext) mangleTypeDescriptorName(t types.Type, b *bytes.Buffer) {
+ switch t := t.(type) {
+ case *types.Basic, *types.Named:
+ b.WriteString("__go_tdn_")
+ ti := ctx.getNamedTypeInfo(t)
+ if ti.pkgpath != "" {
+ ctx.manglePackagePath(ti.pkgpath, b)
+ b.WriteRune('.')
+ }
+ if ti.functionName != "" {
+ b.WriteString(ti.functionName)
+ b.WriteRune('.')
+ if ti.scopeNum != 0 {
+ b.WriteString(strconv.Itoa(ti.scopeNum))
+ b.WriteRune('.')
+ }
+ }
+ b.WriteString(ti.name)
+
+ default:
+ b.WriteString("__go_td_")
+ ctx.mangleType(t, b)
+ }
+}
+
+// mangleMapDescriptorName appends the symbol name of t's map descriptor.
+func (ctx *manglerContext) mangleMapDescriptorName(t types.Type, b *bytes.Buffer) {
+ b.WriteString("__go_map_")
+ ctx.mangleType(t, b)
+}
+
+// mangleImtName appends the symbol name of the interface method table
+// for converting srctype to targettype.
+func (ctx *manglerContext) mangleImtName(srctype types.Type, targettype *types.Interface, b *bytes.Buffer) {
+ b.WriteString("__go_imt_")
+ ctx.mangleType(targettype, b)
+ b.WriteString("__")
+ ctx.mangleType(srctype, b)
+}
+
+// mangleHashFunctionName returns the symbol name of t's hash function.
+func (ctx *manglerContext) mangleHashFunctionName(t types.Type) string {
+ var b bytes.Buffer
+ b.WriteString("__go_type_hash_")
+ ctx.mangleType(t, &b)
+ return b.String()
+}
+
+// mangleEqualFunctionName returns the symbol name of t's equality function.
+func (ctx *manglerContext) mangleEqualFunctionName(t types.Type) string {
+ var b bytes.Buffer
+ b.WriteString("__go_type_equal_")
+ ctx.mangleType(t, &b)
+ return b.String()
+}
+
+// mangleFunctionName returns the symbol name for f. Anonymous functions
+// are prefixed with their parent's mangled name; package-level init
+// becomes ".import"; methods append their receiver type's mangling.
+func (ctx *manglerContext) mangleFunctionName(f *ssa.Function) string {
+ var b bytes.Buffer
+
+ if f.Parent() != nil {
+ // Anonymous functions are not guaranteed to
+ // have unique identifiers at the global scope.
+ b.WriteString(ctx.mangleFunctionName(f.Parent()))
+ b.WriteRune(':')
+ b.WriteString(f.String())
+ return b.String()
+ }
+
+ pkg := f.Pkg
+ var pkgobj *types.Package
+ if pkg != nil {
+ pkgobj = pkg.Object
+ } else if f.Signature.Recv() != nil {
+ pkgobj = f.Signature.Recv().Pkg()
+ } else {
+ // No package and no receiver: fall back to the SSA string form.
+ b.WriteString(f.String())
+ return b.String()
+ }
+
+ if pkg != nil {
+ ctx.manglePackagePath(pkgobj.Path(), &b)
+ b.WriteRune('.')
+ }
+ if f.Signature.Recv() == nil && f.Name() == "init" {
+ b.WriteString(".import")
+ } else {
+ b.WriteString(f.Name())
+ }
+ if f.Signature.Recv() != nil {
+ b.WriteRune('.')
+ ctx.mangleType(f.Signature.Recv().Type(), &b)
+ }
+
+ return b.String()
+}
+
+// mangleGlobalName returns the symbol name for the package-level
+// variable g: mangled package path, '.', then the variable's name.
+func (ctx *manglerContext) mangleGlobalName(g *ssa.Global) string {
+ var b bytes.Buffer
+
+ ctx.manglePackagePath(g.Pkg.Object.Path(), &b)
+ b.WriteRune('.')
+ b.WriteString(g.Name())
+
+ return b.String()
+}
+
+const (
+ // From gofrontend/types.h
+ // Type class codes used as hash seeds so that different kinds of
+ // types hash into disjoint regions (see getTypeHash).
+ gccgoTypeClassERROR = iota
+ gccgoTypeClassVOID
+ gccgoTypeClassBOOLEAN
+ gccgoTypeClassINTEGER
+ gccgoTypeClassFLOAT
+ gccgoTypeClassCOMPLEX
+ gccgoTypeClassSTRING
+ gccgoTypeClassSINK
+ gccgoTypeClassFUNCTION
+ gccgoTypeClassPOINTER
+ gccgoTypeClassNIL
+ gccgoTypeClassCALL_MULTIPLE_RESULT
+ gccgoTypeClassSTRUCT
+ gccgoTypeClassARRAY
+ gccgoTypeClassMAP
+ gccgoTypeClassCHANNEL
+ gccgoTypeClassINTERFACE
+ gccgoTypeClassNAMED
+ gccgoTypeClassFORWARD
+)
+
+// getStringHash folds the bytes of s into the running hash h
+// (XOR each byte, then add the FNV prime 16777619) and returns
+// the updated hash.
+func getStringHash(s string, h uint32) uint32 {
+ for _, c := range []byte(s) {
+ h ^= uint32(c)
+ h += 16777619
+ }
+ return h
+}
+
+// getTypeHash computes the hash stored in a type descriptor's hash
+// field. Each type kind combines its components' hashes with its
+// gccgo type-class constant; the scheme mirrors gccgo so descriptors
+// hash compatibly. Panics on unhandled type kinds.
+func (tm *TypeMap) getTypeHash(t types.Type) uint32 {
+ switch t := t.(type) {
+ case *types.Basic, *types.Named:
+ nti := tm.mc.getNamedTypeInfo(t)
+ h := getStringHash(nti.functionName+nti.name+nti.pkgpath, 0)
+ h ^= uint32(nti.scopeNum)
+ return gccgoTypeClassNAMED + h
+
+ case *types.Signature:
+ var h uint32
+
+ // Shift each parameter/result hash by its position so order matters.
+ p := t.Params()
+ for i := 0; i != p.Len(); i++ {
+ h += tm.getTypeHash(p.At(i).Type()) << uint32(i+1)
+ }
+
+ r := t.Results()
+ for i := 0; i != r.Len(); i++ {
+ h += tm.getTypeHash(r.At(i).Type()) << uint32(i+2)
+ }
+
+ if t.Variadic() {
+ h += 1
+ }
+ h <<= 4
+ return gccgoTypeClassFUNCTION + h
+
+ case *types.Pointer:
+ return gccgoTypeClassPOINTER + (tm.getTypeHash(t.Elem()) << 4)
+
+ case *types.Struct:
+ var h uint32
+ for i := 0; i != t.NumFields(); i++ {
+ h = (h << 1) + tm.getTypeHash(t.Field(i).Type())
+ }
+ h <<= 2
+ return gccgoTypeClassSTRUCT + h
+
+ case *types.Array:
+ return gccgoTypeClassARRAY + tm.getTypeHash(t.Elem()) + 1
+
+ case *types.Slice:
+ // Slices share the ARRAY class and formula with arrays.
+ return gccgoTypeClassARRAY + tm.getTypeHash(t.Elem()) + 1
+
+ case *types.Map:
+ return gccgoTypeClassMAP + tm.getTypeHash(t.Key()) + tm.getTypeHash(t.Elem()) + 2
+
+ case *types.Chan:
+ var h uint32
+
+ switch t.Dir() {
+ case types.SendOnly:
+ h = 1
+ case types.RecvOnly:
+ h = 2
+ case types.SendRecv:
+ h = 3
+ }
+
+ h += tm.getTypeHash(t.Elem()) << 2
+ h <<= 3
+ return gccgoTypeClassCHANNEL + h
+
+ case *types.Interface:
+ var h uint32
+ for _, m := range orderedMethodSet(tm.MethodSet(t)) {
+ h = getStringHash(m.Obj().Name(), h)
+ h <<= 1
+ }
+ return gccgoTypeClassINTERFACE + h
+
+ default:
+ panic(fmt.Sprintf("unhandled type: %#v", t))
+ }
+}
+
+// writeType appends a human-readable rendering of typ to b, used for
+// the descriptor's string field. Named types embed tab-delimited
+// package and enclosing-function qualifiers; composite types are
+// rendered in Go source syntax. Panics on unhandled type kinds.
+func (tm *TypeMap) writeType(typ types.Type, b *bytes.Buffer) {
+ switch t := typ.(type) {
+ case *types.Basic, *types.Named:
+ ti := tm.mc.getNamedTypeInfo(t)
+ if ti.pkgpath != "" {
+ b.WriteByte('\t')
+ tm.mc.manglePackagePath(ti.pkgpath, b)
+ b.WriteByte('\t')
+ b.WriteString(ti.pkgname)
+ b.WriteByte('.')
+ }
+ if ti.functionName != "" {
+ b.WriteByte('\t')
+ b.WriteString(ti.functionName)
+ b.WriteByte('$')
+ if ti.scopeNum != 0 {
+ b.WriteString(strconv.Itoa(ti.scopeNum))
+ b.WriteByte('$')
+ }
+ b.WriteByte('\t')
+ }
+ b.WriteString(ti.name)
+
+ case *types.Array:
+ fmt.Fprintf(b, "[%d]", t.Len())
+ tm.writeType(t.Elem(), b)
+
+ case *types.Slice:
+ b.WriteString("[]")
+ tm.writeType(t.Elem(), b)
+
+ case *types.Struct:
+ if t.NumFields() == 0 {
+ b.WriteString("struct {}")
+ return
+ }
+ b.WriteString("struct { ")
+ for i := 0; i != t.NumFields(); i++ {
+ f := t.Field(i)
+ if i > 0 {
+ b.WriteString("; ")
+ }
+ if !f.Anonymous() {
+ b.WriteString(f.Name())
+ b.WriteByte(' ')
+ }
+ tm.writeType(f.Type(), b)
+ if tag := t.Tag(i); tag != "" {
+ fmt.Fprintf(b, " %q", tag)
+ }
+ }
+ b.WriteString(" }")
+
+ case *types.Pointer:
+ b.WriteByte('*')
+ tm.writeType(t.Elem(), b)
+
+ case *types.Signature:
+ b.WriteString("func")
+ tm.writeSignature(t, b)
+
+ case *types.Interface:
+ if t.NumMethods() == 0 && t.NumEmbeddeds() == 0 {
+ b.WriteString("interface {}")
+ return
+ }
+ // We write the source-level methods and embedded types rather
+ // than the actual method set since resolved method signatures
+ // may have non-printable cycles if parameters have anonymous
+ // interface types that (directly or indirectly) embed the
+ // current interface. For instance, consider the result type
+ // of m:
+ //
+ // type T interface{
+ // m() interface{ T }
+ // }
+ //
+ b.WriteString("interface { ")
+ // print explicit interface methods and embedded types
+ for i := 0; i != t.NumMethods(); i++ {
+ m := t.Method(i)
+ if i > 0 {
+ b.WriteString("; ")
+ }
+ if !m.Exported() {
+ b.WriteString(m.Pkg().Path())
+ b.WriteByte('.')
+ }
+ b.WriteString(m.Name())
+ tm.writeSignature(m.Type().(*types.Signature), b)
+ }
+ for i := 0; i != t.NumEmbeddeds(); i++ {
+ typ := t.Embedded(i)
+ if i > 0 || t.NumMethods() > 0 {
+ b.WriteString("; ")
+ }
+ tm.writeType(typ, b)
+ }
+ b.WriteString(" }")
+
+ case *types.Map:
+ b.WriteString("map[")
+ tm.writeType(t.Key(), b)
+ b.WriteByte(']')
+ tm.writeType(t.Elem(), b)
+
+ case *types.Chan:
+ var s string
+ var parens bool
+ switch t.Dir() {
+ case types.SendRecv:
+ s = "chan "
+ // chan (<-chan T) requires parentheses
+ if c, _ := t.Elem().(*types.Chan); c != nil && c.Dir() == types.RecvOnly {
+ parens = true
+ }
+ case types.SendOnly:
+ s = "chan<- "
+ case types.RecvOnly:
+ s = "<-chan "
+ default:
+ panic("unreachable")
+ }
+ b.WriteString(s)
+ if parens {
+ b.WriteByte('(')
+ }
+ tm.writeType(t.Elem(), b)
+ if parens {
+ b.WriteByte(')')
+ }
+
+ default:
+ panic(fmt.Sprintf("unhandled type: %#v", t))
+ }
+}
+
+// writeTuple renders a parenthesized, comma-separated parameter/result
+// list into b; when variadic, the final element is written as "...T"
+// with the slice's element type.
+func (tm *TypeMap) writeTuple(tup *types.Tuple, variadic bool, b *bytes.Buffer) {
+ b.WriteByte('(')
+ if tup != nil {
+ for i := 0; i != tup.Len(); i++ {
+ v := tup.At(i)
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ typ := v.Type()
+ if variadic && i == tup.Len()-1 {
+ b.WriteString("...")
+ typ = typ.(*types.Slice).Elem()
+ }
+ tm.writeType(typ, b)
+ }
+ }
+ b.WriteByte(')')
+}
+
+// writeSignature renders sig's parameters and results into b: params in
+// parentheses, then a single bare result or a parenthesized result list.
+func (tm *TypeMap) writeSignature(sig *types.Signature, b *bytes.Buffer) {
+ tm.writeTuple(sig.Params(), sig.Variadic(), b)
+
+ n := sig.Results().Len()
+ if n == 0 {
+ // no result
+ return
+ }
+
+ b.WriteByte(' ')
+ if n == 1 {
+ tm.writeType(sig.Results().At(0).Type(), b)
+ return
+ }
+
+ // multiple results
+ tm.writeTuple(sig.Results(), false, b)
+}
+
+// getTypeDescType returns the LLVM descriptor struct type matching t's
+// underlying kind (commonType for basics, ptrType for pointers, etc.).
+// Panics on unhandled kinds.
+func (tm *TypeMap) getTypeDescType(t types.Type) llvm.Type {
+ switch t.Underlying().(type) {
+ case *types.Basic:
+ return tm.commonTypeType
+ case *types.Pointer:
+ return tm.ptrTypeType
+ case *types.Signature:
+ return tm.funcTypeType
+ case *types.Array:
+ return tm.arrayTypeType
+ case *types.Slice:
+ return tm.sliceTypeType
+ case *types.Map:
+ return tm.mapTypeType
+ case *types.Chan:
+ return tm.chanTypeType
+ case *types.Struct:
+ return tm.structTypeType
+ case *types.Interface:
+ return tm.interfaceTypeType
+ default:
+ panic(fmt.Sprintf("unhandled type: %#v", t))
+ }
+}
+
+// getNamedTypeLinkage chooses linkage for a named type's descriptor:
+// external linkage, emitted only by the defining package; types with
+// no package (e.g. error) get link-once-ODR and are emitted everywhere.
+func (tm *TypeMap) getNamedTypeLinkage(nt *types.Named) (linkage llvm.Linkage, emit bool) {
+ if pkg := nt.Obj().Pkg(); pkg != nil {
+ linkage = llvm.ExternalLinkage
+ emit = pkg.Path() == tm.pkgpath
+ } else {
+ linkage = llvm.LinkOnceODRLinkage
+ emit = true
+ }
+
+ return
+}
+
+// getTypeDescLinkage chooses linkage for t's descriptor: named types
+// and pointers-to-named follow the named type's rule; everything else
+// is link-once-ODR and emitted by every package that uses it.
+func (tm *TypeMap) getTypeDescLinkage(t types.Type) (linkage llvm.Linkage, emit bool) {
+ switch t := t.(type) {
+ case *types.Named:
+ linkage, emit = tm.getNamedTypeLinkage(t)
+
+ case *types.Pointer:
+ elem := t.Elem()
+ if nt, ok := elem.(*types.Named); ok {
+ // Thanks to the ptrToThis member, pointers to named types appear
+ // in exactly the same objects as the named types themselves, so
+ // we can give them the same linkage.
+ linkage, emit = tm.getNamedTypeLinkage(nt)
+ return
+ }
+ linkage = llvm.LinkOnceODRLinkage
+ emit = true
+
+ default:
+ linkage = llvm.LinkOnceODRLinkage
+ emit = true
+ }
+
+ return
+}
+
// typeAndInfo pairs a type with its string representation (used as a
// deterministic sort key) and its descriptor bookkeeping entry.
type typeAndInfo struct {
	typ        types.Type
	typeString string
	tdi        *typeDescInfo
}
+
// byTypeName implements sort.Interface over typeAndInfo entries,
// ordering them by their type string so emission order is stable.
type byTypeName []typeAndInfo

func (ts byTypeName) Len() int { return len(ts) }
func (ts byTypeName) Swap(i, j int) {
	ts[i], ts[j] = ts[j], ts[i]
}
func (ts byTypeName) Less(i, j int) bool {
	return ts[i].typeString < ts[j].typeString
}
+
// emitTypeDescInitializers emits initializers for all type descriptors
// this package must define. Emitting one descriptor can register new
// descriptors in tm.types, so we iterate to a fixed point: each pass
// collects the still-uninitialized globals, sorts them by type name for
// deterministic output, and emits them. Afterwards the shared zero
// value is sized/aligned to cover the largest emitted type.
func (tm *TypeMap) emitTypeDescInitializers() {
	var maxSize, maxAlign int64
	maxAlign = 1

	for changed := true; changed; {
		changed = false

		var ts []typeAndInfo

		tm.types.Iterate(func(key types.Type, value interface{}) {
			tdi := value.(*typeDescInfo)
			// A nil initializer marks a descriptor not yet emitted.
			if tdi.global.Initializer().C == nil {
				linkage, emit := tm.getTypeDescLinkage(key)
				tdi.global.SetLinkage(linkage)
				tdi.gc.SetLinkage(linkage)
				if emit {
					changed = true
					ts = append(ts, typeAndInfo{key, key.String(), tdi})
				}
			}
		})

		if changed {
			sort.Sort(byTypeName(ts))
			for _, t := range ts {
				tm.emitTypeDescInitializer(t.typ, t.tdi)
				// Track the largest size/alignment across emitted types.
				if size := tm.Sizeof(t.typ); size > maxSize {
					maxSize = size
				}
				if align := tm.Alignof(t.typ); align > maxAlign {
					maxAlign = align
				}
			}
		}
	}

	tm.zeroType.StructSetBody([]llvm.Type{llvm.ArrayType(tm.ctx.Int8Type(), int(maxSize))}, false)
	tm.zeroValue.SetAlignment(int(maxAlign))
}
+
// GC program opcodes used to describe pointer locations to the
// collector, plus the maximum ARRAY_START nesting depth.
const (
	// From libgo/runtime/mgc0.h
	gcOpcodeEND = iota
	gcOpcodePTR
	gcOpcodeAPTR
	gcOpcodeARRAY_START
	gcOpcodeARRAY_NEXT
	gcOpcodeCALL
	gcOpcodeCHAN_PTR
	gcOpcodeSTRING
	gcOpcodeEFACE
	gcOpcodeIFACE
	gcOpcodeSLICE
	gcOpcodeREGION

	// Maximum nesting depth of inline ARRAY_START encodings; deeper
	// nesting falls back to REGION (see appendGcInsts).
	gcStackCapacity = 8
)
+
+func (tm *TypeMap) makeGcInst(val int64) llvm.Value {
+ c := llvm.ConstInt(tm.inttype, uint64(val), false)
+ return llvm.ConstIntToPtr(c, llvm.PointerType(tm.ctx.Int8Type(), 0))
+}
+
// appendGcInsts appends the GC program words describing a value of type
// t located at the given byte offset. stackSize counts the nesting
// depth of ARRAY_START encodings produced so far; once it reaches
// gcStackCapacity, nested arrays are emitted as REGION references to
// the element type's own GC program instead.
func (tm *TypeMap) appendGcInsts(insts []llvm.Value, t types.Type, offset, stackSize int64) []llvm.Value {
	switch u := t.Underlying().(type) {
	case *types.Basic:
		// Only string and unsafe.Pointer carry pointers; other basic
		// kinds contribute no words.
		switch u.Kind() {
		case types.String:
			insts = append(insts, tm.makeGcInst(gcOpcodeSTRING), tm.makeGcInst(offset))
		case types.UnsafePointer:
			insts = append(insts, tm.makeGcInst(gcOpcodeAPTR), tm.makeGcInst(offset))
		}
	case *types.Pointer:
		insts = append(insts, tm.makeGcInst(gcOpcodePTR), tm.makeGcInst(offset),
			tm.getGcPointer(u.Elem()))
	case *types.Signature, *types.Map:
		// Described as opaque pointers.
		insts = append(insts, tm.makeGcInst(gcOpcodeAPTR), tm.makeGcInst(offset))
	case *types.Array:
		if u.Len() == 0 {
			return insts
		} else if stackSize >= gcStackCapacity {
			// Too deeply nested to encode inline: reference the type's
			// own GC program via a REGION word.
			insts = append(insts, tm.makeGcInst(gcOpcodeREGION), tm.makeGcInst(offset),
				tm.makeGcInst(tm.Sizeof(t)), tm.getGcPointer(t))
		} else {
			// Inline ARRAY_START/ARRAY_NEXT encoding; the element is
			// described once at offset 0 within each stride.
			insts = append(insts, tm.makeGcInst(gcOpcodeARRAY_START), tm.makeGcInst(offset),
				tm.makeGcInst(u.Len()), tm.makeGcInst(tm.Sizeof(u.Elem())))
			insts = tm.appendGcInsts(insts, u.Elem(), 0, stackSize+1)
			insts = append(insts, tm.makeGcInst(gcOpcodeARRAY_NEXT))
		}
	case *types.Slice:
		if tm.Sizeof(u.Elem()) == 0 {
			// Zero-sized elements: treat the data pointer as opaque.
			insts = append(insts, tm.makeGcInst(gcOpcodeAPTR), tm.makeGcInst(offset))
		} else {
			insts = append(insts, tm.makeGcInst(gcOpcodeSLICE), tm.makeGcInst(offset),
				tm.getGcPointer(u.Elem()))
		}
	case *types.Chan:
		insts = append(insts, tm.makeGcInst(gcOpcodeCHAN_PTR), tm.makeGcInst(offset),
			tm.ToRuntime(t))
	case *types.Struct:
		// Describe each field at its computed offset within the struct.
		fields := make([]*types.Var, u.NumFields())
		for i := range fields {
			fields[i] = u.Field(i)
		}
		offsets := tm.Offsetsof(fields)

		for i, field := range fields {
			insts = tm.appendGcInsts(insts, field.Type(), offset+offsets[i], stackSize)
		}
	case *types.Interface:
		if u.NumMethods() == 0 {
			insts = append(insts, tm.makeGcInst(gcOpcodeEFACE), tm.makeGcInst(offset))
		} else {
			insts = append(insts, tm.makeGcInst(gcOpcodeIFACE), tm.makeGcInst(offset))
		}
	default:
		panic(fmt.Sprintf("unhandled type: %#v", t))
	}

	return insts
}
+
// emitTypeDescInitializer fills in the type descriptor global for t and
// replaces its placeholder GC-program global with one sized to hold the
// actual instruction stream.
func (tm *TypeMap) emitTypeDescInitializer(t types.Type, tdi *typeDescInfo) {
	// initialize type descriptor
	tdi.global.SetInitializer(tm.makeTypeDescInitializer(t))

	// initialize GC program: first word is the type's size, then the
	// opcode stream, terminated by END.
	insts := []llvm.Value{tm.makeGcInst(tm.Sizeof(t))}
	insts = tm.appendGcInsts(insts, t, 0, 0)
	insts = append(insts, tm.makeGcInst(gcOpcodeEND))

	i8ptr := llvm.PointerType(llvm.Int8Type(), 0)
	instArray := llvm.ConstArray(i8ptr, insts)

	// The placeholder gc global was declared before the program length
	// was known, so its type is wrong. Create the real global, transfer
	// the name (clearing the old one first to avoid a rename clash) and
	// linkage, then redirect all uses of the placeholder to it.
	newGc := llvm.AddGlobal(tm.module, instArray.Type(), "")
	newGc.SetGlobalConstant(true)
	newGc.SetInitializer(instArray)
	gcName := tdi.gc.Name()
	tdi.gc.SetName("")
	newGc.SetName(gcName)
	newGc.SetLinkage(tdi.gc.Linkage())

	tdi.gc.ReplaceAllUsesWith(llvm.ConstBitCast(newGc, tdi.gc.Type()))
	tdi.gc.EraseFromParentAsGlobal()
	tdi.gc = llvm.Value{nil}
	tdi.gcPtr = llvm.ConstBitCast(newGc, i8ptr)
}
+
// makeTypeDescInitializer builds the constant initializer for t's type
// descriptor, dispatching on t's underlying type to the kind-specific
// builder.
func (tm *TypeMap) makeTypeDescInitializer(t types.Type) llvm.Value {
	switch u := t.Underlying().(type) {
	case *types.Basic:
		return tm.makeBasicType(t, u)
	case *types.Pointer:
		return tm.makePointerType(t, u)
	case *types.Signature:
		return tm.makeFuncType(t, u)
	case *types.Array:
		return tm.makeArrayType(t, u)
	case *types.Slice:
		return tm.makeSliceType(t, u)
	case *types.Map:
		return tm.makeMapType(t, u)
	case *types.Chan:
		return tm.makeChanType(t, u)
	case *types.Struct:
		return tm.makeStructType(t, u)
	case *types.Interface:
		return tm.makeInterfaceType(t, u)
	default:
		panic(fmt.Sprintf("unhandled type: %#v", t))
	}
}
+
// algorithmFns caches the generated hash and equality functions for a
// composite type (stored in tm.algs).
type algorithmFns struct {
	hash, equal llvm.Value
}
+
// getStructAlgorithmFunctions returns (building and caching on first
// use) the hash and equality functions for struct type st. If any field
// is uncomparable, the error stubs are returned without caching.
func (tm *TypeMap) getStructAlgorithmFunctions(st *types.Struct) (hash, equal llvm.Value) {
	if algs, ok := tm.algs.At(st).(algorithmFns); ok {
		return algs.hash, algs.equal
	}

	// Collect per-field algorithm functions; bail out on the first
	// uncomparable field.
	hashes := make([]llvm.Value, st.NumFields())
	equals := make([]llvm.Value, st.NumFields())

	for i := range hashes {
		fhash, fequal := tm.getAlgorithmFunctions(st.Field(i).Type())
		if fhash == tm.hashFnError {
			return fhash, fequal
		}
		hashes[i], equals[i] = fhash, fequal
	}

	i8ptr := llvm.PointerType(tm.ctx.Int8Type(), 0)
	llsptrty := llvm.PointerType(tm.ToLLVM(st), 0)

	builder := tm.ctx.NewBuilder()
	defer builder.Dispose()

	// Hash function: combine field hashes as h = h*33 + hash(field).
	hash = llvm.AddFunction(tm.module, tm.mc.mangleHashFunctionName(st), tm.hashFnType)
	hash.SetLinkage(llvm.LinkOnceODRLinkage)
	builder.SetInsertPointAtEnd(llvm.AddBasicBlock(hash, "entry"))
	sptr := builder.CreateBitCast(hash.Param(0), llsptrty, "")

	hashval := llvm.ConstNull(tm.inttype)
	i33 := llvm.ConstInt(tm.inttype, 33, false)

	for i, fhash := range hashes {
		fptr := builder.CreateStructGEP(sptr, i, "")
		fptr = builder.CreateBitCast(fptr, i8ptr, "")

		fsize := llvm.ConstInt(tm.inttype, uint64(tm.sizes.Sizeof(st.Field(i).Type())), false)

		hashcall := builder.CreateCall(fhash, []llvm.Value{fptr, fsize}, "")
		hashval = builder.CreateMul(hashval, i33, "")
		hashval = builder.CreateAdd(hashval, hashcall, "")
	}

	builder.CreateRet(hashval)

	// Equality function: compare field by field, branching to the
	// shared retzero block on the first mismatch.
	equal = llvm.AddFunction(tm.module, tm.mc.mangleEqualFunctionName(st), tm.equalFnType)
	equal.SetLinkage(llvm.LinkOnceODRLinkage)
	eqentrybb := llvm.AddBasicBlock(equal, "entry")
	eqretzerobb := llvm.AddBasicBlock(equal, "retzero")

	builder.SetInsertPointAtEnd(eqentrybb)
	s1ptr := builder.CreateBitCast(equal.Param(0), llsptrty, "")
	s2ptr := builder.CreateBitCast(equal.Param(1), llsptrty, "")

	zerobool := llvm.ConstNull(tm.ctx.Int8Type())
	onebool := llvm.ConstInt(tm.ctx.Int8Type(), 1, false)

	for i, fequal := range equals {
		f1ptr := builder.CreateStructGEP(s1ptr, i, "")
		f1ptr = builder.CreateBitCast(f1ptr, i8ptr, "")
		f2ptr := builder.CreateStructGEP(s2ptr, i, "")
		f2ptr = builder.CreateBitCast(f2ptr, i8ptr, "")

		fsize := llvm.ConstInt(tm.inttype, uint64(tm.sizes.Sizeof(st.Field(i).Type())), false)

		equalcall := builder.CreateCall(fequal, []llvm.Value{f1ptr, f2ptr, fsize}, "")
		equaleqzero := builder.CreateICmp(llvm.IntEQ, equalcall, zerobool, "")

		// Continue comparing in a fresh block if this field matched.
		contbb := llvm.AddBasicBlock(equal, "cont")
		builder.CreateCondBr(equaleqzero, eqretzerobb, contbb)

		builder.SetInsertPointAtEnd(contbb)
	}

	builder.CreateRet(onebool)

	builder.SetInsertPointAtEnd(eqretzerobb)
	builder.CreateRet(zerobool)

	tm.algs.Set(st, algorithmFns{hash, equal})
	return
}
+
// getArrayAlgorithmFunctions returns (building and caching on first
// use) the hash and equality functions for array type at. Both
// functions loop over the elements; zero-length arrays return a
// constant (hash 0, equal true). Uncomparable element types propagate
// the error stubs without caching.
func (tm *TypeMap) getArrayAlgorithmFunctions(at *types.Array) (hash, equal llvm.Value) {
	if algs, ok := tm.algs.At(at).(algorithmFns); ok {
		return algs.hash, algs.equal
	}

	ehash, eequal := tm.getAlgorithmFunctions(at.Elem())
	if ehash == tm.hashFnError {
		return ehash, eequal
	}

	i8ptr := llvm.PointerType(tm.ctx.Int8Type(), 0)
	llelemty := llvm.PointerType(tm.ToLLVM(at.Elem()), 0)

	i1 := llvm.ConstInt(tm.inttype, 1, false)
	alen := llvm.ConstInt(tm.inttype, uint64(at.Len()), false)
	esize := llvm.ConstInt(tm.inttype, uint64(tm.sizes.Sizeof(at.Elem())), false)

	builder := tm.ctx.NewBuilder()
	defer builder.Dispose()

	// Hash function: h = h*33 + hash(elem) over all elements.
	hash = llvm.AddFunction(tm.module, tm.mc.mangleHashFunctionName(at), tm.hashFnType)
	hash.SetLinkage(llvm.LinkOnceODRLinkage)
	hashentrybb := llvm.AddBasicBlock(hash, "entry")
	builder.SetInsertPointAtEnd(hashentrybb)
	if at.Len() == 0 {
		builder.CreateRet(llvm.ConstNull(tm.inttype))
	} else {
		i33 := llvm.ConstInt(tm.inttype, 33, false)

		aptr := builder.CreateBitCast(hash.Param(0), llelemty, "")
		loopbb := llvm.AddBasicBlock(hash, "loop")
		builder.CreateBr(loopbb)

		exitbb := llvm.AddBasicBlock(hash, "exit")

		// Loop body: phi nodes for the index and running hash; incoming
		// edges are wired up below, after the updated values exist.
		builder.SetInsertPointAtEnd(loopbb)
		indexphi := builder.CreatePHI(tm.inttype, "")
		index := indexphi
		hashvalphi := builder.CreatePHI(tm.inttype, "")
		hashval := hashvalphi

		eptr := builder.CreateGEP(aptr, []llvm.Value{index}, "")
		eptr = builder.CreateBitCast(eptr, i8ptr, "")

		hashcall := builder.CreateCall(ehash, []llvm.Value{eptr, esize}, "")
		hashval = builder.CreateMul(hashval, i33, "")
		hashval = builder.CreateAdd(hashval, hashcall, "")

		index = builder.CreateAdd(index, i1, "")

		indexphi.AddIncoming(
			[]llvm.Value{llvm.ConstNull(tm.inttype), index},
			[]llvm.BasicBlock{hashentrybb, loopbb},
		)
		hashvalphi.AddIncoming(
			[]llvm.Value{llvm.ConstNull(tm.inttype), hashval},
			[]llvm.BasicBlock{hashentrybb, loopbb},
		)

		exit := builder.CreateICmp(llvm.IntEQ, index, alen, "")
		builder.CreateCondBr(exit, exitbb, loopbb)

		builder.SetInsertPointAtEnd(exitbb)
		builder.CreateRet(hashval)
	}

	zerobool := llvm.ConstNull(tm.ctx.Int8Type())
	onebool := llvm.ConstInt(tm.ctx.Int8Type(), 1, false)

	// Equality function: compare element by element, jumping to retzero
	// on the first mismatch.
	equal = llvm.AddFunction(tm.module, tm.mc.mangleEqualFunctionName(at), tm.equalFnType)
	equal.SetLinkage(llvm.LinkOnceODRLinkage)
	eqentrybb := llvm.AddBasicBlock(equal, "entry")
	builder.SetInsertPointAtEnd(eqentrybb)
	if at.Len() == 0 {
		builder.CreateRet(onebool)
	} else {
		a1ptr := builder.CreateBitCast(equal.Param(0), llelemty, "")
		a2ptr := builder.CreateBitCast(equal.Param(1), llelemty, "")
		loopbb := llvm.AddBasicBlock(equal, "loop")
		builder.CreateBr(loopbb)

		exitbb := llvm.AddBasicBlock(equal, "exit")
		retzerobb := llvm.AddBasicBlock(equal, "retzero")

		builder.SetInsertPointAtEnd(loopbb)
		indexphi := builder.CreatePHI(tm.inttype, "")
		index := indexphi

		e1ptr := builder.CreateGEP(a1ptr, []llvm.Value{index}, "")
		e1ptr = builder.CreateBitCast(e1ptr, i8ptr, "")
		e2ptr := builder.CreateGEP(a2ptr, []llvm.Value{index}, "")
		e2ptr = builder.CreateBitCast(e2ptr, i8ptr, "")

		equalcall := builder.CreateCall(eequal, []llvm.Value{e1ptr, e2ptr, esize}, "")
		equaleqzero := builder.CreateICmp(llvm.IntEQ, equalcall, zerobool, "")

		contbb := llvm.AddBasicBlock(equal, "cont")
		builder.CreateCondBr(equaleqzero, retzerobb, contbb)

		// Element matched: advance the index and loop or exit. The
		// phi's loop edge comes from contbb, not loopbb.
		builder.SetInsertPointAtEnd(contbb)

		index = builder.CreateAdd(index, i1, "")

		indexphi.AddIncoming(
			[]llvm.Value{llvm.ConstNull(tm.inttype), index},
			[]llvm.BasicBlock{eqentrybb, contbb},
		)

		exit := builder.CreateICmp(llvm.IntEQ, index, alen, "")
		builder.CreateCondBr(exit, exitbb, loopbb)

		builder.SetInsertPointAtEnd(exitbb)
		builder.CreateRet(onebool)

		builder.SetInsertPointAtEnd(retzerobb)
		builder.CreateRet(zerobool)
	}

	tm.algs.Set(at, algorithmFns{hash, equal})
	return
}
+
+func (tm *TypeMap) getAlgorithmFunctions(t types.Type) (hash, equal llvm.Value) {
+ switch t := t.Underlying().(type) {
+ case *types.Interface:
+ if t.NumMethods() == 0 {
+ hash = tm.hashFnEmptyInterface
+ equal = tm.equalFnEmptyInterface
+ } else {
+ hash = tm.hashFnInterface
+ equal = tm.equalFnInterface
+ }
+ case *types.Basic:
+ switch t.Kind() {
+ case types.Float32, types.Float64:
+ hash = tm.hashFnFloat
+ equal = tm.equalFnFloat
+ case types.Complex64, types.Complex128:
+ hash = tm.hashFnComplex
+ equal = tm.equalFnComplex
+ case types.String:
+ hash = tm.hashFnString
+ equal = tm.equalFnString
+ default:
+ hash = tm.hashFnIdentity
+ equal = tm.equalFnIdentity
+ }
+ case *types.Signature, *types.Map, *types.Slice:
+ hash = tm.hashFnError
+ equal = tm.equalFnError
+ case *types.Struct:
+ hash, equal = tm.getStructAlgorithmFunctions(t)
+ case *types.Array:
+ hash, equal = tm.getArrayAlgorithmFunctions(t)
+ default:
+ hash = tm.hashFnIdentity
+ equal = tm.equalFnIdentity
+ }
+
+ return
+}
+
// getTypeDescInfo returns the cached descriptor bookkeeping for t,
// creating on first use the descriptor global, a placeholder GC-program
// global, and (for map types) the map descriptor global. Descriptor
// initializers are filled in later by emitTypeDescInitializers; the
// map descriptor is initialized immediately.
func (tm *TypeMap) getTypeDescInfo(t types.Type) *typeDescInfo {
	if tdi, ok := tm.types.At(t).(*typeDescInfo); ok {
		return tdi
	}

	var b bytes.Buffer
	tm.mc.mangleTypeDescriptorName(t, &b)

	global := llvm.AddGlobal(tm.module, tm.getTypeDescType(t), b.String())
	global.SetGlobalConstant(true)
	ptr := llvm.ConstBitCast(global, llvm.PointerType(tm.commonTypeType, 0))

	// Placeholder for the GC program; replaced with a correctly sized
	// array global in emitTypeDescInitializer.
	gc := llvm.AddGlobal(tm.module, llvm.PointerType(llvm.Int8Type(), 0), b.String()+"$gc")
	gc.SetGlobalConstant(true)
	gcPtr := llvm.ConstBitCast(gc, llvm.PointerType(tm.ctx.Int8Type(), 0))

	var mapDescPtr llvm.Value
	if m, ok := t.Underlying().(*types.Map); ok {
		var mapb bytes.Buffer
		tm.mc.mangleMapDescriptorName(t, &mapb)

		mapDescPtr = llvm.AddGlobal(tm.module, tm.mapDescType, mapb.String())
		mapDescPtr.SetGlobalConstant(true)
		mapDescPtr.SetLinkage(llvm.LinkOnceODRLinkage)
		mapDescPtr.SetInitializer(tm.makeMapDesc(ptr, m))
	}

	tdi := &typeDescInfo{
		global:        global,
		commonTypePtr: ptr,
		mapDescPtr:    mapDescPtr,
		gc:            gc,
		gcPtr:         gcPtr,
	}
	tm.types.Set(t, tdi)
	return tdi
}
+
+func (tm *TypeMap) getTypeDescriptorPointer(t types.Type) llvm.Value {
+ return tm.getTypeDescInfo(t).commonTypePtr
+}
+
+func (tm *TypeMap) getMapDescriptorPointer(t types.Type) llvm.Value {
+ return tm.getTypeDescInfo(t).mapDescPtr
+}
+
+func (tm *TypeMap) getGcPointer(t types.Type) llvm.Value {
+ return tm.getTypeDescInfo(t).gcPtr
+}
+
+func (tm *TypeMap) getItabPointer(srctype types.Type, targettype *types.Interface) llvm.Value {
+ if targettype.NumMethods() == 0 {
+ return tm.ToRuntime(srctype)
+ } else {
+ return tm.getImtPointer(srctype, targettype)
+ }
+}
+
// getImtPointer returns (building and caching on first use) the
// interface method table for converting srctype to targettype: a
// constant array whose first element is srctype's runtime type
// descriptor, followed by the resolved method pointers in the target
// interface's method-set order.
func (tm *TypeMap) getImtPointer(srctype types.Type, targettype *types.Interface) llvm.Value {
	tdi := tm.getTypeDescInfo(srctype)

	if ptr, ok := tdi.interfaceMethodTables.At(targettype).(llvm.Value); ok {
		return ptr
	}

	srcms := tm.MethodSet(srctype)
	targetms := tm.MethodSet(targettype)

	i8ptr := llvm.PointerType(llvm.Int8Type(), 0)

	elems := make([]llvm.Value, targetms.Len()+1)
	elems[0] = tm.ToRuntime(srctype)
	for i, targetm := range orderedMethodSet(targetms) {
		// Find srctype's implementation of each target method.
		srcm := srcms.Lookup(targetm.Obj().Pkg(), targetm.Obj().Name())

		elems[i+1] = tm.methodResolver.ResolveMethod(srcm).value
	}
	imtinit := llvm.ConstArray(i8ptr, elems)

	var b bytes.Buffer
	tm.mc.mangleImtName(srctype, targettype, &b)
	imt := llvm.AddGlobal(tm.module, imtinit.Type(), b.String())
	imt.SetGlobalConstant(true)
	imt.SetInitializer(imtinit)
	imt.SetLinkage(llvm.LinkOnceODRLinkage)

	imtptr := llvm.ConstBitCast(imt, i8ptr)
	tdi.interfaceMethodTables.Set(targettype, imtptr)
	return imtptr
}
+
// Runtime type kind codes shared with the gccgo runtime. NO_POINTERS is
// a flag bit OR'd into the kind when the type contains no pointers
// (see runtimeTypeKind).
const (
	// From gofrontend/types.h
	gccgoRuntimeTypeKindBOOL           = 1
	gccgoRuntimeTypeKindINT            = 2
	gccgoRuntimeTypeKindINT8           = 3
	gccgoRuntimeTypeKindINT16          = 4
	gccgoRuntimeTypeKindINT32          = 5
	gccgoRuntimeTypeKindINT64          = 6
	gccgoRuntimeTypeKindUINT           = 7
	gccgoRuntimeTypeKindUINT8          = 8
	gccgoRuntimeTypeKindUINT16         = 9
	gccgoRuntimeTypeKindUINT32         = 10
	gccgoRuntimeTypeKindUINT64         = 11
	gccgoRuntimeTypeKindUINTPTR        = 12
	gccgoRuntimeTypeKindFLOAT32        = 13
	gccgoRuntimeTypeKindFLOAT64        = 14
	gccgoRuntimeTypeKindCOMPLEX64      = 15
	gccgoRuntimeTypeKindCOMPLEX128     = 16
	gccgoRuntimeTypeKindARRAY          = 17
	gccgoRuntimeTypeKindCHAN           = 18
	gccgoRuntimeTypeKindFUNC           = 19
	gccgoRuntimeTypeKindINTERFACE      = 20
	gccgoRuntimeTypeKindMAP            = 21
	gccgoRuntimeTypeKindPTR            = 22
	gccgoRuntimeTypeKindSLICE          = 23
	gccgoRuntimeTypeKindSTRING         = 24
	gccgoRuntimeTypeKindSTRUCT         = 25
	gccgoRuntimeTypeKindUNSAFE_POINTER = 26
	gccgoRuntimeTypeKindNO_POINTERS    = (1 << 7)
)
+
+func hasPointers(t types.Type) bool {
+ switch t := t.(type) {
+ case *types.Basic:
+ return t.Kind() == types.String || t.Kind() == types.UnsafePointer
+
+ case *types.Signature, *types.Pointer, *types.Slice, *types.Map, *types.Chan, *types.Interface:
+ return true
+
+ case *types.Struct:
+ for i := 0; i != t.NumFields(); i++ {
+ if hasPointers(t.Field(i).Type()) {
+ return true
+ }
+ }
+ return false
+
+ case *types.Named:
+ return hasPointers(t.Underlying())
+
+ case *types.Array:
+ return hasPointers(t.Elem())
+
+ default:
+ panic("unrecognized type")
+ }
+}
+
+func runtimeTypeKind(t types.Type) (k uint8) {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch t.Kind() {
+ case types.Bool:
+ k = gccgoRuntimeTypeKindBOOL
+ case types.Int:
+ k = gccgoRuntimeTypeKindINT
+ case types.Int8:
+ k = gccgoRuntimeTypeKindINT8
+ case types.Int16:
+ k = gccgoRuntimeTypeKindINT16
+ case types.Int32:
+ k = gccgoRuntimeTypeKindINT32
+ case types.Int64:
+ k = gccgoRuntimeTypeKindINT64
+ case types.Uint:
+ k = gccgoRuntimeTypeKindUINT
+ case types.Uint8:
+ k = gccgoRuntimeTypeKindUINT8
+ case types.Uint16:
+ k = gccgoRuntimeTypeKindUINT16
+ case types.Uint32:
+ k = gccgoRuntimeTypeKindUINT32
+ case types.Uint64:
+ k = gccgoRuntimeTypeKindUINT64
+ case types.Uintptr:
+ k = gccgoRuntimeTypeKindUINTPTR
+ case types.Float32:
+ k = gccgoRuntimeTypeKindFLOAT32
+ case types.Float64:
+ k = gccgoRuntimeTypeKindFLOAT64
+ case types.Complex64:
+ k = gccgoRuntimeTypeKindCOMPLEX64
+ case types.Complex128:
+ k = gccgoRuntimeTypeKindCOMPLEX128
+ case types.String:
+ k = gccgoRuntimeTypeKindSTRING
+ case types.UnsafePointer:
+ k = gccgoRuntimeTypeKindUNSAFE_POINTER
+ default:
+ panic("unrecognized builtin type")
+ }
+ case *types.Array:
+ k = gccgoRuntimeTypeKindARRAY
+ case *types.Slice:
+ k = gccgoRuntimeTypeKindSLICE
+ case *types.Struct:
+ k = gccgoRuntimeTypeKindSTRUCT
+ case *types.Pointer:
+ k = gccgoRuntimeTypeKindPTR
+ case *types.Signature:
+ k = gccgoRuntimeTypeKindFUNC
+ case *types.Interface:
+ k = gccgoRuntimeTypeKindINTERFACE
+ case *types.Map:
+ k = gccgoRuntimeTypeKindMAP
+ case *types.Chan:
+ k = gccgoRuntimeTypeKindCHAN
+ case *types.Named:
+ return runtimeTypeKind(t.Underlying())
+ default:
+ panic("unrecognized type")
+ }
+
+ if !hasPointers(t) {
+ k |= gccgoRuntimeTypeKindNO_POINTERS
+ }
+
+ return
+}
+
// makeCommonType builds the 12-slot commonType initializer shared by
// all type descriptors: kind, alignment, size, hash, algorithm
// functions, GC program, string form, uncommon (name/method) data,
// pointer-to-this descriptor, and zero value.
func (tm *TypeMap) makeCommonType(t types.Type) llvm.Value {
	var vals [12]llvm.Value
	vals[0] = llvm.ConstInt(tm.ctx.Int8Type(), uint64(runtimeTypeKind(t)), false) // kind code
	vals[1] = llvm.ConstInt(tm.ctx.Int8Type(), uint64(tm.Alignof(t)), false)      // alignment
	vals[2] = vals[1]                                                             // field alignment (same value here)
	vals[3] = llvm.ConstInt(tm.inttype, uint64(tm.Sizeof(t)), false)              // size in bytes
	vals[4] = llvm.ConstInt(tm.ctx.Int32Type(), uint64(tm.getTypeHash(t)), false) // type hash
	hash, equal := tm.getAlgorithmFunctions(t)
	vals[5] = hash
	vals[6] = equal
	vals[7] = tm.getGcPointer(t)
	// Human-readable type string.
	var b bytes.Buffer
	tm.writeType(t, &b)
	vals[8] = tm.globalStringPtr(b.String())
	vals[9] = tm.makeUncommonTypePtr(t)
	// ptrToThis: only named types get a descriptor for their pointer
	// type; others store a null pointer.
	if _, ok := t.(*types.Named); ok {
		vals[10] = tm.getTypeDescriptorPointer(types.NewPointer(t))
	} else {
		vals[10] = llvm.ConstPointerNull(llvm.PointerType(tm.commonTypeType, 0))
	}
	vals[11] = tm.zeroValue

	return llvm.ConstNamedStruct(tm.commonTypeType, vals[:])
}
+
// makeBasicType builds the descriptor initializer for a basic type,
// which carries no information beyond the common type data. The u
// parameter exists only to match the dispatch signature used by
// makeTypeDescInitializer.
func (tm *TypeMap) makeBasicType(t types.Type, u *types.Basic) llvm.Value {
	return tm.makeCommonType(t)
}
+
+func (tm *TypeMap) makeArrayType(t types.Type, a *types.Array) llvm.Value {
+ var vals [4]llvm.Value
+ vals[0] = tm.makeCommonType(t)
+ vals[1] = tm.getTypeDescriptorPointer(a.Elem())
+ vals[2] = tm.getTypeDescriptorPointer(types.NewSlice(a.Elem()))
+ vals[3] = llvm.ConstInt(tm.inttype, uint64(a.Len()), false)
+
+ return llvm.ConstNamedStruct(tm.arrayTypeType, vals[:])
+}
+
+func (tm *TypeMap) makeSliceType(t types.Type, s *types.Slice) llvm.Value {
+ var vals [2]llvm.Value
+ vals[0] = tm.makeCommonType(t)
+ vals[1] = tm.getTypeDescriptorPointer(s.Elem())
+
+ return llvm.ConstNamedStruct(tm.sliceTypeType, vals[:])
+}
+
// makeStructType builds the structType descriptor initializer: common
// data plus a slice of per-field records (name, pkgPath, type, tag,
// offset).
func (tm *TypeMap) makeStructType(t types.Type, s *types.Struct) llvm.Value {
	var vals [2]llvm.Value
	vals[0] = tm.makeCommonType(t)

	fieldVars := make([]*types.Var, s.NumFields())
	for i := range fieldVars {
		fieldVars[i] = s.Field(i)
	}
	offsets := tm.Offsetsof(fieldVars)
	structFields := make([]llvm.Value, len(fieldVars))
	for i, field := range fieldVars {
		var sfvals [5]llvm.Value
		// [0] name: null for anonymous (embedded) fields.
		if !field.Anonymous() {
			sfvals[0] = tm.globalStringPtr(field.Name())
		} else {
			sfvals[0] = llvm.ConstPointerNull(llvm.PointerType(tm.stringType, 0))
		}
		// [1] pkgPath: set only for unexported fields.
		if !field.Exported() && field.Pkg() != nil {
			sfvals[1] = tm.globalStringPtr(field.Pkg().Path())
		} else {
			sfvals[1] = llvm.ConstPointerNull(llvm.PointerType(tm.stringType, 0))
		}
		// [2] field type descriptor.
		sfvals[2] = tm.getTypeDescriptorPointer(field.Type())
		// [3] struct tag: null when empty.
		if tag := s.Tag(i); tag != "" {
			sfvals[3] = tm.globalStringPtr(tag)
		} else {
			sfvals[3] = llvm.ConstPointerNull(llvm.PointerType(tm.stringType, 0))
		}
		// [4] byte offset within the struct.
		sfvals[4] = llvm.ConstInt(tm.inttype, uint64(offsets[i]), false)

		structFields[i] = llvm.ConstNamedStruct(tm.structFieldType, sfvals[:])
	}
	vals[1] = tm.makeSlice(structFields, tm.structFieldSliceType)

	return llvm.ConstNamedStruct(tm.structTypeType, vals[:])
}
+
+func (tm *TypeMap) makePointerType(t types.Type, p *types.Pointer) llvm.Value {
+ var vals [2]llvm.Value
+ vals[0] = tm.makeCommonType(t)
+ vals[1] = tm.getTypeDescriptorPointer(p.Elem())
+
+ return llvm.ConstNamedStruct(tm.ptrTypeType, vals[:])
+}
+
+func (tm *TypeMap) rtypeSlice(t *types.Tuple) llvm.Value {
+ rtypes := make([]llvm.Value, t.Len())
+ for i := range rtypes {
+ rtypes[i] = tm.getTypeDescriptorPointer(t.At(i).Type())
+ }
+ return tm.makeSlice(rtypes, tm.typeSliceType)
+}
+
+func (tm *TypeMap) makeFuncType(t types.Type, f *types.Signature) llvm.Value {
+ var vals [4]llvm.Value
+ vals[0] = tm.makeCommonType(t)
+ // dotdotdot
+ variadic := 0
+ if f.Variadic() {
+ variadic = 1
+ }
+ vals[1] = llvm.ConstInt(llvm.Int8Type(), uint64(variadic), false)
+ // in
+ vals[2] = tm.rtypeSlice(f.Params())
+ // out
+ vals[3] = tm.rtypeSlice(f.Results())
+
+ return llvm.ConstNamedStruct(tm.funcTypeType, vals[:])
+}
+
// makeInterfaceType builds the interfaceType descriptor initializer:
// common data plus a slice of (name, pkgPath, type) records for the
// interface's methods, in method-set order.
func (tm *TypeMap) makeInterfaceType(t types.Type, i *types.Interface) llvm.Value {
	var vals [2]llvm.Value
	vals[0] = tm.makeCommonType(t)

	methodset := tm.MethodSet(i)
	imethods := make([]llvm.Value, methodset.Len())
	for index, ms := range orderedMethodSet(methodset) {
		method := ms.Obj()
		var imvals [3]llvm.Value
		imvals[0] = tm.globalStringPtr(method.Name())
		// pkgPath is set only for unexported methods.
		if !method.Exported() && method.Pkg() != nil {
			imvals[1] = tm.globalStringPtr(method.Pkg().Path())
		} else {
			imvals[1] = llvm.ConstPointerNull(llvm.PointerType(tm.stringType, 0))
		}
		// Method type with the receiver stripped.
		mtyp := method.Type().(*types.Signature)
		mftyp := types.NewSignature(nil, nil, mtyp.Params(), mtyp.Results(), mtyp.Variadic())
		imvals[2] = tm.getTypeDescriptorPointer(mftyp)

		imethods[index] = llvm.ConstNamedStruct(tm.imethodType, imvals[:])
	}
	vals[1] = tm.makeSlice(imethods, tm.imethodSliceType)

	return llvm.ConstNamedStruct(tm.interfaceTypeType, vals[:])
}
+
+func (tm *TypeMap) makeMapType(t types.Type, m *types.Map) llvm.Value {
+ var vals [3]llvm.Value
+ vals[0] = tm.makeCommonType(t)
+ vals[1] = tm.getTypeDescriptorPointer(m.Key())
+ vals[2] = tm.getTypeDescriptorPointer(m.Elem())
+
+ return llvm.ConstNamedStruct(tm.mapTypeType, vals[:])
+}
+
// makeMapDesc builds the runtime map descriptor for m. The entry struct
// modeled here is {unsafe.Pointer, key, value} — the leading pointer is
// presumably the entry chain link used by libgo's map runtime (TODO:
// confirm against libgo). The descriptor records the entry size and the
// byte offsets of the key and value within an entry.
func (tm *TypeMap) makeMapDesc(ptr llvm.Value, m *types.Map) llvm.Value {
	mapEntryType := structBType{[]backendType{
		tm.getBackendType(types.Typ[types.UnsafePointer]),
		tm.getBackendType(m.Key()),
		tm.getBackendType(m.Elem()),
	}}.ToLLVM(tm.ctx)

	var vals [4]llvm.Value
	// map_descriptor
	vals[0] = ptr
	// entry_size
	vals[1] = llvm.ConstInt(tm.inttype, tm.target.TypeAllocSize(mapEntryType), false)
	// key_offset
	vals[2] = llvm.ConstInt(tm.inttype, tm.target.ElementOffset(mapEntryType, 1), false)
	// value_offset
	vals[3] = llvm.ConstInt(tm.inttype, tm.target.ElementOffset(mapEntryType, 2), false)

	return llvm.ConstNamedStruct(tm.mapDescType, vals[:])
}
+
+func (tm *TypeMap) makeChanType(t types.Type, c *types.Chan) llvm.Value {
+ var vals [3]llvm.Value
+ vals[0] = tm.makeCommonType(t)
+ vals[1] = tm.getTypeDescriptorPointer(c.Elem())
+
+ // From gofrontend/go/types.cc
+ // These bits must match the ones in libgo/runtime/go-type.h.
+ var dir int
+ switch c.Dir() {
+ case types.RecvOnly:
+ dir = 1
+ case types.SendOnly:
+ dir = 2
+ case types.SendRecv:
+ dir = 3
+ }
+ vals[2] = llvm.ConstInt(tm.inttype, uint64(dir), false)
+
+ return llvm.ConstNamedStruct(tm.chanTypeType, vals[:])
+}
+
// makeUncommonTypePtr builds the uncommonType data for t — its name,
// package path, and full method table — and returns a pointer to an
// internal global holding it. Returns a null pointer when t is neither
// basic nor named and has no methods, since there is nothing to record.
func (tm *TypeMap) makeUncommonTypePtr(t types.Type) llvm.Value {
	_, isbasic := t.(*types.Basic)
	_, isnamed := t.(*types.Named)

	var mset types.MethodSet
	// We store interface methods on the interface type.
	if _, ok := t.Underlying().(*types.Interface); !ok {
		mset = *tm.MethodSet(t)
	}

	if !isbasic && !isnamed && mset.Len() == 0 {
		return llvm.ConstPointerNull(llvm.PointerType(tm.uncommonTypeType, 0))
	}

	// vals: [0] name, [1] pkgPath, [2] methods slice.
	var vals [3]llvm.Value

	nullStringPtr := llvm.ConstPointerNull(llvm.PointerType(tm.stringType, 0))
	vals[0] = nullStringPtr
	vals[1] = nullStringPtr

	if isbasic || isnamed {
		nti := tm.mc.getNamedTypeInfo(t)
		vals[0] = tm.globalStringPtr(nti.name)
		if nti.pkgpath != "" {
			// Function-local types qualify the path with the enclosing
			// function name and, if needed, a scope number.
			path := nti.pkgpath
			if nti.functionName != "" {
				path += "." + nti.functionName
				if nti.scopeNum != 0 {
					path += "$" + strconv.Itoa(nti.scopeNum)
				}
			}
			vals[1] = tm.globalStringPtr(path)
		}
	}

	// Store methods. All methods must be stored, not only exported ones;
	// this is to allow satisfying of interfaces with non-exported methods.
	methods := make([]llvm.Value, mset.Len())
	omset := orderedMethodSet(&mset)
	for i := range methods {
		var mvals [5]llvm.Value

		sel := omset[i]
		mname := sel.Obj().Name()
		mfunc := tm.methodResolver.ResolveMethod(sel)
		ftyp := mfunc.Type().(*types.Signature)

		// name
		mvals[0] = tm.globalStringPtr(mname)

		// pkgPath
		mvals[1] = nullStringPtr
		if pkg := sel.Obj().Pkg(); pkg != nil && !sel.Obj().Exported() {
			mvals[1] = tm.globalStringPtr(pkg.Path())
		}

		// mtyp (method type, no receiver)
		mftyp := types.NewSignature(nil, nil, ftyp.Params(), ftyp.Results(), ftyp.Variadic())
		mvals[2] = tm.getTypeDescriptorPointer(mftyp)

		// typ (function type, with receiver)
		recvparam := types.NewParam(0, nil, "", t)
		params := ftyp.Params()
		rfparams := make([]*types.Var, params.Len()+1)
		rfparams[0] = recvparam
		for i := 0; i != ftyp.Params().Len(); i++ {
			rfparams[i+1] = params.At(i)
		}
		rftyp := types.NewSignature(nil, nil, types.NewTuple(rfparams...), ftyp.Results(), ftyp.Variadic())
		mvals[3] = tm.getTypeDescriptorPointer(rftyp)

		// function
		mvals[4] = mfunc.value

		methods[i] = llvm.ConstNamedStruct(tm.methodType, mvals[:])
	}

	vals[2] = tm.makeSlice(methods, tm.methodSliceType)

	uncommonType := llvm.ConstNamedStruct(tm.uncommonTypeType, vals[:])

	uncommonTypePtr := llvm.AddGlobal(tm.module, tm.uncommonTypeType, "")
	uncommonTypePtr.SetGlobalConstant(true)
	uncommonTypePtr.SetInitializer(uncommonType)
	uncommonTypePtr.SetLinkage(llvm.InternalLinkage)
	return uncommonTypePtr
}
+
+// globalStringPtr returns a *string with the specified value.
+func (tm *TypeMap) globalStringPtr(value string) llvm.Value {
+ strval := llvm.ConstString(value, false)
+ strglobal := llvm.AddGlobal(tm.module, strval.Type(), "")
+ strglobal.SetGlobalConstant(true)
+ strglobal.SetLinkage(llvm.InternalLinkage)
+ strglobal.SetInitializer(strval)
+ strglobal = llvm.ConstBitCast(strglobal, llvm.PointerType(llvm.Int8Type(), 0))
+ strlen := llvm.ConstInt(tm.inttype, uint64(len(value)), false)
+ str := llvm.ConstStruct([]llvm.Value{strglobal, strlen}, false)
+ g := llvm.AddGlobal(tm.module, str.Type(), "")
+ g.SetGlobalConstant(true)
+ g.SetLinkage(llvm.InternalLinkage)
+ g.SetInitializer(str)
+ return g
+}
+
+func (tm *TypeMap) makeNamedSliceType(tname string, elemtyp llvm.Type) llvm.Type {
+ t := tm.ctx.StructCreateNamed(tname)
+ t.StructSetBody([]llvm.Type{
+ llvm.PointerType(elemtyp, 0),
+ tm.inttype,
+ tm.inttype,
+ }, false)
+ return t
+}
+
+func (tm *TypeMap) makeSlice(values []llvm.Value, slicetyp llvm.Type) llvm.Value {
+ ptrtyp := slicetyp.StructElementTypes()[0]
+ var globalptr llvm.Value
+ if len(values) > 0 {
+ array := llvm.ConstArray(ptrtyp.ElementType(), values)
+ globalptr = llvm.AddGlobal(tm.module, array.Type(), "")
+ globalptr.SetGlobalConstant(true)
+ globalptr.SetLinkage(llvm.InternalLinkage)
+ globalptr.SetInitializer(array)
+ globalptr = llvm.ConstBitCast(globalptr, ptrtyp)
+ } else {
+ globalptr = llvm.ConstNull(ptrtyp)
+ }
+ len_ := llvm.ConstInt(tm.inttype, uint64(len(values)), false)
+ slice := llvm.ConstNull(slicetyp)
+ slice = llvm.ConstInsertValue(slice, globalptr, []uint32{0})
+ slice = llvm.ConstInsertValue(slice, len_, []uint32{1})
+ slice = llvm.ConstInsertValue(slice, len_, []uint32{2})
+ return slice
+}
+
+func isGlobalObject(obj types.Object) bool {
+ pkg := obj.Pkg()
+ return pkg == nil || obj.Parent() == pkg.Scope()
+}
diff --git a/irgen/types.go b/irgen/types.go
new file mode 100644
index 0000000..b9c8973
--- /dev/null
+++ b/irgen/types.go
@@ -0,0 +1,22 @@
+//===- types.go - convenience functions for types -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements convenience functions for dealing with types.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func deref(t types.Type) types.Type {
+ return t.Underlying().(*types.Pointer).Elem()
+}
diff --git a/irgen/utils.go b/irgen/utils.go
new file mode 100644
index 0000000..398a7cb
--- /dev/null
+++ b/irgen/utils.go
@@ -0,0 +1,40 @@
+//===- utils.go - misc utils ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements miscellaneous utilities for IR generation.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+func (fr *frame) loadOrNull(cond, ptr llvm.Value, ty types.Type) *govalue {
+ startbb := fr.builder.GetInsertBlock()
+ loadbb := llvm.AddBasicBlock(fr.function, "")
+ contbb := llvm.AddBasicBlock(fr.function, "")
+ fr.builder.CreateCondBr(cond, loadbb, contbb)
+
+ fr.builder.SetInsertPointAtEnd(loadbb)
+ llty := fr.types.ToLLVM(ty)
+ typedptr := fr.builder.CreateBitCast(ptr, llvm.PointerType(llty, 0), "")
+ loadedval := fr.builder.CreateLoad(typedptr, "")
+ fr.builder.CreateBr(contbb)
+
+ fr.builder.SetInsertPointAtEnd(contbb)
+ llv := fr.builder.CreatePHI(llty, "")
+ llv.AddIncoming(
+ []llvm.Value{llvm.ConstNull(llty), loadedval},
+ []llvm.BasicBlock{startbb, loadbb},
+ )
+ return newValue(llv, ty)
+}
diff --git a/irgen/value.go b/irgen/value.go
new file mode 100644
index 0000000..1299981
--- /dev/null
+++ b/irgen/value.go
@@ -0,0 +1,658 @@
+//===- value.go - govalue and operations ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the govalue type, which combines an LLVM value with its Go
+// type, and implements various basic operations on govalues.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+import (
+ "fmt"
+ "go/token"
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llvm/bindings/go/llvm"
+)
+
+// govalue contains an LLVM value and a Go type,
+// representing the result of a Go expression.
+type govalue struct {
+ value llvm.Value
+ typ types.Type
+}
+
+func (v *govalue) String() string {
+ return fmt.Sprintf("[llgo.govalue typ:%s value:%v]", v.typ, v.value)
+}
+
+// Create a new dynamic value from a (LLVM Value, Type) pair.
+func newValue(v llvm.Value, t types.Type) *govalue {
+ return &govalue{v, t}
+}
+
+// TODO(axw) remove this, use .typ directly
+func (v *govalue) Type() types.Type {
+ return v.typ
+}
+
+// newValueFromConst converts a constant value to an LLVM value.
+func (fr *frame) newValueFromConst(v exact.Value, typ types.Type) *govalue {
+ switch {
+ case v == nil:
+ llvmtyp := fr.types.ToLLVM(typ)
+ return newValue(llvm.ConstNull(llvmtyp), typ)
+
+ case isString(typ):
+ if isUntyped(typ) {
+ typ = types.Typ[types.String]
+ }
+ llvmtyp := fr.types.ToLLVM(typ)
+ strval := exact.StringVal(v)
+ strlen := len(strval)
+ i8ptr := llvm.PointerType(llvm.Int8Type(), 0)
+ var ptr llvm.Value
+ if strlen > 0 {
+ init := llvm.ConstString(strval, false)
+ ptr = llvm.AddGlobal(fr.module.Module, init.Type(), "")
+ ptr.SetInitializer(init)
+ ptr.SetLinkage(llvm.InternalLinkage)
+ ptr = llvm.ConstBitCast(ptr, i8ptr)
+ } else {
+ ptr = llvm.ConstNull(i8ptr)
+ }
+ len_ := llvm.ConstInt(fr.types.inttype, uint64(strlen), false)
+ llvmvalue := llvm.Undef(llvmtyp)
+ llvmvalue = llvm.ConstInsertValue(llvmvalue, ptr, []uint32{0})
+ llvmvalue = llvm.ConstInsertValue(llvmvalue, len_, []uint32{1})
+ return newValue(llvmvalue, typ)
+
+ case isInteger(typ):
+ if isUntyped(typ) {
+ typ = types.Typ[types.Int]
+ }
+ llvmtyp := fr.types.ToLLVM(typ)
+ var llvmvalue llvm.Value
+ if isUnsigned(typ) {
+ v, _ := exact.Uint64Val(v)
+ llvmvalue = llvm.ConstInt(llvmtyp, v, false)
+ } else {
+ v, _ := exact.Int64Val(v)
+ llvmvalue = llvm.ConstInt(llvmtyp, uint64(v), true)
+ }
+ return newValue(llvmvalue, typ)
+
+ case isBoolean(typ):
+ if isUntyped(typ) {
+ typ = types.Typ[types.Bool]
+ }
+ return newValue(boolLLVMValue(exact.BoolVal(v)), typ)
+
+ case isFloat(typ):
+ if isUntyped(typ) {
+ typ = types.Typ[types.Float64]
+ }
+ llvmtyp := fr.types.ToLLVM(typ)
+ floatval, _ := exact.Float64Val(v)
+ llvmvalue := llvm.ConstFloat(llvmtyp, floatval)
+ return newValue(llvmvalue, typ)
+
+ case typ == types.Typ[types.UnsafePointer]:
+ llvmtyp := fr.types.ToLLVM(typ)
+ v, _ := exact.Uint64Val(v)
+ llvmvalue := llvm.ConstInt(fr.types.inttype, v, false)
+ llvmvalue = llvm.ConstIntToPtr(llvmvalue, llvmtyp)
+ return newValue(llvmvalue, typ)
+
+ case isComplex(typ):
+ if isUntyped(typ) {
+ typ = types.Typ[types.Complex128]
+ }
+ llvmtyp := fr.types.ToLLVM(typ)
+ floattyp := llvmtyp.StructElementTypes()[0]
+ llvmvalue := llvm.ConstNull(llvmtyp)
+ realv := exact.Real(v)
+ imagv := exact.Imag(v)
+ realfloatval, _ := exact.Float64Val(realv)
+ imagfloatval, _ := exact.Float64Val(imagv)
+ llvmre := llvm.ConstFloat(floattyp, realfloatval)
+ llvmim := llvm.ConstFloat(floattyp, imagfloatval)
+ llvmvalue = llvm.ConstInsertValue(llvmvalue, llvmre, []uint32{0})
+ llvmvalue = llvm.ConstInsertValue(llvmvalue, llvmim, []uint32{1})
+ return newValue(llvmvalue, typ)
+ }
+
+ // Special case for string -> [](byte|rune)
+ if u, ok := typ.Underlying().(*types.Slice); ok && isInteger(u.Elem()) {
+ if v.Kind() == exact.String {
+ strval := fr.newValueFromConst(v, types.Typ[types.String])
+ return fr.convert(strval, typ)
+ }
+ }
+
+ panic(fmt.Sprintf("unhandled: t=%s(%T), v=%v(%T)", typ, typ, v, v))
+}
+
+func (fr *frame) binaryOp(lhs *govalue, op token.Token, rhs *govalue) *govalue {
+ if op == token.NEQ {
+ result := fr.binaryOp(lhs, token.EQL, rhs)
+ return fr.unaryOp(result, token.NOT)
+ }
+
+ var result llvm.Value
+ b := fr.builder
+
+ switch typ := lhs.typ.Underlying().(type) {
+ case *types.Struct:
+ // TODO(axw) use runtime equality algorithm (will be suitably inlined).
+ // For now, we compare all fields unconditionally and bitwise AND
+ // to avoid branching (i.e. so we don't create additional blocks).
+ value := newValue(boolLLVMValue(true), types.Typ[types.Bool])
+ for i := 0; i < typ.NumFields(); i++ {
+ t := typ.Field(i).Type()
+ lhs := newValue(b.CreateExtractValue(lhs.value, i, ""), t)
+ rhs := newValue(b.CreateExtractValue(rhs.value, i, ""), t)
+ value = fr.binaryOp(value, token.AND, fr.binaryOp(lhs, token.EQL, rhs))
+ }
+ return value
+
+ case *types.Array:
+ // TODO(pcc): as above.
+ value := newValue(boolLLVMValue(true), types.Typ[types.Bool])
+ t := typ.Elem()
+ for i := int64(0); i < typ.Len(); i++ {
+ lhs := newValue(b.CreateExtractValue(lhs.value, int(i), ""), t)
+ rhs := newValue(b.CreateExtractValue(rhs.value, int(i), ""), t)
+ value = fr.binaryOp(value, token.AND, fr.binaryOp(lhs, token.EQL, rhs))
+ }
+ return value
+
+ case *types.Slice:
+ // []T == nil or nil == []T
+ lhsptr := b.CreateExtractValue(lhs.value, 0, "")
+ rhsptr := b.CreateExtractValue(rhs.value, 0, "")
+ isnil := b.CreateICmp(llvm.IntEQ, lhsptr, rhsptr, "")
+ isnil = b.CreateZExt(isnil, llvm.Int8Type(), "")
+ return newValue(isnil, types.Typ[types.Bool])
+
+ case *types.Signature:
+ // func == nil or nil == func
+ isnil := b.CreateICmp(llvm.IntEQ, lhs.value, rhs.value, "")
+ isnil = b.CreateZExt(isnil, llvm.Int8Type(), "")
+ return newValue(isnil, types.Typ[types.Bool])
+
+ case *types.Interface:
+ return fr.compareInterfaces(lhs, rhs)
+ }
+
+ // Strings.
+ if isString(lhs.typ) {
+ if isString(rhs.typ) {
+ switch op {
+ case token.ADD:
+ return fr.concatenateStrings(lhs, rhs)
+ case token.EQL, token.LSS, token.GTR, token.LEQ, token.GEQ:
+ return fr.compareStrings(lhs, rhs, op)
+ default:
+ panic(fmt.Sprint("Unimplemented operator: ", op))
+ }
+ }
+ panic("unimplemented")
+ }
+
+ // Complex numbers.
+ if isComplex(lhs.typ) {
+ // XXX Should we represent complex numbers as vectors?
+ lhsval := lhs.value
+ rhsval := rhs.value
+ a_ := b.CreateExtractValue(lhsval, 0, "")
+ b_ := b.CreateExtractValue(lhsval, 1, "")
+ c_ := b.CreateExtractValue(rhsval, 0, "")
+ d_ := b.CreateExtractValue(rhsval, 1, "")
+ switch op {
+ case token.QUO:
+ // (a+bi)/(c+di) = (ac+bd)/(c**2+d**2) + (bc-ad)/(c**2+d**2)i
+ ac := b.CreateFMul(a_, c_, "")
+ bd := b.CreateFMul(b_, d_, "")
+ bc := b.CreateFMul(b_, c_, "")
+ ad := b.CreateFMul(a_, d_, "")
+ cpow2 := b.CreateFMul(c_, c_, "")
+ dpow2 := b.CreateFMul(d_, d_, "")
+ denom := b.CreateFAdd(cpow2, dpow2, "")
+ realnumer := b.CreateFAdd(ac, bd, "")
+ imagnumer := b.CreateFSub(bc, ad, "")
+ real_ := b.CreateFDiv(realnumer, denom, "")
+ imag_ := b.CreateFDiv(imagnumer, denom, "")
+ lhsval = b.CreateInsertValue(lhsval, real_, 0, "")
+ result = b.CreateInsertValue(lhsval, imag_, 1, "")
+ case token.MUL:
+ // (a+bi)(c+di) = (ac-bd)+(bc+ad)i
+ ac := b.CreateFMul(a_, c_, "")
+ bd := b.CreateFMul(b_, d_, "")
+ bc := b.CreateFMul(b_, c_, "")
+ ad := b.CreateFMul(a_, d_, "")
+ real_ := b.CreateFSub(ac, bd, "")
+ imag_ := b.CreateFAdd(bc, ad, "")
+ lhsval = b.CreateInsertValue(lhsval, real_, 0, "")
+ result = b.CreateInsertValue(lhsval, imag_, 1, "")
+ case token.ADD:
+ real_ := b.CreateFAdd(a_, c_, "")
+ imag_ := b.CreateFAdd(b_, d_, "")
+ lhsval = b.CreateInsertValue(lhsval, real_, 0, "")
+ result = b.CreateInsertValue(lhsval, imag_, 1, "")
+ case token.SUB:
+ real_ := b.CreateFSub(a_, c_, "")
+ imag_ := b.CreateFSub(b_, d_, "")
+ lhsval = b.CreateInsertValue(lhsval, real_, 0, "")
+ result = b.CreateInsertValue(lhsval, imag_, 1, "")
+ case token.EQL:
+ realeq := b.CreateFCmp(llvm.FloatOEQ, a_, c_, "")
+ imageq := b.CreateFCmp(llvm.FloatOEQ, b_, d_, "")
+ result = b.CreateAnd(realeq, imageq, "")
+ result = b.CreateZExt(result, llvm.Int8Type(), "")
+ return newValue(result, types.Typ[types.Bool])
+ default:
+ panic(fmt.Errorf("unhandled operator: %v", op))
+ }
+ return newValue(result, lhs.typ)
+ }
+
+ // Floats and integers.
+ // TODO determine the NaN rules.
+
+ switch op {
+ case token.MUL:
+ if isFloat(lhs.typ) {
+ result = b.CreateFMul(lhs.value, rhs.value, "")
+ } else {
+ result = b.CreateMul(lhs.value, rhs.value, "")
+ }
+ return newValue(result, lhs.typ)
+ case token.QUO:
+ switch {
+ case isFloat(lhs.typ):
+ result = b.CreateFDiv(lhs.value, rhs.value, "")
+ case !isUnsigned(lhs.typ):
+ result = b.CreateSDiv(lhs.value, rhs.value, "")
+ default:
+ result = b.CreateUDiv(lhs.value, rhs.value, "")
+ }
+ return newValue(result, lhs.typ)
+ case token.REM:
+ switch {
+ case isFloat(lhs.typ):
+ result = b.CreateFRem(lhs.value, rhs.value, "")
+ case !isUnsigned(lhs.typ):
+ result = b.CreateSRem(lhs.value, rhs.value, "")
+ default:
+ result = b.CreateURem(lhs.value, rhs.value, "")
+ }
+ return newValue(result, lhs.typ)
+ case token.ADD:
+ if isFloat(lhs.typ) {
+ result = b.CreateFAdd(lhs.value, rhs.value, "")
+ } else {
+ result = b.CreateAdd(lhs.value, rhs.value, "")
+ }
+ return newValue(result, lhs.typ)
+ case token.SUB:
+ if isFloat(lhs.typ) {
+ result = b.CreateFSub(lhs.value, rhs.value, "")
+ } else {
+ result = b.CreateSub(lhs.value, rhs.value, "")
+ }
+ return newValue(result, lhs.typ)
+ case token.SHL, token.SHR:
+ return fr.shift(lhs, rhs, op)
+ case token.EQL:
+ if isFloat(lhs.typ) {
+ result = b.CreateFCmp(llvm.FloatOEQ, lhs.value, rhs.value, "")
+ } else {
+ result = b.CreateICmp(llvm.IntEQ, lhs.value, rhs.value, "")
+ }
+ result = b.CreateZExt(result, llvm.Int8Type(), "")
+ return newValue(result, types.Typ[types.Bool])
+ case token.LSS:
+ switch {
+ case isFloat(lhs.typ):
+ result = b.CreateFCmp(llvm.FloatOLT, lhs.value, rhs.value, "")
+ case !isUnsigned(lhs.typ):
+ result = b.CreateICmp(llvm.IntSLT, lhs.value, rhs.value, "")
+ default:
+ result = b.CreateICmp(llvm.IntULT, lhs.value, rhs.value, "")
+ }
+ result = b.CreateZExt(result, llvm.Int8Type(), "")
+ return newValue(result, types.Typ[types.Bool])
+ case token.LEQ:
+ switch {
+ case isFloat(lhs.typ):
+ result = b.CreateFCmp(llvm.FloatOLE, lhs.value, rhs.value, "")
+ case !isUnsigned(lhs.typ):
+ result = b.CreateICmp(llvm.IntSLE, lhs.value, rhs.value, "")
+ default:
+ result = b.CreateICmp(llvm.IntULE, lhs.value, rhs.value, "")
+ }
+ result = b.CreateZExt(result, llvm.Int8Type(), "")
+ return newValue(result, types.Typ[types.Bool])
+ case token.GTR:
+ switch {
+ case isFloat(lhs.typ):
+ result = b.CreateFCmp(llvm.FloatOGT, lhs.value, rhs.value, "")
+ case !isUnsigned(lhs.typ):
+ result = b.CreateICmp(llvm.IntSGT, lhs.value, rhs.value, "")
+ default:
+ result = b.CreateICmp(llvm.IntUGT, lhs.value, rhs.value, "")
+ }
+ result = b.CreateZExt(result, llvm.Int8Type(), "")
+ return newValue(result, types.Typ[types.Bool])
+ case token.GEQ:
+ switch {
+ case isFloat(lhs.typ):
+ result = b.CreateFCmp(llvm.FloatOGE, lhs.value, rhs.value, "")
+ case !isUnsigned(lhs.typ):
+ result = b.CreateICmp(llvm.IntSGE, lhs.value, rhs.value, "")
+ default:
+ result = b.CreateICmp(llvm.IntUGE, lhs.value, rhs.value, "")
+ }
+ result = b.CreateZExt(result, llvm.Int8Type(), "")
+ return newValue(result, types.Typ[types.Bool])
+ case token.AND: // a & b
+ result = b.CreateAnd(lhs.value, rhs.value, "")
+ return newValue(result, lhs.typ)
+ case token.AND_NOT: // a &^ b
+ rhsval := rhs.value
+ rhsval = b.CreateXor(rhsval, llvm.ConstAllOnes(rhsval.Type()), "")
+ result = b.CreateAnd(lhs.value, rhsval, "")
+ return newValue(result, lhs.typ)
+ case token.OR: // a | b
+ result = b.CreateOr(lhs.value, rhs.value, "")
+ return newValue(result, lhs.typ)
+ case token.XOR: // a ^ b
+ result = b.CreateXor(lhs.value, rhs.value, "")
+ return newValue(result, lhs.typ)
+ default:
+ panic(fmt.Sprint("Unimplemented operator: ", op))
+ }
+ panic("unreachable")
+}
+
+func (fr *frame) shift(lhs *govalue, rhs *govalue, op token.Token) *govalue {
+ rhs = fr.convert(rhs, lhs.Type())
+ lhsval := lhs.value
+ bits := rhs.value
+ unsigned := isUnsigned(lhs.Type())
+ // Shifting >= width of lhs yields undefined behaviour, so we must select.
+ max := llvm.ConstInt(bits.Type(), uint64(lhsval.Type().IntTypeWidth()-1), false)
+ var result llvm.Value
+ lessEqualWidth := fr.builder.CreateICmp(llvm.IntULE, bits, max, "")
+ if !unsigned && op == token.SHR {
+ bits := fr.builder.CreateSelect(lessEqualWidth, bits, max, "")
+ result = fr.builder.CreateAShr(lhsval, bits, "")
+ } else {
+ if op == token.SHL {
+ result = fr.builder.CreateShl(lhsval, bits, "")
+ } else {
+ result = fr.builder.CreateLShr(lhsval, bits, "")
+ }
+ zero := llvm.ConstNull(lhsval.Type())
+ result = fr.builder.CreateSelect(lessEqualWidth, result, zero, "")
+ }
+ return newValue(result, lhs.typ)
+}
+
+func (fr *frame) unaryOp(v *govalue, op token.Token) *govalue {
+ switch op {
+ case token.SUB:
+ var value llvm.Value
+ if isComplex(v.typ) {
+ realv := fr.builder.CreateExtractValue(v.value, 0, "")
+ imagv := fr.builder.CreateExtractValue(v.value, 1, "")
+ negzero := llvm.ConstFloatFromString(realv.Type(), "-0")
+ realv = fr.builder.CreateFSub(negzero, realv, "")
+ imagv = fr.builder.CreateFSub(negzero, imagv, "")
+ value = llvm.Undef(v.value.Type())
+ value = fr.builder.CreateInsertValue(value, realv, 0, "")
+ value = fr.builder.CreateInsertValue(value, imagv, 1, "")
+ } else if isFloat(v.typ) {
+ negzero := llvm.ConstFloatFromString(fr.types.ToLLVM(v.Type()), "-0")
+ value = fr.builder.CreateFSub(negzero, v.value, "")
+ } else {
+ value = fr.builder.CreateNeg(v.value, "")
+ }
+ return newValue(value, v.typ)
+ case token.ADD:
+ return v // No-op
+ case token.NOT:
+ value := fr.builder.CreateXor(v.value, boolLLVMValue(true), "")
+ return newValue(value, v.typ)
+ case token.XOR:
+ lhs := v.value
+ rhs := llvm.ConstAllOnes(lhs.Type())
+ value := fr.builder.CreateXor(lhs, rhs, "")
+ return newValue(value, v.typ)
+ default:
+ panic(fmt.Sprintf("Unhandled operator: %s", op))
+ }
+}
+
+func (fr *frame) convert(v *govalue, dsttyp types.Type) *govalue {
+ b := fr.builder
+
+ // If it's a stack allocated value, we'll want to compare the
+ // value type, not the pointer type.
+ srctyp := v.typ
+
+ // Get the underlying type, if any.
+ origdsttyp := dsttyp
+ dsttyp = dsttyp.Underlying()
+ srctyp = srctyp.Underlying()
+
+ // Identical (underlying) types? Just swap in the destination type.
+ if types.Identical(srctyp, dsttyp) {
+ return newValue(v.value, origdsttyp)
+ }
+
+ // Both pointer types with identical underlying types? Same as above.
+ if srctyp, ok := srctyp.(*types.Pointer); ok {
+ if dsttyp, ok := dsttyp.(*types.Pointer); ok {
+ srctyp := srctyp.Elem().Underlying()
+ dsttyp := dsttyp.Elem().Underlying()
+ if types.Identical(srctyp, dsttyp) {
+ return newValue(v.value, origdsttyp)
+ }
+ }
+ }
+
+ // string ->
+ if isString(srctyp) {
+ // (untyped) string -> string
+ // XXX should untyped strings be able to escape go/types?
+ if isString(dsttyp) {
+ return newValue(v.value, origdsttyp)
+ }
+
+ // string -> []byte
+ if isSlice(dsttyp, types.Byte) {
+ value := v.value
+ strdata := fr.builder.CreateExtractValue(value, 0, "")
+ strlen := fr.builder.CreateExtractValue(value, 1, "")
+
+ // Data must be copied, to prevent changes in
+ // the byte slice from mutating the string.
+ newdata := fr.createMalloc(strlen)
+ fr.memcpy(newdata, strdata, strlen)
+
+ struct_ := llvm.Undef(fr.types.ToLLVM(dsttyp))
+ struct_ = fr.builder.CreateInsertValue(struct_, newdata, 0, "")
+ struct_ = fr.builder.CreateInsertValue(struct_, strlen, 1, "")
+ struct_ = fr.builder.CreateInsertValue(struct_, strlen, 2, "")
+ return newValue(struct_, origdsttyp)
+ }
+
+ // string -> []rune
+ if isSlice(dsttyp, types.Rune) {
+ return fr.stringToRuneSlice(v)
+ }
+ }
+
+ // []byte -> string
+ if isSlice(srctyp, types.Byte) && isString(dsttyp) {
+ value := v.value
+ data := fr.builder.CreateExtractValue(value, 0, "")
+ len := fr.builder.CreateExtractValue(value, 1, "")
+
+ // Data must be copied, to prevent changes in
+ // the byte slice from mutating the string.
+ newdata := fr.createMalloc(len)
+ fr.memcpy(newdata, data, len)
+
+ struct_ := llvm.Undef(fr.types.ToLLVM(types.Typ[types.String]))
+ struct_ = fr.builder.CreateInsertValue(struct_, newdata, 0, "")
+ struct_ = fr.builder.CreateInsertValue(struct_, len, 1, "")
+ return newValue(struct_, types.Typ[types.String])
+ }
+
+ // []rune -> string
+ if isSlice(srctyp, types.Rune) && isString(dsttyp) {
+ return fr.runeSliceToString(v)
+ }
+
+ // rune -> string
+ if isString(dsttyp) && isInteger(srctyp) {
+ return fr.runeToString(v)
+ }
+
+ // Unsafe pointer conversions.
+ llvm_type := fr.types.ToLLVM(dsttyp)
+ if dsttyp == types.Typ[types.UnsafePointer] { // X -> unsafe.Pointer
+ if _, isptr := srctyp.(*types.Pointer); isptr {
+ return newValue(v.value, origdsttyp)
+ } else if srctyp == types.Typ[types.Uintptr] {
+ value := b.CreateIntToPtr(v.value, llvm_type, "")
+ return newValue(value, origdsttyp)
+ }
+ } else if srctyp == types.Typ[types.UnsafePointer] { // unsafe.Pointer -> X
+ if _, isptr := dsttyp.(*types.Pointer); isptr {
+ return newValue(v.value, origdsttyp)
+ } else if dsttyp == types.Typ[types.Uintptr] {
+ value := b.CreatePtrToInt(v.value, llvm_type, "")
+ return newValue(value, origdsttyp)
+ }
+ }
+
+ lv := v.value
+ srcType := lv.Type()
+ switch srcType.TypeKind() {
+ case llvm.IntegerTypeKind:
+ switch llvm_type.TypeKind() {
+ case llvm.IntegerTypeKind:
+ srcBits := srcType.IntTypeWidth()
+ dstBits := llvm_type.IntTypeWidth()
+ delta := srcBits - dstBits
+ switch {
+ case delta < 0:
+ if !isUnsigned(srctyp) {
+ lv = b.CreateSExt(lv, llvm_type, "")
+ } else {
+ lv = b.CreateZExt(lv, llvm_type, "")
+ }
+ case delta > 0:
+ lv = b.CreateTrunc(lv, llvm_type, "")
+ }
+ return newValue(lv, origdsttyp)
+ case llvm.FloatTypeKind, llvm.DoubleTypeKind:
+ if !isUnsigned(v.Type()) {
+ lv = b.CreateSIToFP(lv, llvm_type, "")
+ } else {
+ lv = b.CreateUIToFP(lv, llvm_type, "")
+ }
+ return newValue(lv, origdsttyp)
+ }
+ case llvm.DoubleTypeKind:
+ switch llvm_type.TypeKind() {
+ case llvm.FloatTypeKind:
+ lv = b.CreateFPTrunc(lv, llvm_type, "")
+ return newValue(lv, origdsttyp)
+ case llvm.IntegerTypeKind:
+ if !isUnsigned(dsttyp) {
+ lv = b.CreateFPToSI(lv, llvm_type, "")
+ } else {
+ lv = b.CreateFPToUI(lv, llvm_type, "")
+ }
+ return newValue(lv, origdsttyp)
+ }
+ case llvm.FloatTypeKind:
+ switch llvm_type.TypeKind() {
+ case llvm.DoubleTypeKind:
+ lv = b.CreateFPExt(lv, llvm_type, "")
+ return newValue(lv, origdsttyp)
+ case llvm.IntegerTypeKind:
+ if !isUnsigned(dsttyp) {
+ lv = b.CreateFPToSI(lv, llvm_type, "")
+ } else {
+ lv = b.CreateFPToUI(lv, llvm_type, "")
+ }
+ return newValue(lv, origdsttyp)
+ }
+ }
+
+ // Complex -> complex. Complexes are only convertible to other
+// complexes, constant conversions aside. So we can just check the
+ // source type here; given that the types are not identical
+ // (checked above), we can assume the destination type is the alternate
+ // complex type.
+ if isComplex(srctyp) {
+ var fpcast func(llvm.Builder, llvm.Value, llvm.Type, string) llvm.Value
+ var fptype llvm.Type
+ if srctyp == types.Typ[types.Complex64] {
+ fpcast = (llvm.Builder).CreateFPExt
+ fptype = llvm.DoubleType()
+ } else {
+ fpcast = (llvm.Builder).CreateFPTrunc
+ fptype = llvm.FloatType()
+ }
+ if fpcast != nil {
+ realv := b.CreateExtractValue(lv, 0, "")
+ imagv := b.CreateExtractValue(lv, 1, "")
+ realv = fpcast(b, realv, fptype, "")
+ imagv = fpcast(b, imagv, fptype, "")
+ lv = llvm.Undef(fr.types.ToLLVM(dsttyp))
+ lv = b.CreateInsertValue(lv, realv, 0, "")
+ lv = b.CreateInsertValue(lv, imagv, 1, "")
+ return newValue(lv, origdsttyp)
+ }
+ }
+ panic(fmt.Sprintf("unimplemented conversion: %s (%s) -> %s", v.typ, lv.Type(), origdsttyp))
+}
+
+// extractRealValue extracts the real component of a complex number.
+func (fr *frame) extractRealValue(v *govalue) *govalue {
+ component := fr.builder.CreateExtractValue(v.value, 0, "")
+ if component.Type().TypeKind() == llvm.FloatTypeKind {
+ return newValue(component, types.Typ[types.Float32])
+ }
+ return newValue(component, types.Typ[types.Float64])
+}
+
+// extractImagValue extracts the imaginary component of a complex number.
+func (fr *frame) extractImagValue(v *govalue) *govalue {
+ component := fr.builder.CreateExtractValue(v.value, 1, "")
+ if component.Type().TypeKind() == llvm.FloatTypeKind {
+ return newValue(component, types.Typ[types.Float32])
+ }
+ return newValue(component, types.Typ[types.Float64])
+}
+
+func boolLLVMValue(v bool) (lv llvm.Value) {
+ if v {
+ return llvm.ConstInt(llvm.Int8Type(), 1, false)
+ }
+ return llvm.ConstNull(llvm.Int8Type())
+}
diff --git a/irgen/version.go b/irgen/version.go
new file mode 100644
index 0000000..5cba220
--- /dev/null
+++ b/irgen/version.go
@@ -0,0 +1,23 @@
+//===- version.go - version info ------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file specifies the Go version supported by the IR generator.
+//
+//===----------------------------------------------------------------------===//
+
+package irgen
+
+const (
+ goVersion = "go1.3"
+)
+
+// GoVersion returns the version of Go that we are targeting.
+func GoVersion() string {
+ return goVersion
+}
diff --git a/libgo-noext.diff b/libgo-noext.diff
new file mode 100644
index 0000000..2277270
--- /dev/null
+++ b/libgo-noext.diff
@@ -0,0 +1,1788 @@
+diff -r 225a208260a6 libgo/runtime/chan.goc
+--- a/libgo/runtime/chan.goc Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/chan.goc Tue Sep 23 15:59:57 2014 -0700
+@@ -115,7 +115,7 @@
+ mysg.releasetime = -1;
+ }
+
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ if(raceenabled)
+ runtime_racereadpc(c, pc, chansend);
+ if(c->closed)
+@@ -128,7 +128,7 @@
+ if(sg != nil) {
+ if(raceenabled)
+ racesync(c, sg);
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+
+ gp = sg->g;
+ gp->param = sg;
+@@ -141,7 +141,7 @@
+ }
+
+ if(!block) {
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ return false;
+ }
+
+@@ -150,10 +150,10 @@
+ mysg.selectdone = nil;
+ g->param = nil;
+ enqueue(&c->sendq, &mysg);
+- runtime_parkunlock(c, "chan send");
++ runtime_parkunlock(&c->lock, "chan send");
+
+ if(g->param == nil) {
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ if(!c->closed)
+ runtime_throw("chansend: spurious wakeup");
+ goto closed;
+@@ -170,16 +170,16 @@
+
+ if(c->qcount >= c->dataqsiz) {
+ if(!block) {
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ return false;
+ }
+ mysg.g = g;
+ mysg.elem = nil;
+ mysg.selectdone = nil;
+ enqueue(&c->sendq, &mysg);
+- runtime_parkunlock(c, "chan send");
++ runtime_parkunlock(&c->lock, "chan send");
+
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ goto asynch;
+ }
+
+@@ -196,18 +196,18 @@
+ sg = dequeue(&c->recvq);
+ if(sg != nil) {
+ gp = sg->g;
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ if(sg->releasetime)
+ sg->releasetime = runtime_cputicks();
+ runtime_ready(gp);
+ } else
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ if(mysg.releasetime > 0)
+ runtime_blockevent(mysg.releasetime - t0, 2);
+ return true;
+
+ closed:
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ runtime_panicstring("send on closed channel");
+ return false; // not reached
+ }
+@@ -247,7 +247,7 @@
+ mysg.releasetime = -1;
+ }
+
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ if(c->dataqsiz > 0)
+ goto asynch;
+
+@@ -258,7 +258,7 @@
+ if(sg != nil) {
+ if(raceenabled)
+ racesync(c, sg);
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+
+ if(ep != nil)
+ runtime_memmove(ep, sg->elem, c->elemsize);
+@@ -274,7 +274,7 @@
+ }
+
+ if(!block) {
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ return false;
+ }
+
+@@ -283,10 +283,10 @@
+ mysg.selectdone = nil;
+ g->param = nil;
+ enqueue(&c->recvq, &mysg);
+- runtime_parkunlock(c, "chan receive");
++ runtime_parkunlock(&c->lock, "chan receive");
+
+ if(g->param == nil) {
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ if(!c->closed)
+ runtime_throw("chanrecv: spurious wakeup");
+ goto closed;
+@@ -304,7 +304,7 @@
+ goto closed;
+
+ if(!block) {
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ if(received != nil)
+ *received = false;
+ return false;
+@@ -313,9 +313,9 @@
+ mysg.elem = nil;
+ mysg.selectdone = nil;
+ enqueue(&c->recvq, &mysg);
+- runtime_parkunlock(c, "chan receive");
++ runtime_parkunlock(&c->lock, "chan receive");
+
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ goto asynch;
+ }
+
+@@ -334,12 +334,12 @@
+ sg = dequeue(&c->sendq);
+ if(sg != nil) {
+ gp = sg->g;
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ if(sg->releasetime)
+ sg->releasetime = runtime_cputicks();
+ runtime_ready(gp);
+ } else
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+
+ if(received != nil)
+ *received = true;
+@@ -354,7 +354,7 @@
+ *received = false;
+ if(raceenabled)
+ runtime_raceacquire(c);
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ if(mysg.releasetime > 0)
+ runtime_blockevent(mysg.releasetime - t0, 2);
+ return true;
+@@ -628,7 +628,7 @@
+ c0 = sel->lockorder[i];
+ if(c0 && c0 != c) {
+ c = sel->lockorder[i];
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ }
+ }
+ }
+@@ -656,7 +656,7 @@
+ c = sel->lockorder[i];
+ if(i>0 && sel->lockorder[i-1] == c)
+ continue; // will unlock it on the next iteration
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ }
+ }
+
+@@ -1071,9 +1071,9 @@
+ if(runtime_gcwaiting())
+ runtime_gosched();
+
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ if(c->closed) {
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ runtime_panicstring("close of closed channel");
+ }
+
+@@ -1108,7 +1108,7 @@
+ runtime_ready(gp);
+ }
+
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ }
+
+ void
+diff -r 225a208260a6 libgo/runtime/chan.h
+--- a/libgo/runtime/chan.h Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/chan.h Tue Sep 23 15:59:57 2014 -0700
+@@ -39,7 +39,7 @@
+ uintgo recvx; // receive index
+ WaitQ recvq; // list of recv waiters
+ WaitQ sendq; // list of send waiters
+- Lock;
++ Lock lock;
+ };
+
+ // Buffer follows Hchan immediately in memory.
+diff -r 225a208260a6 libgo/runtime/heapdump.c
+--- a/libgo/runtime/heapdump.c Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/heapdump.c Tue Sep 23 15:59:57 2014 -0700
+@@ -387,7 +387,7 @@
+ if(sp->kind != KindSpecialFinalizer)
+ continue;
+ spf = (SpecialFinalizer*)sp;
+- p = (byte*)((s->start << PageShift) + spf->offset);
++ p = (byte*)((s->start << PageShift) + spf->special.offset);
+ dumpfinalizer(p, spf->fn, spf->ft, spf->ot);
+ }
+ }
+@@ -566,7 +566,7 @@
+ if(sp->kind != KindSpecialProfile)
+ continue;
+ spp = (SpecialProfile*)sp;
+- p = (byte*)((s->start << PageShift) + spp->offset);
++ p = (byte*)((s->start << PageShift) + spp->special.offset);
+ dumpint(TagAllocSample);
+ dumpint((uintptr)p);
+ dumpint((uintptr)spp->b);
+diff -r 225a208260a6 libgo/runtime/malloc.goc
+--- a/libgo/runtime/malloc.goc Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/malloc.goc Tue Sep 23 15:59:57 2014 -0700
+@@ -440,9 +440,9 @@
+ m->mcache->local_nlookup++;
+ if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
+ // purge cache stats to prevent overflow
+- runtime_lock(&runtime_mheap);
++ runtime_lock(&runtime_mheap.lock);
+ runtime_purgecachedstats(m->mcache);
+- runtime_unlock(&runtime_mheap);
++ runtime_unlock(&runtime_mheap.lock);
+ }
+
+ s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
+@@ -743,7 +743,7 @@
+
+ static struct
+ {
+- Lock;
++ Lock lock;
+ byte* pos;
+ byte* end;
+ } persistent;
+@@ -772,19 +772,19 @@
+ align = 8;
+ if(size >= PersistentAllocMaxBlock)
+ return runtime_SysAlloc(size, stat);
+- runtime_lock(&persistent);
++ runtime_lock(&persistent.lock);
+ persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
+ if(persistent.pos + size > persistent.end) {
+ persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats.other_sys);
+ if(persistent.pos == nil) {
+- runtime_unlock(&persistent);
++ runtime_unlock(&persistent.lock);
+ runtime_throw("runtime: cannot allocate memory");
+ }
+ persistent.end = persistent.pos + PersistentAllocChunk;
+ }
+ p = persistent.pos;
+ persistent.pos += size;
+- runtime_unlock(&persistent);
++ runtime_unlock(&persistent.lock);
+ if(stat != &mstats.other_sys) {
+ // reaccount the allocation against provided stat
+ runtime_xadd64(stat, size);
+diff -r 225a208260a6 libgo/runtime/malloc.h
+--- a/libgo/runtime/malloc.h Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/malloc.h Tue Sep 23 15:59:57 2014 -0700
+@@ -390,7 +390,7 @@
+ typedef struct SpecialFinalizer SpecialFinalizer;
+ struct SpecialFinalizer
+ {
+- Special;
++ Special special;
+ FuncVal* fn;
+ const FuncType* ft;
+ const PtrType* ot;
+@@ -401,7 +401,7 @@
+ typedef struct SpecialProfile SpecialProfile;
+ struct SpecialProfile
+ {
+- Special;
++ Special special;
+ Bucket* b;
+ };
+
+@@ -458,7 +458,7 @@
+ // Central list of free objects of a given size.
+ struct MCentral
+ {
+- Lock;
++ Lock lock;
+ int32 sizeclass;
+ MSpan nonempty; // list of spans with a free object
+ MSpan empty; // list of spans with no free objects (or cached in an MCache)
+@@ -476,7 +476,7 @@
+ // but all the other global data is here too.
+ struct MHeap
+ {
+- Lock;
++ Lock lock;
+ MSpan free[MaxMHeapList]; // free lists of given length
+ MSpan freelarge; // free lists length >= MaxMHeapList
+ MSpan busy[MaxMHeapList]; // busy lists of large objects of given length
+@@ -505,7 +505,7 @@
+ // spaced CacheLineSize bytes apart, so that each MCentral.Lock
+ // gets its own cache line.
+ struct {
+- MCentral;
++ MCentral mcentral;
+ byte pad[64];
+ } central[NumSizeClasses];
+
+diff -r 225a208260a6 libgo/runtime/mcache.c
+--- a/libgo/runtime/mcache.c Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/mcache.c Tue Sep 23 15:59:57 2014 -0700
+@@ -23,9 +23,9 @@
+ MCache *c;
+ int32 i;
+
+- runtime_lock(&runtime_mheap);
++ runtime_lock(&runtime_mheap.lock);
+ c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
+- runtime_unlock(&runtime_mheap);
++ runtime_unlock(&runtime_mheap.lock);
+ runtime_memclr((byte*)c, sizeof(*c));
+ for(i = 0; i < NumSizeClasses; i++)
+ c->alloc[i] = &emptymspan;
+@@ -44,10 +44,10 @@
+ runtime_freemcache(MCache *c)
+ {
+ runtime_MCache_ReleaseAll(c);
+- runtime_lock(&runtime_mheap);
++ runtime_lock(&runtime_mheap.lock);
+ runtime_purgecachedstats(c);
+ runtime_FixAlloc_Free(&runtime_mheap.cachealloc, c);
+- runtime_unlock(&runtime_mheap);
++ runtime_unlock(&runtime_mheap.lock);
+ }
+
+ // Gets a span that has a free object in it and assigns it
+@@ -64,19 +64,19 @@
+ if(s->freelist != nil)
+ runtime_throw("refill on a nonempty span");
+ if(s != &emptymspan)
+- runtime_MCentral_UncacheSpan(&runtime_mheap.central[sizeclass], s);
++ runtime_MCentral_UncacheSpan(&runtime_mheap.central[sizeclass].mcentral, s);
+
+ // Push any explicitly freed objects to the central lists.
+ // Not required, but it seems like a good time to do it.
+ l = &c->free[sizeclass];
+ if(l->nlist > 0) {
+- runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
++ runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass].mcentral, l->list);
+ l->list = nil;
+ l->nlist = 0;
+ }
+
+ // Get a new cached span from the central lists.
+- s = runtime_MCentral_CacheSpan(&runtime_mheap.central[sizeclass]);
++ s = runtime_MCentral_CacheSpan(&runtime_mheap.central[sizeclass].mcentral);
+ if(s == nil)
+ runtime_throw("out of memory");
+ if(s->freelist == nil) {
+@@ -102,7 +102,7 @@
+ // We transfer a span at a time from MCentral to MCache,
+ // so we'll do the same in the other direction.
+ if(l->nlist >= (runtime_class_to_allocnpages[sizeclass]<<PageShift)/size) {
+- runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
++ runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass].mcentral, l->list);
+ l->list = nil;
+ l->nlist = 0;
+ }
+@@ -118,12 +118,12 @@
+ for(i=0; i<NumSizeClasses; i++) {
+ s = c->alloc[i];
+ if(s != &emptymspan) {
+- runtime_MCentral_UncacheSpan(&runtime_mheap.central[i], s);
++ runtime_MCentral_UncacheSpan(&runtime_mheap.central[i].mcentral, s);
+ c->alloc[i] = &emptymspan;
+ }
+ l = &c->free[i];
+ if(l->nlist > 0) {
+- runtime_MCentral_FreeList(&runtime_mheap.central[i], l->list);
++ runtime_MCentral_FreeList(&runtime_mheap.central[i].mcentral, l->list);
+ l->list = nil;
+ l->nlist = 0;
+ }
+diff -r 225a208260a6 libgo/runtime/mcentral.c
+--- a/libgo/runtime/mcentral.c Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/mcentral.c Tue Sep 23 15:59:57 2014 -0700
+@@ -39,14 +39,14 @@
+ int32 cap, n;
+ uint32 sg;
+
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ sg = runtime_mheap.sweepgen;
+ retry:
+ for(s = c->nonempty.next; s != &c->nonempty; s = s->next) {
+ if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ runtime_MSpan_Sweep(s);
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ // the span could have been moved to heap, retry
+ goto retry;
+ }
+@@ -65,9 +65,9 @@
+ runtime_MSpanList_Remove(s);
+ // swept spans are at the end of the list
+ runtime_MSpanList_InsertBack(&c->empty, s);
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ runtime_MSpan_Sweep(s);
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ // the span could be moved to nonempty or heap, retry
+ goto retry;
+ }
+@@ -82,7 +82,7 @@
+
+ // Replenish central list if empty.
+ if(!MCentral_Grow(c)) {
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ return nil;
+ }
+ goto retry;
+@@ -98,7 +98,7 @@
+ runtime_MSpanList_Remove(s);
+ runtime_MSpanList_InsertBack(&c->empty, s);
+ s->incache = true;
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ return s;
+ }
+
+@@ -109,7 +109,7 @@
+ MLink *v;
+ int32 cap, n;
+
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+
+ s->incache = false;
+
+@@ -135,7 +135,7 @@
+ runtime_MSpanList_Remove(s);
+ runtime_MSpanList_Insert(&c->nonempty, s);
+ }
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ }
+
+ // Free the list of objects back into the central free list c.
+@@ -145,12 +145,12 @@
+ {
+ MLink *next;
+
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ for(; start != nil; start = next) {
+ next = start->next;
+ MCentral_Free(c, start);
+ }
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ }
+
+ // Helper: free one object back into the central free list.
+@@ -193,7 +193,7 @@
+ // If s is completely freed, return it to the heap.
+ if(s->ref == 0) {
+ MCentral_ReturnToHeap(c, s); // unlocks c
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ }
+ }
+
+@@ -206,7 +206,7 @@
+ {
+ if(s->incache)
+ runtime_throw("freespan into cached span");
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+
+ // Move to nonempty if necessary.
+ if(s->freelist == nil) {
+@@ -227,7 +227,7 @@
+ runtime_atomicstore(&s->sweepgen, runtime_mheap.sweepgen);
+
+ if(s->ref != 0) {
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ return false;
+ }
+
+@@ -260,12 +260,12 @@
+ byte *p;
+ MSpan *s;
+
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ runtime_MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
+ s = runtime_MHeap_Alloc(&runtime_mheap, npages, c->sizeclass, 0, 1);
+ if(s == nil) {
+ // TODO(rsc): Log out of memory
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ return false;
+ }
+
+@@ -282,7 +282,7 @@
+ *tailp = nil;
+ runtime_markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift));
+
+- runtime_lock(c);
++ runtime_lock(&c->lock);
+ c->nfree += n;
+ runtime_MSpanList_Insert(&c->nonempty, s);
+ return true;
+@@ -301,7 +301,7 @@
+ if(s->ref != 0)
+ runtime_throw("ref wrong");
+ c->nfree -= (s->npages << PageShift) / size;
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
+ runtime_MHeap_Free(&runtime_mheap, s, 0);
+ }
+diff -r 225a208260a6 libgo/runtime/mgc0.c
+--- a/libgo/runtime/mgc0.c Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/mgc0.c Tue Sep 23 15:59:57 2014 -0700
+@@ -225,7 +225,7 @@
+ Note alldone;
+ ParFor *markfor;
+
+- Lock;
++ Lock lock;
+ byte *chunk;
+ uintptr nchunk;
+ } work __attribute__((aligned(8)));
+@@ -1337,7 +1337,7 @@
+ // retain everything it points to.
+ spf = (SpecialFinalizer*)sp;
+ // A finalizer can be set for an inner byte of an object, find object beginning.
+- p = (void*)((s->start << PageShift) + spf->offset/s->elemsize*s->elemsize);
++ p = (void*)((s->start << PageShift) + spf->special.offset/s->elemsize*s->elemsize);
+ enqueue1(&wbuf, (Obj){p, s->elemsize, 0});
+ enqueue1(&wbuf, (Obj){(void*)&spf->fn, PtrSize, 0});
+ enqueue1(&wbuf, (Obj){(void*)&spf->ft, PtrSize, 0});
+@@ -1378,7 +1378,7 @@
+ b = (Workbuf*)runtime_lfstackpop(&work.empty);
+ if(b == nil) {
+ // Need to allocate.
+- runtime_lock(&work);
++ runtime_lock(&work.lock);
+ if(work.nchunk < sizeof *b) {
+ work.nchunk = 1<<20;
+ work.chunk = runtime_SysAlloc(work.nchunk, &mstats.gc_sys);
+@@ -1388,7 +1388,7 @@
+ b = (Workbuf*)work.chunk;
+ work.chunk += sizeof *b;
+ work.nchunk -= sizeof *b;
+- runtime_unlock(&work);
++ runtime_unlock(&work.lock);
+ }
+ b->nobj = 0;
+ return b;
+@@ -1802,7 +1802,7 @@
+ c->local_nsmallfree[cl] += nfree;
+ c->local_cachealloc -= nfree * size;
+ runtime_xadd64(&mstats.next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
+- res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
++ res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl].mcentral, s, nfree, head.next, end);
+ //MCentral_FreeSpan updates sweepgen
+ }
+ return res;
+@@ -2147,10 +2147,10 @@
+ return;
+
+ if(gcpercent == GcpercentUnknown) { // first time through
+- runtime_lock(&runtime_mheap);
++ runtime_lock(&runtime_mheap.lock);
+ if(gcpercent == GcpercentUnknown)
+ gcpercent = readgogc();
+- runtime_unlock(&runtime_mheap);
++ runtime_unlock(&runtime_mheap.lock);
+ }
+ if(gcpercent < 0)
+ return;
+@@ -2421,7 +2421,7 @@
+
+ // Pass back: pauses, last gc (absolute time), number of gc, total pause ns.
+ p = (uint64*)pauses->array;
+- runtime_lock(&runtime_mheap);
++ runtime_lock(&runtime_mheap.lock);
+ n = mstats.numgc;
+ if(n > nelem(mstats.pause_ns))
+ n = nelem(mstats.pause_ns);
+@@ -2436,7 +2436,7 @@
+ p[n] = mstats.last_gc;
+ p[n+1] = mstats.numgc;
+ p[n+2] = mstats.pause_total_ns;
+- runtime_unlock(&runtime_mheap);
++ runtime_unlock(&runtime_mheap.lock);
+ pauses->__count = n+3;
+ }
+
+@@ -2444,14 +2444,14 @@
+ runtime_setgcpercent(int32 in) {
+ int32 out;
+
+- runtime_lock(&runtime_mheap);
++ runtime_lock(&runtime_mheap.lock);
+ if(gcpercent == GcpercentUnknown)
+ gcpercent = readgogc();
+ out = gcpercent;
+ if(in < 0)
+ in = -1;
+ gcpercent = in;
+- runtime_unlock(&runtime_mheap);
++ runtime_unlock(&runtime_mheap.lock);
+ return out;
+ }
+
+diff -r 225a208260a6 libgo/runtime/mheap.c
+--- a/libgo/runtime/mheap.c Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/mheap.c Tue Sep 23 15:59:57 2014 -0700
+@@ -70,7 +70,7 @@
+ runtime_MSpanList_Init(&h->freelarge);
+ runtime_MSpanList_Init(&h->busylarge);
+ for(i=0; i<nelem(h->central); i++)
+- runtime_MCentral_Init(&h->central[i], i);
++ runtime_MCentral_Init(&h->central[i].mcentral, i);
+ }
+
+ void
+@@ -109,9 +109,9 @@
+ runtime_MSpanList_Remove(s);
+ // swept spans are at the end of the list
+ runtime_MSpanList_InsertBack(list, s);
+- runtime_unlock(h);
++ runtime_unlock(&h->lock);
+ n += runtime_MSpan_Sweep(s);
+- runtime_lock(h);
++ runtime_lock(&h->lock);
+ if(n >= npages)
+ return n;
+ // the span could have been moved elsewhere
+@@ -156,7 +156,7 @@
+ }
+
+ // Now sweep everything that is not yet swept.
+- runtime_unlock(h);
++ runtime_unlock(&h->lock);
+ for(;;) {
+ n = runtime_sweepone();
+ if(n == (uintptr)-1) // all spans are swept
+@@ -165,7 +165,7 @@
+ if(reclaimed >= npage)
+ break;
+ }
+- runtime_lock(h);
++ runtime_lock(&h->lock);
+ }
+
+ // Allocate a new span of npage pages from the heap
+@@ -175,7 +175,7 @@
+ {
+ MSpan *s;
+
+- runtime_lock(h);
++ runtime_lock(&h->lock);
+ mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
+ runtime_m()->mcache->local_cachealloc = 0;
+ s = MHeap_AllocLocked(h, npage, sizeclass);
+@@ -191,7 +191,7 @@
+ runtime_MSpanList_InsertBack(&h->busylarge, s);
+ }
+ }
+- runtime_unlock(h);
++ runtime_unlock(&h->lock);
+ if(s != nil) {
+ if(needzero && s->needzero)
+ runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+@@ -386,7 +386,7 @@
+ void
+ runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
+ {
+- runtime_lock(h);
++ runtime_lock(&h->lock);
+ mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
+ runtime_m()->mcache->local_cachealloc = 0;
+ mstats.heap_inuse -= s->npages<<PageShift;
+@@ -395,7 +395,7 @@
+ mstats.heap_objects--;
+ }
+ MHeap_FreeLocked(h, s);
+- runtime_unlock(h);
++ runtime_unlock(&h->lock);
+ }
+
+ static void
+@@ -548,10 +548,10 @@
+ runtime_noteclear(¬e);
+ runtime_notetsleepg(¬e, tick);
+
+- runtime_lock(h);
++ runtime_lock(&h->lock);
+ unixnow = runtime_unixnanotime();
+ if(unixnow - mstats.last_gc > forcegc) {
+- runtime_unlock(h);
++ runtime_unlock(&h->lock);
+ // The scavenger can not block other goroutines,
+ // otherwise deadlock detector can fire spuriously.
+ // GC blocks other goroutines via the runtime_worldsema.
+@@ -561,11 +561,11 @@
+ runtime_notetsleepg(¬e, -1);
+ if(runtime_debug.gctrace > 0)
+ runtime_printf("scvg%d: GC forced\n", k);
+- runtime_lock(h);
++ runtime_lock(&h->lock);
+ }
+ now = runtime_nanotime();
+ scavenge(k, now, limit);
+- runtime_unlock(h);
++ runtime_unlock(&h->lock);
+ }
+ }
+
+@@ -575,9 +575,9 @@
+ runtime_debug_freeOSMemory(void)
+ {
+ runtime_gc(2); // force GC and do eager sweep
+- runtime_lock(&runtime_mheap);
++ runtime_lock(&runtime_mheap.lock);
+ scavenge(-1, ~(uintptr)0, 0);
+- runtime_unlock(&runtime_mheap);
++ runtime_unlock(&runtime_mheap.lock);
+ }
+
+ // Initialize a new span with the given start and npages.
+@@ -752,11 +752,11 @@
+ runtime_lock(&runtime_mheap.speciallock);
+ s = runtime_FixAlloc_Alloc(&runtime_mheap.specialfinalizeralloc);
+ runtime_unlock(&runtime_mheap.speciallock);
+- s->kind = KindSpecialFinalizer;
++ s->special.kind = KindSpecialFinalizer;
+ s->fn = f;
+ s->ft = ft;
+ s->ot = ot;
+- if(addspecial(p, s))
++ if(addspecial(p, &s->special))
+ return true;
+
+ // There was an old finalizer
+@@ -789,9 +789,9 @@
+ runtime_lock(&runtime_mheap.speciallock);
+ s = runtime_FixAlloc_Alloc(&runtime_mheap.specialprofilealloc);
+ runtime_unlock(&runtime_mheap.speciallock);
+- s->kind = KindSpecialProfile;
++ s->special.kind = KindSpecialProfile;
+ s->b = b;
+- if(!addspecial(p, s))
++ if(!addspecial(p, &s->special))
+ runtime_throw("setprofilebucket: profile already set");
+ }
+
+@@ -879,14 +879,14 @@
+ // remove the span from whatever list it is in now
+ if(s->sizeclass > 0) {
+ // must be in h->central[x].empty
+- c = &h->central[s->sizeclass];
+- runtime_lock(c);
++ c = &h->central[s->sizeclass].mcentral;
++ runtime_lock(&c->lock);
+ runtime_MSpanList_Remove(s);
+- runtime_unlock(c);
+- runtime_lock(h);
++ runtime_unlock(&c->lock);
++ runtime_lock(&h->lock);
+ } else {
+ // must be in h->busy/busylarge
+- runtime_lock(h);
++ runtime_lock(&h->lock);
+ runtime_MSpanList_Remove(s);
+ }
+ // heap is locked now
+@@ -933,18 +933,18 @@
+
+ // place the span into a new list
+ if(s->sizeclass > 0) {
+- runtime_unlock(h);
+- c = &h->central[s->sizeclass];
+- runtime_lock(c);
++ runtime_unlock(&h->lock);
++ c = &h->central[s->sizeclass].mcentral;
++ runtime_lock(&c->lock);
+ // swept spans are at the end of the list
+ runtime_MSpanList_InsertBack(&c->empty, s);
+- runtime_unlock(c);
++ runtime_unlock(&c->lock);
+ } else {
+ // Swept spans are at the end of lists.
+ if(s->npages < nelem(h->free))
+ runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
+ else
+ runtime_MSpanList_InsertBack(&h->busylarge, s);
+- runtime_unlock(h);
++ runtime_unlock(&h->lock);
+ }
+ }
+diff -r 225a208260a6 libgo/runtime/netpoll.goc
+--- a/libgo/runtime/netpoll.goc Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/netpoll.goc Tue Sep 23 15:59:57 2014 -0700
+@@ -53,7 +53,7 @@
+ // pollReset, pollWait, pollWaitCanceled and runtime_netpollready (IO rediness notification)
+ // proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
+ // in a lock-free way by all operations.
+- Lock; // protectes the following fields
++ Lock lock; // protectes the following fields
+ uintptr fd;
+ bool closing;
+ uintptr seq; // protects from stale timers and ready notifications
+@@ -68,7 +68,7 @@
+
+ static struct
+ {
+- Lock;
++ Lock lock;
+ PollDesc* first;
+ // PollDesc objects must be type-stable,
+ // because we can get ready notification from epoll/kqueue
+@@ -100,7 +100,7 @@
+
+ func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
+ pd = allocPollDesc();
+- runtime_lock(pd);
++ runtime_lock(&pd->lock);
+ if(pd->wg != nil && pd->wg != READY)
+ runtime_throw("runtime_pollOpen: blocked write on free descriptor");
+ if(pd->rg != nil && pd->rg != READY)
+@@ -112,7 +112,7 @@
+ pd->rd = 0;
+ pd->wg = nil;
+ pd->wd = 0;
+- runtime_unlock(pd);
++ runtime_unlock(&pd->lock);
+
+ errno = runtime_netpollopen(fd, pd);
+ }
+@@ -125,10 +125,10 @@
+ if(pd->rg != nil && pd->rg != READY)
+ runtime_throw("runtime_pollClose: blocked read on closing descriptor");
+ runtime_netpollclose(pd->fd);
+- runtime_lock(&pollcache);
++ runtime_lock(&pollcache.lock);
+ pd->link = pollcache.first;
+ pollcache.first = pd;
+- runtime_unlock(&pollcache);
++ runtime_unlock(&pollcache.lock);
+ }
+
+ func runtime_pollReset(pd *PollDesc, mode int) (err int) {
+@@ -169,9 +169,9 @@
+ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
+ G *rg, *wg;
+
+- runtime_lock(pd);
++ runtime_lock(&pd->lock);
+ if(pd->closing) {
+- runtime_unlock(pd);
++ runtime_unlock(&pd->lock);
+ return;
+ }
+ pd->seq++; // invalidate current timers
+@@ -223,7 +223,7 @@
+ rg = netpollunblock(pd, 'r', false);
+ if(pd->wd < 0)
+ wg = netpollunblock(pd, 'w', false);
+- runtime_unlock(pd);
++ runtime_unlock(&pd->lock);
+ if(rg)
+ runtime_ready(rg);
+ if(wg)
+@@ -233,7 +233,7 @@
+ func runtime_pollUnblock(pd *PollDesc) {
+ G *rg, *wg;
+
+- runtime_lock(pd);
++ runtime_lock(&pd->lock);
+ if(pd->closing)
+ runtime_throw("runtime_pollUnblock: already closing");
+ pd->closing = true;
+@@ -249,7 +249,7 @@
+ runtime_deltimer(&pd->wt);
+ pd->wt.fv = nil;
+ }
+- runtime_unlock(pd);
++ runtime_unlock(&pd->lock);
+ if(rg)
+ runtime_ready(rg);
+ if(wg)
+@@ -277,13 +277,13 @@
+ void
+ runtime_netpolllock(PollDesc *pd)
+ {
+- runtime_lock(pd);
++ runtime_lock(&pd->lock);
+ }
+
+ void
+ runtime_netpollunlock(PollDesc *pd)
+ {
+- runtime_unlock(pd);
++ runtime_unlock(&pd->lock);
+ }
+
+ // make pd ready, newly runnable goroutines (if any) are enqueued info gpp list
+@@ -401,10 +401,10 @@
+ // If it's stale, ignore the timer event.
+ seq = (uintptr)arg.type;
+ rg = wg = nil;
+- runtime_lock(pd);
++ runtime_lock(&pd->lock);
+ if(seq != pd->seq) {
+ // The descriptor was reused or timers were reset.
+- runtime_unlock(pd);
++ runtime_unlock(&pd->lock);
+ return;
+ }
+ if(read) {
+@@ -421,7 +421,7 @@
+ runtime_atomicstorep(&pd->wt.fv, nil); // full memory barrier between store to wd and load of wg in netpollunblock
+ wg = netpollunblock(pd, 'w', false);
+ }
+- runtime_unlock(pd);
++ runtime_unlock(&pd->lock);
+ if(rg)
+ runtime_ready(rg);
+ if(wg)
+@@ -452,7 +452,7 @@
+ PollDesc *pd;
+ uint32 i, n;
+
+- runtime_lock(&pollcache);
++ runtime_lock(&pollcache.lock);
+ if(pollcache.first == nil) {
+ n = PollBlockSize/sizeof(*pd);
+ if(n == 0)
+@@ -467,6 +467,6 @@
+ }
+ pd = pollcache.first;
+ pollcache.first = pd->link;
+- runtime_unlock(&pollcache);
++ runtime_unlock(&pollcache.lock);
+ return pd;
+ }
+diff -r 225a208260a6 libgo/runtime/proc.c
+--- a/libgo/runtime/proc.c Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/proc.c Tue Sep 23 15:59:57 2014 -0700
+@@ -357,7 +357,7 @@
+
+ typedef struct Sched Sched;
+ struct Sched {
+- Lock;
++ Lock lock;
+
+ uint64 goidgen;
+ M* midle; // idle m's waiting for work
+@@ -770,7 +770,7 @@
+
+ mp->fastrand = 0x49f6428aUL + mp->id + runtime_cputicks();
+
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ mp->id = runtime_sched.mcount++;
+ checkmcount();
+ runtime_mpreinit(mp);
+@@ -781,7 +781,7 @@
+ // runtime_NumCgoCall() iterates over allm w/o schedlock,
+ // so we need to publish it safely.
+ runtime_atomicstorep(&runtime_allm, mp);
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ }
+
+ // Mark gp ready to run.
+@@ -808,7 +808,7 @@
+
+ // Figure out how many CPUs to use during GC.
+ // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ n = runtime_gomaxprocs;
+ if(n > runtime_ncpu)
+ n = runtime_ncpu > 0 ? runtime_ncpu : 1;
+@@ -816,7 +816,7 @@
+ n = MaxGcproc;
+ if(n > runtime_sched.nmidle+1) // one M is currently running
+ n = runtime_sched.nmidle+1;
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ return n;
+ }
+
+@@ -825,14 +825,14 @@
+ {
+ int32 n;
+
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ n = runtime_gomaxprocs;
+ if(n > runtime_ncpu)
+ n = runtime_ncpu;
+ if(n > MaxGcproc)
+ n = MaxGcproc;
+ n -= runtime_sched.nmidle+1; // one M is currently running
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ return n > 0;
+ }
+
+@@ -842,7 +842,7 @@
+ M *mp;
+ int32 n, pos;
+
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ pos = 0;
+ for(n = 1; n < nproc; n++) { // one M is currently running
+ if(runtime_allp[pos]->mcache == m->mcache)
+@@ -855,7 +855,7 @@
+ pos++;
+ runtime_notewakeup(&mp->park);
+ }
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ }
+
+ // Similar to stoptheworld but best-effort and can be called several times.
+@@ -894,7 +894,7 @@
+ P *p;
+ bool wait;
+
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ runtime_sched.stopwait = runtime_gomaxprocs;
+ runtime_atomicstore((uint32*)&runtime_sched.gcwaiting, 1);
+ preemptall();
+@@ -914,7 +914,7 @@
+ runtime_sched.stopwait--;
+ }
+ wait = runtime_sched.stopwait > 0;
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+
+ // wait for remaining P's to stop voluntarily
+ if(wait) {
+@@ -948,7 +948,7 @@
+ gp = runtime_netpoll(false); // non-blocking
+ injectglist(gp);
+ add = needaddgcproc();
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ if(newprocs) {
+ procresize(newprocs);
+ newprocs = 0;
+@@ -972,7 +972,7 @@
+ runtime_sched.sysmonwait = false;
+ runtime_notewakeup(&runtime_sched.sysmonnote);
+ }
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+
+ while(p1) {
+ p = p1;
+@@ -1404,9 +1404,9 @@
+ }
+
+ retry:
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ mput(m);
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ runtime_notesleep(&m->park);
+ runtime_noteclear(&m->park);
+ if(m->helpgc) {
+@@ -1433,18 +1433,18 @@
+ M *mp;
+ void (*fn)(void);
+
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ if(p == nil) {
+ p = pidleget();
+ if(p == nil) {
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ if(spinning)
+ runtime_xadd(&runtime_sched.nmspinning, -1);
+ return;
+ }
+ }
+ mp = mget();
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ if(mp == nil) {
+ fn = nil;
+ if(spinning)
+@@ -1477,28 +1477,28 @@
+ startm(p, true);
+ return;
+ }
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ if(runtime_sched.gcwaiting) {
+ p->status = Pgcstop;
+ if(--runtime_sched.stopwait == 0)
+ runtime_notewakeup(&runtime_sched.stopnote);
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ return;
+ }
+ if(runtime_sched.runqsize) {
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ startm(p, false);
+ return;
+ }
+ // If this is the last running P and nobody is polling network,
+ // need to wakeup another M to poll network.
+ if(runtime_sched.npidle == (uint32)runtime_gomaxprocs-1 && runtime_atomicload64(&runtime_sched.lastpoll) != 0) {
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ startm(p, false);
+ return;
+ }
+ pidleput(p);
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ }
+
+ // Tries to add one more P to execute G's.
+@@ -1570,11 +1570,11 @@
+ runtime_xadd(&runtime_sched.nmspinning, -1);
+ }
+ p = releasep();
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ p->status = Pgcstop;
+ if(--runtime_sched.stopwait == 0)
+ runtime_notewakeup(&runtime_sched.stopnote);
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ stopm();
+ }
+
+@@ -1625,9 +1625,9 @@
+ return gp;
+ // global runq
+ if(runtime_sched.runqsize) {
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ gp = globrunqget(m->p, 0);
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ if(gp)
+ return gp;
+ }
+@@ -1661,19 +1661,19 @@
+ }
+ stop:
+ // return P and block
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ if(runtime_sched.gcwaiting) {
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ goto top;
+ }
+ if(runtime_sched.runqsize) {
+ gp = globrunqget(m->p, 0);
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ return gp;
+ }
+ p = releasep();
+ pidleput(p);
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ if(m->spinning) {
+ m->spinning = false;
+ runtime_xadd(&runtime_sched.nmspinning, -1);
+@@ -1682,9 +1682,9 @@
+ for(i = 0; i < runtime_gomaxprocs; i++) {
+ p = runtime_allp[i];
+ if(p && p->runqhead != p->runqtail) {
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ p = pidleget();
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ if(p) {
+ acquirep(p);
+ goto top;
+@@ -1701,9 +1701,9 @@
+ gp = runtime_netpoll(true); // block until new work is available
+ runtime_atomicstore64(&runtime_sched.lastpoll, runtime_nanotime());
+ if(gp) {
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ p = pidleget();
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ if(p) {
+ acquirep(p);
+ injectglist(gp->schedlink);
+@@ -1746,14 +1746,14 @@
+
+ if(glist == nil)
+ return;
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ for(n = 0; glist; n++) {
+ gp = glist;
+ glist = gp->schedlink;
+ gp->status = Grunnable;
+ globrunqput(gp);
+ }
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+
+ for(; n && runtime_sched.npidle; n--)
+ startm(nil, false);
+@@ -1784,9 +1784,9 @@
+ // This is a fancy way to say tick%61==0,
+ // it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
+ if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime_sched.runqsize > 0) {
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ gp = globrunqget(m->p, 1);
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ if(gp)
+ resetspinning();
+ }
+@@ -1880,9 +1880,9 @@
+ gp->status = Grunnable;
+ gp->m = nil;
+ m->curg = nil;
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ globrunqput(gp);
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ if(m->lockedg) {
+ stoplockedm();
+ execute(gp); // Never returns.
+@@ -1985,24 +1985,24 @@
+ g->status = Gsyscall;
+
+ if(runtime_atomicload(&runtime_sched.sysmonwait)) { // TODO: fast atomic
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ if(runtime_atomicload(&runtime_sched.sysmonwait)) {
+ runtime_atomicstore(&runtime_sched.sysmonwait, 0);
+ runtime_notewakeup(&runtime_sched.sysmonnote);
+ }
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ }
+
+ m->mcache = nil;
+ m->p->m = nil;
+ runtime_atomicstore(&m->p->status, Psyscall);
+ if(runtime_sched.gcwaiting) {
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ if (runtime_sched.stopwait > 0 && runtime_cas(&m->p->status, Psyscall, Pgcstop)) {
+ if(--runtime_sched.stopwait == 0)
+ runtime_notewakeup(&runtime_sched.stopnote);
+ }
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ }
+
+ m->locks--;
+@@ -2113,13 +2113,13 @@
+ // Try to get any other idle P.
+ m->p = nil;
+ if(runtime_sched.pidle) {
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ p = pidleget();
+ if(p && runtime_atomicload(&runtime_sched.sysmonwait)) {
+ runtime_atomicstore(&runtime_sched.sysmonwait, 0);
+ runtime_notewakeup(&runtime_sched.sysmonnote);
+ }
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ if(p) {
+ acquirep(p);
+ return true;
+@@ -2138,7 +2138,7 @@
+ gp->status = Grunnable;
+ gp->m = nil;
+ m->curg = nil;
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ p = pidleget();
+ if(p == nil)
+ globrunqput(gp);
+@@ -2146,7 +2146,7 @@
+ runtime_atomicstore(&runtime_sched.sysmonwait, 0);
+ runtime_notewakeup(&runtime_sched.sysmonnote);
+ }
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ if(p) {
+ acquirep(p);
+ execute(gp); // Never returns.
+@@ -2425,13 +2425,13 @@
+
+ if(n > MaxGomaxprocs)
+ n = MaxGomaxprocs;
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ ret = runtime_gomaxprocs;
+ if(n <= 0 || n == ret) {
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ return ret;
+ }
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+
+ runtime_semacquire(&runtime_worldsema, false);
+ m->gcing = 1;
+@@ -2536,7 +2536,7 @@
+ }
+
+ static struct {
+- Lock;
++ Lock lock;
+ void (*fn)(uintptr*, int32);
+ int32 hz;
+ uintptr pcbuf[TracebackMaxFrames];
+@@ -2568,9 +2568,9 @@
+ if(mp->mcache == nil)
+ traceback = false;
+
+- runtime_lock(&prof);
++ runtime_lock(&prof.lock);
+ if(prof.fn == nil) {
+- runtime_unlock(&prof);
++ runtime_unlock(&prof.lock);
+ mp->mallocing--;
+ return;
+ }
+@@ -2598,7 +2598,7 @@
+ prof.pcbuf[1] = (uintptr)System;
+ }
+ prof.fn(prof.pcbuf, n);
+- runtime_unlock(&prof);
++ runtime_unlock(&prof.lock);
+ mp->mallocing--;
+ }
+
+@@ -2623,13 +2623,13 @@
+ // it would deadlock.
+ runtime_resetcpuprofiler(0);
+
+- runtime_lock(&prof);
++ runtime_lock(&prof.lock);
+ prof.fn = fn;
+ prof.hz = hz;
+- runtime_unlock(&prof);
+- runtime_lock(&runtime_sched);
++ runtime_unlock(&prof.lock);
++ runtime_lock(&runtime_sched.lock);
+ runtime_sched.profilehz = hz;
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+
+ if(hz != 0)
+ runtime_resetcpuprofiler(hz);
+@@ -2767,11 +2767,11 @@
+ static void
+ incidlelocked(int32 v)
+ {
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ runtime_sched.nmidlelocked += v;
+ if(v > 0)
+ checkdead();
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ }
+
+ // Check for deadlock situation.
+@@ -2840,16 +2840,16 @@
+ runtime_usleep(delay);
+ if(runtime_debug.schedtrace <= 0 &&
+ (runtime_sched.gcwaiting || runtime_atomicload(&runtime_sched.npidle) == (uint32)runtime_gomaxprocs)) { // TODO: fast atomic
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ if(runtime_atomicload(&runtime_sched.gcwaiting) || runtime_atomicload(&runtime_sched.npidle) == (uint32)runtime_gomaxprocs) {
+ runtime_atomicstore(&runtime_sched.sysmonwait, 1);
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ runtime_notesleep(&runtime_sched.sysmonnote);
+ runtime_noteclear(&runtime_sched.sysmonnote);
+ idle = 0;
+ delay = 20;
+ } else
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ }
+ // poll network if not polled for more than 10ms
+ lastpoll = runtime_atomicload64(&runtime_sched.lastpoll);
+@@ -2978,7 +2978,7 @@
+ if(starttime == 0)
+ starttime = now;
+
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ runtime_printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d idlethreads=%d runqueue=%d",
+ (now-starttime)/1000000, runtime_gomaxprocs, runtime_sched.npidle, runtime_sched.mcount,
+ runtime_sched.nmidle, runtime_sched.runqsize);
+@@ -3014,7 +3014,7 @@
+ }
+ }
+ if(!detailed) {
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ return;
+ }
+ for(mp = runtime_allm; mp; mp = mp->alllink) {
+@@ -3046,7 +3046,7 @@
+ lockedm ? lockedm->id : -1);
+ }
+ runtime_unlock(&allglock);
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ }
+
+ // Put mp on midle list.
+@@ -3202,9 +3202,9 @@
+ for(i=0; i<n; i++)
+ batch[i]->schedlink = batch[i+1];
+ // Now put the batch on global queue.
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ globrunqputbatch(batch[0], batch[n], n+1);
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ return true;
+ }
+
+@@ -3356,11 +3356,11 @@
+ {
+ int32 out;
+
+- runtime_lock(&runtime_sched);
++ runtime_lock(&runtime_sched.lock);
+ out = runtime_sched.maxmcount;
+ runtime_sched.maxmcount = in;
+ checkmcount();
+- runtime_unlock(&runtime_sched);
++ runtime_unlock(&runtime_sched.lock);
+ return out;
+ }
+
+diff -r 225a208260a6 libgo/runtime/runtime.h
+--- a/libgo/runtime/runtime.h Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/runtime.h Tue Sep 23 15:59:57 2014 -0700
+@@ -286,7 +286,7 @@
+
+ struct P
+ {
+- Lock;
++ Lock lock;
+
+ int32 id;
+ uint32 status; // one of Pidle/Prunning/...
+@@ -384,7 +384,7 @@
+
+ struct Timers
+ {
+- Lock;
++ Lock lock;
+ G *timerproc;
+ bool sleeping;
+ bool rescheduling;
+diff -r 225a208260a6 libgo/runtime/sema.goc
+--- a/libgo/runtime/sema.goc Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/sema.goc Tue Sep 23 15:59:57 2014 -0700
+@@ -35,7 +35,7 @@
+ typedef struct SemaRoot SemaRoot;
+ struct SemaRoot
+ {
+- Lock;
++ Lock lock;
+ SemaWaiter* head;
+ SemaWaiter* tail;
+ // Number of waiters. Read w/o the lock.
+@@ -47,7 +47,7 @@
+
+ struct semtable
+ {
+- SemaRoot;
++ SemaRoot root;
+ uint8 pad[CacheLineSize-sizeof(SemaRoot)];
+ };
+ static struct semtable semtable[SEMTABLESZ];
+@@ -55,7 +55,7 @@
+ static SemaRoot*
+ semroot(uint32 volatile *addr)
+ {
+- return &semtable[((uintptr)addr >> 3) % SEMTABLESZ];
++ return &semtable[((uintptr)addr >> 3) % SEMTABLESZ].root;
+ }
+
+ static void
+@@ -124,19 +124,19 @@
+ }
+ for(;;) {
+
+- runtime_lock(root);
++ runtime_lock(&root->lock);
+ // Add ourselves to nwait to disable "easy case" in semrelease.
+ runtime_xadd(&root->nwait, 1);
+ // Check cansemacquire to avoid missed wakeup.
+ if(cansemacquire(addr)) {
+ runtime_xadd(&root->nwait, -1);
+- runtime_unlock(root);
++ runtime_unlock(&root->lock);
+ return;
+ }
+ // Any semrelease after the cansemacquire knows we're waiting
+ // (we set nwait above), so go to sleep.
+ semqueue(root, addr, &s);
+- runtime_parkunlock(root, "semacquire");
++ runtime_parkunlock(&root->lock, "semacquire");
+ if(cansemacquire(addr)) {
+ if(t0)
+ runtime_blockevent(s.releasetime - t0, 3);
+@@ -161,11 +161,11 @@
+ return;
+
+ // Harder case: search for a waiter and wake it.
+- runtime_lock(root);
++ runtime_lock(&root->lock);
+ if(runtime_atomicload(&root->nwait) == 0) {
+ // The count is already consumed by another goroutine,
+ // so no need to wake up another goroutine.
+- runtime_unlock(root);
++ runtime_unlock(&root->lock);
+ return;
+ }
+ for(s = root->head; s; s = s->next) {
+@@ -175,7 +175,7 @@
+ break;
+ }
+ }
+- runtime_unlock(root);
++ runtime_unlock(&root->lock);
+ if(s) {
+ if(s->releasetime)
+ s->releasetime = runtime_cputicks();
+@@ -211,7 +211,7 @@
+ typedef struct SyncSema SyncSema;
+ struct SyncSema
+ {
+- Lock;
++ Lock lock;
+ SemaWaiter* head;
+ SemaWaiter* tail;
+ };
+@@ -238,7 +238,7 @@
+ w.releasetime = -1;
+ }
+
+- runtime_lock(s);
++ runtime_lock(&s->lock);
+ if(s->head && s->head->nrelease > 0) {
+ // have pending release, consume it
+ wake = nil;
+@@ -249,7 +249,7 @@
+ if(s->head == nil)
+ s->tail = nil;
+ }
+- runtime_unlock(s);
++ runtime_unlock(&s->lock);
+ if(wake)
+ runtime_ready(wake->g);
+ } else {
+@@ -259,7 +259,7 @@
+ else
+ s->tail->next = &w;
+ s->tail = &w;
+- runtime_parkunlock(s, "semacquire");
++ runtime_parkunlock(&s->lock, "semacquire");
+ if(t0)
+ runtime_blockevent(w.releasetime - t0, 2);
+ }
+@@ -274,7 +274,7 @@
+ w.next = nil;
+ w.releasetime = 0;
+
+- runtime_lock(s);
++ runtime_lock(&s->lock);
+ while(w.nrelease > 0 && s->head && s->head->nrelease < 0) {
+ // have pending acquire, satisfy it
+ wake = s->head;
+@@ -293,7 +293,7 @@
+ else
+ s->tail->next = &w;
+ s->tail = &w;
+- runtime_parkunlock(s, "semarelease");
++ runtime_parkunlock(&s->lock, "semarelease");
+ } else
+- runtime_unlock(s);
++ runtime_unlock(&s->lock);
+ }
+diff -r 225a208260a6 libgo/runtime/sigqueue.goc
+--- a/libgo/runtime/sigqueue.goc Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/sigqueue.goc Tue Sep 23 15:59:57 2014 -0700
+@@ -32,7 +32,7 @@
+ #include "defs.h"
+
+ static struct {
+- Note;
++ Note note;
+ uint32 mask[(NSIG+31)/32];
+ uint32 wanted[(NSIG+31)/32];
+ uint32 state;
+@@ -70,7 +70,7 @@
+ new = HASSIGNAL;
+ if(runtime_cas(&sig.state, old, new)) {
+ if (old == HASWAITER)
+- runtime_notewakeup(&sig);
++ runtime_notewakeup(&sig.note);
+ break;
+ }
+ }
+@@ -107,8 +107,8 @@
+ new = HASWAITER;
+ if(runtime_cas(&sig.state, old, new)) {
+ if (new == HASWAITER) {
+- runtime_notetsleepg(&sig, -1);
+- runtime_noteclear(&sig);
++ runtime_notetsleepg(&sig.note, -1);
++ runtime_noteclear(&sig.note);
+ }
+ break;
+ }
+@@ -138,7 +138,7 @@
+ // to use for initialization. It does not pass
+ // signal information in m.
+ sig.inuse = true; // enable reception of signals; cannot disable
+- runtime_noteclear(&sig);
++ runtime_noteclear(&sig.note);
+ return;
+ }
+
+diff -r 225a208260a6 libgo/runtime/time.goc
+--- a/libgo/runtime/time.goc Mon Sep 22 14:14:24 2014 -0700
++++ b/libgo/runtime/time.goc Tue Sep 23 15:59:57 2014 -0700
+@@ -94,17 +94,17 @@
+ t.period = 0;
+ t.fv = &readyv;
+ t.arg.__object = g;
+- runtime_lock(&timers);
++ runtime_lock(&timers.lock);
+ addtimer(&t);
+- runtime_parkunlock(&timers, reason);
++ runtime_parkunlock(&timers.lock, reason);
+ }
+
+ void
+ runtime_addtimer(Timer *t)
+ {
+- runtime_lock(&timers);
++ runtime_lock(&timers.lock);
+ addtimer(t);
+- runtime_unlock(&timers);
++ runtime_unlock(&timers.lock);
+ }
+
+ // Add a timer to the heap and start or kick the timer proc
+@@ -169,14 +169,14 @@
+ i = t->i;
+ gi = i;
+
+- runtime_lock(&timers);
++ runtime_lock(&timers.lock);
+
+ // t may not be registered anymore and may have
+ // a bogus i (typically 0, if generated by Go).
+ // Verify it before proceeding.
+ i = t->i;
+ if(i < 0 || i >= timers.len || timers.t[i] != t) {
+- runtime_unlock(&timers);
++ runtime_unlock(&timers.lock);
+ return false;
+ }
+
+@@ -192,7 +192,7 @@
+ }
+ if(debug)
+ dumptimers("deltimer");
+- runtime_unlock(&timers);
++ runtime_unlock(&timers.lock);
+ return true;
+ }
+
+@@ -210,7 +210,7 @@
+ Eface arg;
+
+ for(;;) {
+- runtime_lock(&timers);
++ runtime_lock(&timers.lock);
+ timers.sleeping = false;
+ now = runtime_nanotime();
+ for(;;) {
+@@ -236,7 +236,7 @@
+ fv = t->fv;
+ f = (void*)t->fv->fn;
+ arg = t->arg;
+- runtime_unlock(&timers);
++ runtime_unlock(&timers.lock);
+ if(raceenabled)
+ runtime_raceacquire(t);
+ __go_set_closure(fv);
+@@ -249,20 +249,20 @@
+ arg.__object = nil;
+ USED(&arg);
+
+- runtime_lock(&timers);
++ runtime_lock(&timers.lock);
+ }
+ if(delta < 0) {
+ // No timers left - put goroutine to sleep.
+ timers.rescheduling = true;
+ runtime_g()->isbackground = true;
+- runtime_parkunlock(&timers, "timer goroutine (idle)");
++ runtime_parkunlock(&timers.lock, "timer goroutine (idle)");
+ runtime_g()->isbackground = false;
+ continue;
+ }
+ // At least one timer pending. Sleep until then.
+ timers.sleeping = true;
+ runtime_noteclear(&timers.waitnote);
+- runtime_unlock(&timers);
++ runtime_unlock(&timers.lock);
+ runtime_notetsleepg(&timers.waitnote, delta);
+ }
+ }
diff --git a/llgo-go.sh b/llgo-go.sh
new file mode 100644
index 0000000..a29be01
--- /dev/null
+++ b/llgo-go.sh
@@ -0,0 +1,19 @@
+#!/bin/sh -e
+
+scriptpath=$(which "$0")
+scriptpath=$(readlink -f "$scriptpath")
+bindir=$(dirname "$scriptpath")
+prefix=$(dirname "$bindir")
+
+cmd="$1"
+
+case "$cmd" in
+build | get | install | run | test)
+ shift
+ PATH="$prefix/lib/llgo/go-path:$PATH" exec go "$cmd" -compiler gccgo "$@"
+ ;;
+
+*)
+ exec go "$@"
+ ;;
+esac
diff --git a/mvifdiff.sh b/mvifdiff.sh
new file mode 100755
index 0000000..1b8bac6
--- /dev/null
+++ b/mvifdiff.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+
+# The mvifdiff.sh script works like the mv(1) command, except
+# that it does not touch the destination file if its contents
+# are the same as the source file.
+
+if cmp -s "$1" "$2" ; then
+ rm "$1"
+else
+ mv "$1" "$2"
+fi
diff --git a/ssaopt/esc.go b/ssaopt/esc.go
new file mode 100644
index 0000000..88e0931
--- /dev/null
+++ b/ssaopt/esc.go
@@ -0,0 +1,94 @@
+// Copyright 2014 The llgo Authors.
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package ssaopt
+
+import (
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+)
+
+func escapes(val ssa.Value, bb *ssa.BasicBlock, pending []ssa.Value) bool {
+ for _, p := range pending {
+ if val == p {
+ return false
+ }
+ }
+
+ for _, ref := range *val.Referrers() {
+ switch ref := ref.(type) {
+ case *ssa.Phi:
+ // We must consider the variable to have escaped if it is
+ // possible for the program to see more than one "version"
+ // of the variable at once, as this requires the program
+ // to use heap allocation for the multiple versions.
+ //
+ // I (pcc) think that this is only possible (without stores)
+ // in the case where a phi node that (directly or indirectly)
+ // refers to the allocation dominates the allocation.
+ if ref.Block().Dominates(bb) {
+ return true
+ }
+ if escapes(ref, bb, append(pending, val)) {
+ return true
+ }
+
+ case *ssa.BinOp, *ssa.ChangeType, *ssa.Convert, *ssa.ChangeInterface, *ssa.MakeInterface, *ssa.Slice, *ssa.FieldAddr, *ssa.IndexAddr, *ssa.TypeAssert, *ssa.Extract:
+ if escapes(ref.(ssa.Value), bb, append(pending, val)) {
+ return true
+ }
+
+ case *ssa.Range, *ssa.DebugRef:
+ continue
+
+ case *ssa.UnOp:
+ if ref.Op == token.MUL || ref.Op == token.ARROW {
+ continue
+ }
+ if escapes(ref, bb, append(pending, val)) {
+ return true
+ }
+
+ case *ssa.Store:
+ if val == ref.Val {
+ return true
+ }
+
+ case *ssa.Call:
+ if builtin, ok := ref.Call.Value.(*ssa.Builtin); ok {
+ switch builtin.Name() {
+ case "cap", "len", "copy", "ssa:wrapnilchk":
+ continue
+ case "append":
+ if ref.Call.Args[0] == val && escapes(ref, bb, append(pending, val)) {
+ return true
+ }
+ default:
+ return true
+ }
+ } else {
+ return true
+ }
+
+ default:
+ return true
+ }
+ }
+
+ return false
+}
+
+func LowerAllocsToStack(f *ssa.Function) {
+ pending := make([]ssa.Value, 0, 10)
+
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ if alloc, ok := instr.(*ssa.Alloc); ok && alloc.Heap && !escapes(alloc, alloc.Block(), pending) {
+ alloc.Heap = false
+ f.Locals = append(f.Locals, alloc)
+ }
+ }
+ }
+}
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
new file mode 100644
index 0000000..641ade8
--- /dev/null
+++ b/test/CMakeLists.txt
@@ -0,0 +1,15 @@
+configure_lit_site_cfg(
+ ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in
+ ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg
+ )
+
+add_lit_testsuite(check-llgo "Running the llgo regression tests"
+ ${CMAKE_CURRENT_BINARY_DIR}
+ DEPENDS
+ FileCheck
+ count
+ llgo
+ libgo
+ not
+ )
+set_target_properties(check-llgo PROPERTIES FOLDER "Tests")
diff --git a/test/debuginfo/emptyname.go b/test/debuginfo/emptyname.go
new file mode 100644
index 0000000..28ad10d
--- /dev/null
+++ b/test/debuginfo/emptyname.go
@@ -0,0 +1,7 @@
+// RUN: llgo -c -o /dev/null -g %s
+
+package main
+
+//line :1
+func main() {
+}
diff --git a/test/driver/parse-arguments.go b/test/driver/parse-arguments.go
new file mode 100644
index 0000000..36b3c5c
--- /dev/null
+++ b/test/driver/parse-arguments.go
@@ -0,0 +1,17 @@
+// RUN: not llgo -B 2>&1 | FileCheck --check-prefix=B %s
+// RUN: not llgo -D 2>&1 | FileCheck --check-prefix=D %s
+// RUN: not llgo -I 2>&1 | FileCheck --check-prefix=I %s
+// RUN: not llgo -isystem 2>&1 | FileCheck --check-prefix=isystem %s
+// RUN: not llgo -L 2>&1 | FileCheck --check-prefix=L %s
+// RUN: not llgo -fload-plugin 2>&1 | FileCheck --check-prefix=fload-plugin %s
+// RUN: not llgo -mllvm 2>&1 | FileCheck --check-prefix=mllvm %s
+// RUN: not llgo -o 2>&1 | FileCheck --check-prefix=o %s
+
+// B: missing argument after '-B'
+// D: missing argument after '-D'
+// I: missing argument after '-I'
+// isystem: missing argument after '-isystem'
+// L: missing argument after '-L'
+// fload-plugin: missing argument after '-fload-plugin'
+// mllvm: missing argument after '-mllvm'
+// o: missing argument after '-o'
diff --git a/test/execution/Inputs/init2.go b/test/execution/Inputs/init2.go
new file mode 100644
index 0000000..041d764
--- /dev/null
+++ b/test/execution/Inputs/init2.go
@@ -0,0 +1,5 @@
+package main
+
+func init() {
+ println("do some other stuff before main")
+}
diff --git a/test/execution/arrays/compare.go b/test/execution/arrays/compare.go
new file mode 100644
index 0000000..b99d4fd
--- /dev/null
+++ b/test/execution/arrays/compare.go
@@ -0,0 +1,18 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: false
+// CHECK-NEXT: true
+// CHECK-NEXT: false
+
+package main
+
+func main() {
+ a := [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+ b := [...]int{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ c := [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+
+ println(a == b)
+ println(a == c)
+ println(b == c)
+}
diff --git a/test/execution/arrays/index.go b/test/execution/arrays/index.go
new file mode 100644
index 0000000..78c5315
--- /dev/null
+++ b/test/execution/arrays/index.go
@@ -0,0 +1,288 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 3
+// CHECK-NEXT: 4
+// CHECK-NEXT: 5
+// CHECK-NEXT: 6
+// CHECK-NEXT: 7
+// CHECK-NEXT: 8
+// CHECK-NEXT: 9
+// CHECK-NEXT: 10
+// CHECK-NEXT: 11
+// CHECK-NEXT: 12
+// CHECK-NEXT: 13
+// CHECK-NEXT: 14
+// CHECK-NEXT: 15
+// CHECK-NEXT: 16
+// CHECK-NEXT: 17
+// CHECK-NEXT: 18
+// CHECK-NEXT: 19
+// CHECK-NEXT: 20
+// CHECK-NEXT: 21
+// CHECK-NEXT: 22
+// CHECK-NEXT: 23
+// CHECK-NEXT: 24
+// CHECK-NEXT: 25
+// CHECK-NEXT: 26
+// CHECK-NEXT: 27
+// CHECK-NEXT: 28
+// CHECK-NEXT: 29
+// CHECK-NEXT: 30
+// CHECK-NEXT: 31
+// CHECK-NEXT: 32
+// CHECK-NEXT: 33
+// CHECK-NEXT: 34
+// CHECK-NEXT: 35
+// CHECK-NEXT: 36
+// CHECK-NEXT: 37
+// CHECK-NEXT: 38
+// CHECK-NEXT: 39
+// CHECK-NEXT: 40
+// CHECK-NEXT: 41
+// CHECK-NEXT: 42
+// CHECK-NEXT: 43
+// CHECK-NEXT: 44
+// CHECK-NEXT: 45
+// CHECK-NEXT: 46
+// CHECK-NEXT: 47
+// CHECK-NEXT: 48
+// CHECK-NEXT: 49
+// CHECK-NEXT: 50
+// CHECK-NEXT: 51
+// CHECK-NEXT: 52
+// CHECK-NEXT: 53
+// CHECK-NEXT: 54
+// CHECK-NEXT: 55
+// CHECK-NEXT: 56
+// CHECK-NEXT: 57
+// CHECK-NEXT: 58
+// CHECK-NEXT: 59
+// CHECK-NEXT: 60
+// CHECK-NEXT: 61
+// CHECK-NEXT: 62
+// CHECK-NEXT: 63
+// CHECK-NEXT: 64
+// CHECK-NEXT: 65
+// CHECK-NEXT: 66
+// CHECK-NEXT: 67
+// CHECK-NEXT: 68
+// CHECK-NEXT: 69
+// CHECK-NEXT: 70
+// CHECK-NEXT: 71
+// CHECK-NEXT: 72
+// CHECK-NEXT: 73
+// CHECK-NEXT: 74
+// CHECK-NEXT: 75
+// CHECK-NEXT: 76
+// CHECK-NEXT: 77
+// CHECK-NEXT: 78
+// CHECK-NEXT: 79
+// CHECK-NEXT: 80
+// CHECK-NEXT: 81
+// CHECK-NEXT: 82
+// CHECK-NEXT: 83
+// CHECK-NEXT: 84
+// CHECK-NEXT: 85
+// CHECK-NEXT: 86
+// CHECK-NEXT: 87
+// CHECK-NEXT: 88
+// CHECK-NEXT: 89
+// CHECK-NEXT: 90
+// CHECK-NEXT: 91
+// CHECK-NEXT: 92
+// CHECK-NEXT: 93
+// CHECK-NEXT: 94
+// CHECK-NEXT: 95
+// CHECK-NEXT: 96
+// CHECK-NEXT: 97
+// CHECK-NEXT: 98
+// CHECK-NEXT: 99
+// CHECK-NEXT: 100
+// CHECK-NEXT: 101
+// CHECK-NEXT: 102
+// CHECK-NEXT: 103
+// CHECK-NEXT: 104
+// CHECK-NEXT: 105
+// CHECK-NEXT: 106
+// CHECK-NEXT: 107
+// CHECK-NEXT: 108
+// CHECK-NEXT: 109
+// CHECK-NEXT: 110
+// CHECK-NEXT: 111
+// CHECK-NEXT: 112
+// CHECK-NEXT: 113
+// CHECK-NEXT: 114
+// CHECK-NEXT: 115
+// CHECK-NEXT: 116
+// CHECK-NEXT: 117
+// CHECK-NEXT: 118
+// CHECK-NEXT: 119
+// CHECK-NEXT: 120
+// CHECK-NEXT: 121
+// CHECK-NEXT: 122
+// CHECK-NEXT: 123
+// CHECK-NEXT: 124
+// CHECK-NEXT: 125
+// CHECK-NEXT: 126
+// CHECK-NEXT: 127
+// CHECK-NEXT: 128
+// CHECK-NEXT: 129
+// CHECK-NEXT: 130
+// CHECK-NEXT: 131
+// CHECK-NEXT: 132
+// CHECK-NEXT: 133
+// CHECK-NEXT: 134
+// CHECK-NEXT: 135
+// CHECK-NEXT: 136
+// CHECK-NEXT: 137
+// CHECK-NEXT: 138
+// CHECK-NEXT: 139
+// CHECK-NEXT: 140
+// CHECK-NEXT: 141
+// CHECK-NEXT: 142
+// CHECK-NEXT: 143
+// CHECK-NEXT: 144
+// CHECK-NEXT: 145
+// CHECK-NEXT: 146
+// CHECK-NEXT: 147
+// CHECK-NEXT: 148
+// CHECK-NEXT: 149
+// CHECK-NEXT: 150
+// CHECK-NEXT: 151
+// CHECK-NEXT: 152
+// CHECK-NEXT: 153
+// CHECK-NEXT: 154
+// CHECK-NEXT: 155
+// CHECK-NEXT: 156
+// CHECK-NEXT: 157
+// CHECK-NEXT: 158
+// CHECK-NEXT: 159
+// CHECK-NEXT: 160
+// CHECK-NEXT: 161
+// CHECK-NEXT: 162
+// CHECK-NEXT: 163
+// CHECK-NEXT: 164
+// CHECK-NEXT: 165
+// CHECK-NEXT: 166
+// CHECK-NEXT: 167
+// CHECK-NEXT: 168
+// CHECK-NEXT: 169
+// CHECK-NEXT: 170
+// CHECK-NEXT: 171
+// CHECK-NEXT: 172
+// CHECK-NEXT: 173
+// CHECK-NEXT: 174
+// CHECK-NEXT: 175
+// CHECK-NEXT: 176
+// CHECK-NEXT: 177
+// CHECK-NEXT: 178
+// CHECK-NEXT: 179
+// CHECK-NEXT: 180
+// CHECK-NEXT: 181
+// CHECK-NEXT: 182
+// CHECK-NEXT: 183
+// CHECK-NEXT: 184
+// CHECK-NEXT: 185
+// CHECK-NEXT: 186
+// CHECK-NEXT: 187
+// CHECK-NEXT: 188
+// CHECK-NEXT: 189
+// CHECK-NEXT: 190
+// CHECK-NEXT: 191
+// CHECK-NEXT: 192
+// CHECK-NEXT: 193
+// CHECK-NEXT: 194
+// CHECK-NEXT: 195
+// CHECK-NEXT: 196
+// CHECK-NEXT: 197
+// CHECK-NEXT: 198
+// CHECK-NEXT: 199
+// CHECK-NEXT: 200
+// CHECK-NEXT: 201
+// CHECK-NEXT: 202
+// CHECK-NEXT: 203
+// CHECK-NEXT: 204
+// CHECK-NEXT: 205
+// CHECK-NEXT: 206
+// CHECK-NEXT: 207
+// CHECK-NEXT: 208
+// CHECK-NEXT: 209
+// CHECK-NEXT: 210
+// CHECK-NEXT: 211
+// CHECK-NEXT: 212
+// CHECK-NEXT: 213
+// CHECK-NEXT: 214
+// CHECK-NEXT: 215
+// CHECK-NEXT: 216
+// CHECK-NEXT: 217
+// CHECK-NEXT: 218
+// CHECK-NEXT: 219
+// CHECK-NEXT: 220
+// CHECK-NEXT: 221
+// CHECK-NEXT: 222
+// CHECK-NEXT: 223
+// CHECK-NEXT: 224
+// CHECK-NEXT: 225
+// CHECK-NEXT: 226
+// CHECK-NEXT: 227
+// CHECK-NEXT: 228
+// CHECK-NEXT: 229
+// CHECK-NEXT: 230
+// CHECK-NEXT: 231
+// CHECK-NEXT: 232
+// CHECK-NEXT: 233
+// CHECK-NEXT: 234
+// CHECK-NEXT: 235
+// CHECK-NEXT: 236
+// CHECK-NEXT: 237
+// CHECK-NEXT: 238
+// CHECK-NEXT: 239
+// CHECK-NEXT: 240
+// CHECK-NEXT: 241
+// CHECK-NEXT: 242
+// CHECK-NEXT: 243
+// CHECK-NEXT: 244
+// CHECK-NEXT: 245
+// CHECK-NEXT: 246
+// CHECK-NEXT: 247
+// CHECK-NEXT: 248
+// CHECK-NEXT: 249
+// CHECK-NEXT: 250
+// CHECK-NEXT: 251
+// CHECK-NEXT: 252
+// CHECK-NEXT: 253
+// CHECK-NEXT: 254
+
+package main
+
+func testBasics() {
+ var i [2]int
+ j := &i
+ i[0] = 123
+ i[1] = 456
+ println(i[0], i[1])
+ println(j[0], j[1])
+ i[0]++
+ i[1]--
+ println(i[0], i[1])
+ println(j[0], j[1])
+}
+
+func testByteIndex() {
+ var a [255]int
+ for i := 0; i < len(a); i++ {
+ a[i] = i
+ }
+ for i := byte(0); i < byte(len(a)); i++ {
+ println(a[i])
+ }
+}
+
+func main() {
+ //testBasics()
+ testByteIndex()
+}
diff --git a/test/execution/arrays/range.go b/test/execution/arrays/range.go
new file mode 100644
index 0000000..37d22f6
--- /dev/null
+++ b/test/execution/arrays/range.go
@@ -0,0 +1,23 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0 0 0
+// CHECK-NEXT: 1 1 1
+// CHECK-NEXT: 2 2 2
+// CHECK-NEXT: 3 0 0
+// CHECK-NEXT: 4 4 4
+// CHECK-NEXT: 0 10
+// CHECK-NEXT: 1 20
+// CHECK-NEXT: 2 30
+
+package main
+
+func main() {
+ a := [...]int{1: 1, 2: 2, 4: 4}
+ for i, val := range a {
+ println(i, val, a[i])
+ }
+ for i, val := range [...]int{10, 20, 30} {
+ println(i, val)
+ }
+}
diff --git a/test/execution/arrays/slice.go b/test/execution/arrays/slice.go
new file mode 100644
index 0000000..e288090
--- /dev/null
+++ b/test/execution/arrays/slice.go
@@ -0,0 +1,14 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 10
+// CHECK-NEXT: 9
+
+package main
+
+func main() {
+ var a [10]int
+ b := a[1:]
+ println(len(a))
+ println(len(b))
+}
diff --git a/test/execution/assignment/arrays.go b/test/execution/assignment/arrays.go
new file mode 100644
index 0000000..30921a4
--- /dev/null
+++ b/test/execution/assignment/arrays.go
@@ -0,0 +1,22 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: +1.000000e+000
+// CHECK-NEXT: +2.000000e+000
+// CHECK-NEXT: +3.000000e+000
+
+package main
+
+var a1 = [...]float32{1.0, 2.0, 3.0}
+
+func main() {
+ var a2 [3]float32
+ a2 = a1
+ println(a2[0])
+ println(a2[1])
+ println(a2[2])
+
+ // broken due to lack of promotion of
+ // stack to heap.
+ //println(a2[0], a2[1], a2[2])
+}
diff --git a/test/execution/assignment/binop.go b/test/execution/assignment/binop.go
new file mode 100644
index 0000000..51793ff
--- /dev/null
+++ b/test/execution/assignment/binop.go
@@ -0,0 +1,21 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 246
+// CHECK-NEXT: 123
+// CHECK-NEXT: 124
+// CHECK-NEXT: 123
+
+package main
+
+func main() {
+ x := 123
+ x *= 2
+ println(x)
+ x /= 2
+ println(x)
+ x += 1
+ println(x)
+ x -= 1
+ println(x)
+}
diff --git a/test/execution/assignment/dereferencing.go b/test/execution/assignment/dereferencing.go
new file mode 100644
index 0000000..607e28d
--- /dev/null
+++ b/test/execution/assignment/dereferencing.go
@@ -0,0 +1,13 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 123
+
+package main
+
+func main() {
+ var x int
+ px := &x
+ *px = 123
+ println(x)
+}
diff --git a/test/execution/assignment/multi.go b/test/execution/assignment/multi.go
new file mode 100644
index 0000000..60b8805
--- /dev/null
+++ b/test/execution/assignment/multi.go
@@ -0,0 +1,40 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 123 456
+// CHECK-NEXT: 456 123
+// CHECK-NEXT: 456 123
+// CHECK-NEXT: 123 456
+// CHECK-NEXT: 123 456
+
+package main
+
+func xyz() (int, int) {
+ return 123, 456
+}
+
+func abc() (int, int) {
+ var a, b = xyz()
+ return a, b
+}
+
+type S struct {
+ a int
+ b int
+}
+
+func main() {
+ a, b := xyz()
+ println(a, b)
+ b, a = abc()
+ println(a, b)
+
+ // swap
+ println(a, b)
+ a, b = b, a
+ println(a, b)
+
+ var s S
+ s.a, s.b = a, b
+ println(s.a, s.b)
+}
diff --git a/test/execution/assignment/namedresult.go b/test/execution/assignment/namedresult.go
new file mode 100644
index 0000000..daf9b00
--- /dev/null
+++ b/test/execution/assignment/namedresult.go
@@ -0,0 +1,42 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 123
+// CHECK-NEXT: 456
+// CHECK-NEXT: 1 2
+// CHECK-NEXT: 666 0
+
+package main
+
+func f1() (x int) {
+ x = 123
+ return
+}
+
+func f2() (x int) {
+ return 456
+}
+
+func f3() (x, y int) {
+ y, x = 2, 1
+ return
+}
+
+func f4() (x, _ int) {
+ x = 666
+ return
+}
+
+func main() {
+ x := f1()
+ println(x)
+ x = f2()
+ println(x)
+
+ var y int
+ x, y = f3()
+ println(x, y)
+
+ x, y = f4()
+ println(x, y)
+}
diff --git a/test/execution/branching/goto.go b/test/execution/branching/goto.go
new file mode 100644
index 0000000..748c666
--- /dev/null
+++ b/test/execution/branching/goto.go
@@ -0,0 +1,43 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 3
+// CHECK-NEXT: 4
+// CHECK-NEXT: 5
+// CHECK-NEXT: 6
+// CHECK-NEXT: 7
+// CHECK-NEXT: 8
+// CHECK-NEXT: 9
+// CHECK-NEXT: done
+// CHECK-NEXT: !
+
+package main
+
+func f1() {
+ goto labeled
+labeled:
+ goto done
+ return
+done:
+ println("!")
+}
+
+func main() {
+ i := 0
+start:
+ if i < 10 {
+ println(i)
+ i++
+ goto start
+ } else {
+ goto end
+ }
+ return
+end:
+ println("done")
+ f1()
+ return
+}
diff --git a/test/execution/branching/labeled.go b/test/execution/branching/labeled.go
new file mode 100644
index 0000000..efff5ef
--- /dev/null
+++ b/test/execution/branching/labeled.go
@@ -0,0 +1,22 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0
+
+package main
+
+func labeledBreak() {
+ var i int
+L:
+ for ; i < 10; i++ {
+ switch {
+ default:
+ break L
+ }
+ }
+ println(i)
+}
+
+func main() {
+ labeledBreak()
+}
diff --git a/test/execution/chan/buffered.go b/test/execution/chan/buffered.go
new file mode 100644
index 0000000..9247ce3
--- /dev/null
+++ b/test/execution/chan/buffered.go
@@ -0,0 +1,41 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0 0
+// CHECK-NEXT: 0 1
+// CHECK-NEXT: 10
+// CHECK-NEXT: 20
+// CHECK-NEXT: 30
+// CHECK-NEXT: 40
+// CHECK-NEXT: 50
+// CHECK-NEXT: 60
+// CHECK-NEXT: 70
+// CHECK-NEXT: 80
+// CHECK-NEXT: 90
+// CHECK-NEXT: 100
+// CHECK-NEXT: -1
+
+package main
+
+func main() {
+ c := make(chan int)
+ println(len(c), cap(c))
+ c1 := make(chan int, 1)
+ println(len(c1), cap(c1))
+ f := func() {
+ n, ok := <-c
+ if ok {
+ c1 <- n * 10
+ } else {
+ c1 <- -1
+ }
+ }
+ for i := 0; i < 10; i++ {
+ go f()
+ c <- i + 1
+ println(<-c1)
+ }
+ go f()
+ close(c)
+ println(<-c1)
+}
diff --git a/test/execution/chan/range.go b/test/execution/chan/range.go
new file mode 100644
index 0000000..eeaedb7
--- /dev/null
+++ b/test/execution/chan/range.go
@@ -0,0 +1,28 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 3
+// CHECK-NEXT: 4
+// CHECK-NEXT: 5
+// CHECK-NEXT: 6
+// CHECK-NEXT: 7
+// CHECK-NEXT: 8
+// CHECK-NEXT: 9
+
+package main
+
+func main() {
+ ch := make(chan int)
+ go func() {
+ for i := 0; i < 10; i++ {
+ ch <- i
+ }
+ close(ch)
+ }()
+ for n := range ch {
+ println(n)
+ }
+}
diff --git a/test/execution/chan/select.go b/test/execution/chan/select.go
new file mode 100644
index 0000000..80872f5
--- /dev/null
+++ b/test/execution/chan/select.go
@@ -0,0 +1,27 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: sent a value
+// CHECK-NEXT: received 123
+// CHECK-NEXT: default
+
+package main
+
+func f1() {
+ c := make(chan int, 1)
+ for i := 0; i < 3; i++ {
+ select {
+ case n, _ := <-c:
+ println("received", n)
+ c = nil
+ case c <- 123:
+ println("sent a value")
+ default:
+ println("default")
+ }
+ }
+}
+
+func main() {
+ f1()
+}
diff --git a/test/execution/chan/self.go b/test/execution/chan/self.go
new file mode 100644
index 0000000..d26ee4a
--- /dev/null
+++ b/test/execution/chan/self.go
@@ -0,0 +1,20 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: true
+
+package main
+
+func main() {
+ ch := make(chan int, uint8(1))
+
+ ch <- 1
+ println(<-ch)
+
+ ch <- 2
+ x, ok := <-ch
+ println(x)
+ println(ok)
+}
diff --git a/test/execution/circulartype.go b/test/execution/circulartype.go
new file mode 100644
index 0000000..01e9ce4
--- /dev/null
+++ b/test/execution/circulartype.go
@@ -0,0 +1,17 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | count 0
+
+package main
+
+type A struct {
+ b1, b2 B
+}
+
+type B struct {
+ a1, a2 *A
+}
+
+func main() {
+ var a A
+ _ = a
+}
diff --git a/test/execution/closures/basic.go b/test/execution/closures/basic.go
new file mode 100644
index 0000000..2a84fc1
--- /dev/null
+++ b/test/execution/closures/basic.go
@@ -0,0 +1,15 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: abc
+
+package main
+
+func cat(a, b string) func(string) string {
+ return func(c string) string { return a + b + c }
+}
+
+func main() {
+ f := cat("a", "b")
+ println(f("c"))
+}
diff --git a/test/execution/closures/issue176.go b/test/execution/closures/issue176.go
new file mode 100644
index 0000000..4a505ca
--- /dev/null
+++ b/test/execution/closures/issue176.go
@@ -0,0 +1,15 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: false
+
+package main
+
+func main() {
+ a := false
+ f := func() {
+ make(chan *bool, 1) <- &a
+ }
+ f()
+ println(a)
+}
diff --git a/test/execution/complex.go b/test/execution/complex.go
new file mode 100644
index 0000000..ad2a87b
--- /dev/null
+++ b/test/execution/complex.go
@@ -0,0 +1,24 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: (+1.000000e+000+2.000000e+000i)
+// CHECK-NEXT: (-1.000000e+000-2.000000e+000i)
+// CHECK-NEXT: true
+// CHECK-NEXT: (+1.000000e+000+2.000000e+000i)
+// CHECK-NEXT: (-1.000000e+000-2.000000e+000i)
+// CHECK-NEXT: true
+
+package main
+
+func main() {
+ var f32 float32 = 1
+ var f64 float64 = 1
+ c64 := complex(f32, f32+1)
+ println(c64)
+ println(-c64)
+ println(c64 == c64)
+ c128 := complex(f64, f64+1)
+ println(c128)
+ println(-c128)
+ println(c128 == c128)
+}
diff --git a/test/execution/const.go b/test/execution/const.go
new file mode 100644
index 0000000..45dbb0f
--- /dev/null
+++ b/test/execution/const.go
@@ -0,0 +1,78 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0
+// CHECK-NEXT: 1
+// CHECK-NEXT: 1 1
+// CHECK-NEXT: 1 1 1 4
+// CHECK-NEXT: 2147483647
+// CHECK-NEXT: -2147483648
+// CHECK-NEXT: 2147483647
+// CHECK-NEXT: -127
+// CHECK-NEXT: false
+// CHECK-NEXT: 10000000000
+// CHECK-NEXT: 1
+// CHECK-NEXT: 3
+
+package main
+
+import "runtime"
+
+const (
+ a = iota * 2
+ A = 1
+ B
+ C
+ D = Z + iota
+)
+
+const (
+ Z = iota
+ Big = 1<<31 - 1
+ Big2 = -2147483648
+ Big3 = 2147483647
+)
+
+const (
+ expbits32 uint = 8
+ bias32 = -1<<(expbits32-1) + 1
+ darwinAMD64 = runtime.GOOS == "darwin" && runtime.GOARCH == "amd64"
+)
+
+func f1() float32 {
+ return 0
+}
+
+func constArrayLen() {
+ a := [...]int{1, 2, 3}
+ const x = len(a)
+ println(x)
+}
+
+func main() {
+ println(a)
+ println(B)
+ println(A, A)
+ println(A, B, C, D)
+ println(Big)
+ println(Big2)
+ println(Big3)
+ println(bias32)
+
+ // Currently fails, due to difference in C printf and Go's println
+ // formatting of the exponent.
+ //println(10 * 1e9)
+ println(darwinAMD64)
+
+ // Test conversion.
+ println(int64(10) * 1e9)
+
+ // Ensure consts work just as well when declared inside a function.
+ const (
+ x_ = iota
+ y_
+ )
+ println(y_)
+
+ constArrayLen()
+}
diff --git a/test/execution/conversions/complex.go b/test/execution/conversions/complex.go
new file mode 100644
index 0000000..91dd366
--- /dev/null
+++ b/test/execution/conversions/complex.go
@@ -0,0 +1,15 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | count 0
+
+package main
+
+func constIntToComplex() complex128 {
+ return 0
+}
+
+func main() {
+ var c64 complex64
+ var c128 complex128
+ c128 = complex128(c64)
+ c64 = complex64(c128)
+}
diff --git a/test/execution/conversions/float.go b/test/execution/conversions/float.go
new file mode 100644
index 0000000..def11d1
--- /dev/null
+++ b/test/execution/conversions/float.go
@@ -0,0 +1,103 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: -123
+// CHECK-NEXT: -123
+// CHECK-NEXT: -123
+// CHECK-NEXT: -123
+// CHECK-NEXT: 133
+// CHECK-NEXT: 65413
+// CHECK-NEXT: 4294967173
+// CHECK-NEXT: 18446744073709551493
+// CHECK-NEXT: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: 123
+// CHECK-NEXT: -123
+// CHECK-NEXT: -123
+// CHECK-NEXT: -123
+// CHECK-NEXT: -123
+// CHECK-NEXT: 133
+// CHECK-NEXT: 65413
+// CHECK-NEXT: 4294967173
+// CHECK-NEXT: 18446744073709551493
+// CHECK-NEXT: +1.230000e+002
+// CHECK-NEXT: +1.230000e+002
+// CHECK-NEXT: +1.230000e+002
+// CHECK-NEXT: +1.230000e+002
+// CHECK-NEXT: +1.234500e+004
+// CHECK-NEXT: +1.234500e+004
+// CHECK-NEXT: +1.234500e+004
+// CHECK-NEXT: +1.234500e+004
+// CHECK-NEXT: +1.234560e+005
+// CHECK-NEXT: +1.234560e+005
+// CHECK-NEXT: +1.234560e+005
+// CHECK-NEXT: +1.234560e+005
+// CHECK-NEXT: +1.234568e+010
+// CHECK-NEXT: +1.234568e+010
+// CHECK-NEXT: +1.234568e+010
+// CHECK-NEXT: +1.234568e+010
+
+package main
+
+func main() {
+ // float to int
+ for _, f32 := range []float32{123.456, -123.456} {
+ println(int8(f32))
+ println(int16(f32))
+ println(int32(f32))
+ println(int64(f32))
+ println(uint8(f32))
+ println(uint16(f32))
+ println(uint32(f32))
+ println(uint64(f32))
+ }
+ for _, f64 := range []float64{123.456, -123.456} {
+ println(int8(f64))
+ println(int16(f64))
+ println(int32(f64))
+ println(int64(f64))
+ println(uint8(f64))
+ println(uint16(f64))
+ println(uint32(f64))
+ println(uint64(f64))
+ }
+
+ // int to float
+ var i8 int8 = 123
+ println(float32(i8))
+ println(float64(i8))
+ var ui8 uint8 = 123
+ println(float32(ui8))
+ println(float64(ui8))
+ var i16 int32 = 12345
+ println(float32(i16))
+ println(float64(i16))
+ var ui16 uint32 = 12345
+ println(float32(ui16))
+ println(float64(ui16))
+ var i32 int32 = 123456
+ println(float32(i32))
+ println(float64(i32))
+ var ui32 uint32 = 123456
+ println(float32(ui32))
+ println(float64(ui32))
+ var i64 int64 = 12345678910
+ println(float32(i64))
+ println(float64(i64))
+ var ui64 uint64 = 12345678910
+ println(float32(ui64))
+ println(float64(ui64))
+}
diff --git a/test/execution/conversions/int.go b/test/execution/conversions/int.go
new file mode 100644
index 0000000..092003b
--- /dev/null
+++ b/test/execution/conversions/int.go
@@ -0,0 +1,44 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 2147483647
+// CHECK-NEXT: 2147483647
+// CHECK-NEXT: 2147483647
+// CHECK-NEXT: 2147483648
+// CHECK-NEXT: -2147483648
+// CHECK-NEXT: 18446744071562067968
+// CHECK-NEXT: 0
+// CHECK-NEXT: 0
+// CHECK-NEXT: 0
+// CHECK-NEXT: -1
+// CHECK-NEXT: 4294967295
+// CHECK-NEXT: 4294967295
+// CHECK-NEXT: 0
+// CHECK-NEXT: 0
+// CHECK-NEXT: 0
+// CHECK-NEXT: 1
+// CHECK-NEXT: 1
+// CHECK-NEXT: 1
+
+package main
+
+func signed(i32 int32) {
+ println(uint32(i32))
+ println(int64(i32))
+ println(uint64(i32))
+}
+
+func unsigned(u32 uint32) {
+ println(int32(u32))
+ println(int64(u32))
+ println(uint64(u32))
+}
+
+func main() {
+ signed(1<<31 - 1)
+ signed(-1 << 31)
+ signed(0)
+ unsigned(1<<32 - 1)
+ unsigned(0)
+ unsigned(1)
+}
diff --git a/test/execution/conversions/sameunderlying.go b/test/execution/conversions/sameunderlying.go
new file mode 100644
index 0000000..8208cfb
--- /dev/null
+++ b/test/execution/conversions/sameunderlying.go
@@ -0,0 +1,15 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | count 0
+
+package main
+
+type X struct{}
+type Y X
+
+func main() {
+ var x X
+ px := &x
+ py := (*Y)(&x)
+ py = (*Y)(px)
+ _ = py
+}
diff --git a/test/execution/defer.go b/test/execution/defer.go
new file mode 100644
index 0000000..ba1b632
--- /dev/null
+++ b/test/execution/defer.go
@@ -0,0 +1,125 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: f2.1
+// CHECK-NEXT: f5
+// CHECK-NEXT: recovered no error
+// CHECK-NEXT: f5
+// CHECK-NEXT: recovered: meep meep
+// CHECK-NEXT: 888
+// CHECK-NEXT: f5
+// CHECK-NEXT: recovered no error
+// CHECK-NEXT: f5
+// CHECK-NEXT: recovered no error
+// CHECK-NEXT: 888
+// CHECK-NEXT: 456
+// CHECK-NEXT: 999
+// CHECK-NEXT: 999
+// CHECK-NEXT: 123
+// CHECK-NEXT: 999
+// CHECK-NEXT: 999
+// CHECK-NEXT: 246
+// CHECK-NEXT: f2.2
+// CHECK-NEXT: f2.3
+// CHECK-NEXT: f1.1
+// CHECK-NEXT: f1.2
+// CHECK-NEXT: recovered: second
+// CHECK-NEXT: ahoy
+
+package main
+
+type T struct {
+ value int
+}
+
+type T1 struct {
+ T
+}
+
+func (t T) abc() {
+ println(t.value)
+}
+
+func (t *T) def() {
+ println(t.value)
+}
+
+func (t *T) ghi(v int) {
+ println(v)
+}
+
+func printerr(err interface{}) {
+ if err != nil {
+ println("recovered:", err.(string))
+ } else {
+ println("recovered no error")
+ }
+}
+
+func f6() {
+ defer func() { printerr(recover()) }()
+ defer func() { panic("second") }()
+ panic("first")
+}
+
+func f5(panic_ bool) {
+ var t1 T1
+ t1.T.value = 888
+ defer t1.abc()
+ var f func(int)
+ f = func(recursion int) {
+ if recursion > 0 {
+ f(recursion - 1)
+ return
+ }
+ println("f5")
+ printerr(recover())
+ }
+ defer f(0) // will recover (after f(1))
+ defer f(1) // won't recover
+ if panic_ {
+ panic("meep meep")
+ }
+}
+
+func f4() {
+ var a T = T{999}
+ var b *T = &a
+ defer a.abc()
+ defer a.def()
+ defer a.ghi(123)
+ defer b.abc()
+ defer b.def()
+ defer b.ghi(456)
+ f5(true)
+ f5(false) // verify the recover in f5 works
+}
+
+func f3() (a int) {
+ defer func() { a *= 2 }()
+ f4()
+ return 123
+}
+
+func f2() {
+ defer func() { println("f2.3") }()
+ defer func(s string) { println(s) }("f2.2")
+ println("f2.1")
+ println(f3())
+}
+
+func f1() {
+ defer func() { println("f1.2") }()
+ defer func() { println("f1.1") }()
+ f2()
+}
+
+func builtins() {
+ defer println("ahoy")
+}
+
+func main() {
+ f1()
+ f6()
+ builtins()
+}
diff --git a/test/execution/errors/recover.go b/test/execution/errors/recover.go
new file mode 100644
index 0000000..70ee2ac
--- /dev/null
+++ b/test/execution/errors/recover.go
@@ -0,0 +1,11 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: (0x0,0x0)
+
+package main
+
+func main() {
+ err := recover()
+ println(err)
+}
diff --git a/test/execution/for/branch.go b/test/execution/for/branch.go
new file mode 100644
index 0000000..5484c89
--- /dev/null
+++ b/test/execution/for/branch.go
@@ -0,0 +1,41 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 3
+// CHECK-NEXT: 0
+// CHECK-NEXT: 2
+// CHECK-NEXT: 3
+// CHECK-NEXT: 4
+
+package main
+
+func main() {
+ for i := 0; true; i++ {
+ println(i)
+ if i == 2 {
+ println(3)
+ break
+ }
+ println(1)
+ i++
+ continue
+ println("unreachable")
+ }
+
+ nums := [...]int{0, 1, 2, 3, 4, 5}
+ for n := range nums {
+ if n == 1 {
+ continue
+ }
+ println(n)
+ if n == 4 {
+ {
+ break
+ }
+ println("!")
+ }
+ }
+}
diff --git a/test/execution/fun.go b/test/execution/fun.go
new file mode 100644
index 0000000..c70fe69
--- /dev/null
+++ b/test/execution/fun.go
@@ -0,0 +1,28 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 246
+// CHECK-NEXT: 123 true false
+
+// vim: set ft=go :
+
+package main
+
+func test() func() int {
+ return blah
+}
+
+func blah() int {
+ return 123
+}
+
+func sret() (int, bool, bool) {
+ return 123, true, false
+}
+
+func main() {
+ f := test()
+ println(2 * f())
+ a, b, c := sret()
+ println(a, b, c)
+}
diff --git a/test/execution/functions/compare.go b/test/execution/functions/compare.go
new file mode 100644
index 0000000..9daa062
--- /dev/null
+++ b/test/execution/functions/compare.go
@@ -0,0 +1,26 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: true
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+// CHECK-NEXT: false
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+
+package main
+
+func main() {
+ var f func()
+ println(f == nil)
+ println(f != nil)
+ println(nil == f)
+ println(nil != f)
+ f = func() {}
+ println(f == nil)
+ println(f != nil)
+ println(nil == f)
+ println(nil != f)
+}
diff --git a/test/execution/functions/multivalue.go b/test/execution/functions/multivalue.go
new file mode 100644
index 0000000..a3ce79b
--- /dev/null
+++ b/test/execution/functions/multivalue.go
@@ -0,0 +1,28 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 1
+// CHECK-NEXT: 20
+// CHECK-NEXT: extra: 10
+
+package main
+
+func swap(a, b int) (int, int) {
+ return b, a
+}
+
+func sub(a, b int) int {
+ return a - b
+}
+
+func printint(a int, extra ...int) {
+ println(a)
+ for _, b := range extra {
+ println("extra:", b)
+ }
+}
+
+func main() {
+ println(sub(swap(1, 2)))
+ printint(swap(10, 20))
+}
diff --git a/test/execution/functions/unreachable.go b/test/execution/functions/unreachable.go
new file mode 100644
index 0000000..436012c
--- /dev/null
+++ b/test/execution/functions/unreachable.go
@@ -0,0 +1,53 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: f1
+// CHECK-NEXT: f2
+// CHECK-NEXT: f3
+// CHECK-NEXT: f4
+// CHECK-NEXT: 123
+
+package main
+
+func f1() {
+ if true {
+ println("f1")
+ return
+ }
+ for {
+ }
+}
+
+func f2() {
+ defer func() { println("f2") }()
+ if true {
+ return
+ }
+ for {
+ }
+}
+
+func f3() int {
+ if true {
+ println("f3")
+ return 123
+ }
+ for {
+ }
+}
+
+func f4() int {
+ defer func() { println("f4") }()
+ if true {
+ return 123
+ }
+ for {
+ }
+}
+
+func main() {
+ f1()
+ f2()
+ f3()
+ println(f4())
+}
diff --git a/test/execution/go.go b/test/execution/go.go
new file mode 100644
index 0000000..2bfe775
--- /dev/null
+++ b/test/execution/go.go
@@ -0,0 +1,34 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: hello from T 1
+// CHECK-NEXT: hello from T 2
+
+package main
+
+type T struct {
+ val int
+}
+
+func (t T) Hello(done chan bool) {
+ println("hello from T", t.val)
+ done <- true
+}
+
+type I interface {
+ Hello(chan bool)
+}
+
+func main() {
+ done := make(chan bool)
+
+ t := T{1}
+ go t.Hello(done)
+ <-done
+
+ var i I = T{2}
+ go i.Hello(done)
+ <-done
+
+ go println("hello builtin")
+}
diff --git a/test/execution/if/lazy.go b/test/execution/if/lazy.go
new file mode 100644
index 0000000..5171a77
--- /dev/null
+++ b/test/execution/if/lazy.go
@@ -0,0 +1,46 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: False()
+// CHECK-NEXT: False()
+// CHECK-NEXT: false
+// CHECK-NEXT: False()
+// CHECK-NEXT: True()
+// CHECK-NEXT: true
+// CHECK-NEXT: True()
+// CHECK-NEXT: true
+// CHECK-NEXT: True()
+// CHECK-NEXT: true
+// CHECK-NEXT: False()
+// CHECK-NEXT: false
+// CHECK-NEXT: False()
+// CHECK-NEXT: false
+// CHECK-NEXT: True()
+// CHECK-NEXT: False()
+// CHECK-NEXT: false
+// CHECK-NEXT: True()
+// CHECK-NEXT: True()
+// CHECK-NEXT: true
+
+package main
+
+func False() bool {
+ println("False()")
+ return false
+}
+
+func True() bool {
+ println("True()")
+ return true
+}
+
+func main() {
+ println(False() || False())
+ println(False() || True())
+ println(True() || False())
+ println(True() || True())
+ println(False() && False())
+ println(False() && True())
+ println(True() && False())
+ println(True() && True())
+}
diff --git a/test/execution/init.go b/test/execution/init.go
new file mode 100644
index 0000000..46c7fa1
--- /dev/null
+++ b/test/execution/init.go
@@ -0,0 +1,17 @@
+// RUN: llgo -o %t %s %p/Inputs/init2.go
+// RUN: %t 2>&1 | FileCheck %s
+
+package main
+
+// CHECK-DAG: do some other stuff before main
+// (a second init function, printing the line above, is defined in Inputs/init2.go)
+
+// CHECK-DAG: do some stuff before main
+func init() {
+ println("do some stuff before main")
+}
+
+// CHECK: main has been called
+func main() {
+ println("main has been called")
+}
diff --git a/test/execution/interfaces/assert.go b/test/execution/interfaces/assert.go
new file mode 100644
index 0000000..4c5db7d
--- /dev/null
+++ b/test/execution/interfaces/assert.go
@@ -0,0 +1,61 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: x is nil
+// CHECK-NEXT: i2v: 123456
+// CHECK-NEXT: !
+// CHECK-NEXT: (*X).F1: 123456
+
+package main
+
+type X struct{ x int }
+
+func (x *X) F1() { println("(*X).F1:", x.x) }
+func (x *X) F2() { println("(*X).F2") }
+
+type I interface {
+ F1()
+ F2()
+}
+
+func main() {
+ var x interface{}
+
+ // x is nil. Let's make sure an assertion on it
+ // won't cause a panic.
+ if x, ok := x.(int32); ok {
+ println("i2v:", x)
+ }
+ if x == nil {
+ println("x is nil")
+ }
+
+ x = int32(123456)
+
+ // Let's try an interface-to-value assertion.
+ if x, ok := x.(int32); ok {
+ println("i2v:", x)
+ }
+ if x, ok := x.(int64); ok {
+ println("i2v:", x)
+ }
+
+ // This will fail the assertion.
+ if i, ok := x.(I); ok {
+ i.F1()
+ _ = i
+ } else {
+ println("!")
+ }
+
+ // Assign an *X, which should pass the assertion.
+ x_ := new(X)
+ x_.x = 123456
+	x = x_ // equivalent to x = &X{x: 123456}
+ if i, ok := x.(I); ok {
+ i.F1()
+ _ = i
+ } else {
+ println("!")
+ }
+}
diff --git a/test/execution/interfaces/basic.go b/test/execution/interfaces/basic.go
new file mode 100644
index 0000000..6c754da
--- /dev/null
+++ b/test/execution/interfaces/basic.go
@@ -0,0 +1,43 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: expected: y != z
+
+package main
+
+type any interface{}
+
+type Stringer interface {
+ String() string
+}
+
+type lessThanAWord struct {
+ a byte
+}
+
+func (l lessThanAWord) String() string {
+ return "!"
+}
+
+func makeAStringer() Stringer {
+ return lessThanAWord{}
+}
+
+func main() {
+ var x1, x2 int = 1, 2
+ var y any = x1
+ var z any = x2
+ if y != z {
+ println("expected: y != z")
+ } else {
+ println("unexpected: y == z")
+ }
+ /*
+ if y == x1 {
+ println("expected: y == x1")
+ } else {
+ println("unexpected: y == x1")
+ }
+ */
+ //println(y.(int))
+}
diff --git a/test/execution/interfaces/comparei2i.go b/test/execution/interfaces/comparei2i.go
new file mode 100644
index 0000000..5a1f266
--- /dev/null
+++ b/test/execution/interfaces/comparei2i.go
@@ -0,0 +1,30 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: true
+// CHECK-NEXT: true
+
+package main
+
+import "unsafe"
+
+type I interface {
+ X()
+}
+
+type T int
+
+func (t T) X() {
+}
+
+func main() {
+ var highbit uint32 = 1 << 31
+ var pos0 float32 = 0
+ var neg0 float32 = *(*float32)(unsafe.Pointer(&highbit))
+ var i1 interface{} = pos0
+ var i2 interface{} = neg0
+ println(i1 == i2)
+ var i3 interface{} = T(123)
+ var i4 I = T(123)
+ println(i3 == i4)
+}
diff --git a/test/execution/interfaces/comparei2v.go b/test/execution/interfaces/comparei2v.go
new file mode 100644
index 0000000..6547360
--- /dev/null
+++ b/test/execution/interfaces/comparei2v.go
@@ -0,0 +1,13 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: true
+// CHECK-NEXT: false
+
+package main
+
+func main() {
+ var x interface{} = 123
+ println(x == 123)
+ println(x != 123)
+}
diff --git a/test/execution/interfaces/e2i_conversion.go b/test/execution/interfaces/e2i_conversion.go
new file mode 100644
index 0000000..6596e78
--- /dev/null
+++ b/test/execution/interfaces/e2i_conversion.go
@@ -0,0 +1,22 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | count 0
+
+package main
+
+import "io"
+
+type rdr struct{}
+
+func (r rdr) Read(b []byte) (int, error) {
+ return 0, nil
+}
+
+func F(i interface{}) {
+ _ = i.(io.Reader)
+}
+
+func main() {
+ var r rdr
+ F(r)
+ F(&r)
+}
diff --git a/test/execution/interfaces/embedded.go b/test/execution/interfaces/embedded.go
new file mode 100644
index 0000000..053afb1
--- /dev/null
+++ b/test/execution/interfaces/embedded.go
@@ -0,0 +1,32 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: A
+// CHECK-NEXT: B
+
+package main
+
+type BI interface {
+ B()
+}
+
+type AI interface {
+ A()
+ BI
+}
+
+type S struct{}
+
+func (s S) A() {
+ println("A")
+}
+
+func (s S) B() {
+ println("B")
+}
+
+func main() {
+ var ai AI = S{}
+ ai.A()
+ ai.B()
+}
diff --git a/test/execution/interfaces/error.go b/test/execution/interfaces/error.go
new file mode 100644
index 0000000..1724436
--- /dev/null
+++ b/test/execution/interfaces/error.go
@@ -0,0 +1,43 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: !!!! 123
+// CHECK-NEXT: errno 123
+
+package main
+
+var errors = [...]string{}
+
+func itoa(val int) string { // do it here rather than with fmt to avoid dependency
+ if val < 0 {
+ return "-" + itoa(-val)
+ }
+ var buf [32]byte // big enough for int64
+ i := len(buf) - 1
+ for val >= 10 {
+ buf[i] = byte(val%10 + '0')
+ i--
+ val /= 10
+ }
+ buf[i] = byte(val + '0')
+ return string(buf[i:])
+}
+
+type Errno uintptr
+
+func (e Errno) Error() string {
+ println("!!!!", uintptr(e))
+ if 0 <= int(e) && int(e) < len(errors) {
+ s := errors[e]
+ if s != "" {
+ return s
+ }
+ }
+ return "errno " + itoa(int(e))
+}
+
+func main() {
+ e := Errno(123)
+ i := (interface{})(e)
+ println(i.(error).Error())
+}
diff --git a/test/execution/interfaces/i2i_conversion.go b/test/execution/interfaces/i2i_conversion.go
new file mode 100644
index 0000000..b55d840
--- /dev/null
+++ b/test/execution/interfaces/i2i_conversion.go
@@ -0,0 +1,33 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 666
+// CHECK-NEXT: The Beast
+
+package main
+
+type Numbered interface {
+ Number() int
+}
+
+type Named interface {
+ Name() string
+}
+
+type Beast struct{}
+
+func (b *Beast) Number() int {
+ return 666
+}
+
+func (b *Beast) Name() string {
+ return "The Beast"
+}
+
+func main() {
+ var b Beast
+ var numbered Numbered = &b
+ var named Named = numbered.(Named)
+ println(numbered.Number())
+ println(named.Name())
+}
diff --git a/test/execution/interfaces/import.go b/test/execution/interfaces/import.go
new file mode 100644
index 0000000..3305ee6
--- /dev/null
+++ b/test/execution/interfaces/import.go
@@ -0,0 +1,16 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+package main
+
+import "syscall"
+
+type Signal interface {
+ Signal()
+}
+
+func main() {
+ var s Signal = syscall.SIGINT
+ // CHECK: ({{.*}},{{.*}})
+ println(s)
+}
diff --git a/test/execution/interfaces/methods.go b/test/execution/interfaces/methods.go
new file mode 100644
index 0000000..5fb704c
--- /dev/null
+++ b/test/execution/interfaces/methods.go
@@ -0,0 +1,53 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: X()
+// CHECK-NEXT: Y()
+// CHECK-NEXT: X()
+// CHECK-NEXT: Y()
+// CHECK-NEXT: X()
+
+package main
+
+type Stringer interface {
+ String() string
+}
+
+type X int
+type Y int
+
+type Z1 struct {
+ X
+}
+
+type Z2 struct {
+ Stringer
+}
+
+func (x X) String() string {
+ return "X()"
+}
+
+func (y *Y) String() string {
+ return "Y()"
+}
+
+func makeX() X {
+ return X(0)
+}
+
+func main() {
+ var z Stringer = X(0)
+ println(z.String())
+
+ z = new(Y)
+ println(z.String())
+
+ z = Z1{}
+ println(z.String())
+
+ z = Z2{new(Y)}
+ println(z.String())
+
+ println(makeX().String())
+}
diff --git a/test/execution/interfaces/static_conversion.go b/test/execution/interfaces/static_conversion.go
new file mode 100644
index 0000000..e63f10d
--- /dev/null
+++ b/test/execution/interfaces/static_conversion.go
@@ -0,0 +1,35 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 666
+// CHECK-NEXT: 3
+
+package main
+
+type Blah interface{}
+type Numbered interface {
+ Blah
+ Number() int
+}
+
+type Beast struct{}
+
+func (b *Beast) Number() int {
+ return 666
+}
+
+type MagicNumber int
+
+func (m MagicNumber) Number() int {
+ return int(m)
+}
+
+func main() {
+ var b Beast
+ var m MagicNumber = 3
+ var n Numbered = &b
+ println(n.Number())
+
+ n = m
+ println(n.Number())
+}
diff --git a/test/execution/interfaces/wordsize.go b/test/execution/interfaces/wordsize.go
new file mode 100644
index 0000000..e2de5d5
--- /dev/null
+++ b/test/execution/interfaces/wordsize.go
@@ -0,0 +1,40 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: StringStringer(abc)
+// CHECK-NEXT: abc 1 2 3
+
+package main
+
+type Stringer interface {
+ String() string
+}
+
+type StringStringer string
+
+func (s StringStringer) String() string {
+ return "StringStringer(" + string(s) + ")"
+}
+
+func (s StringStringer) MethodWithArgs(a, b, c int) {
+ println(s, a, b, c)
+}
+
+type I interface {
+ MethodWithArgs(a, b, c int)
+}
+
+func testLargerThanWord() {
+ // string is larger than a word. Make sure it works
+ // well as a method receiver when using interfaces.
+ var s Stringer = StringStringer("abc")
+ println(s.String())
+
+ // Test calling a method which takes parameters
+ // beyond the receiver.
+ s.(I).MethodWithArgs(1, 2, 3)
+}
+
+func main() {
+ testLargerThanWord()
+}
diff --git a/test/execution/literals/array.go b/test/execution/literals/array.go
new file mode 100644
index 0000000..975ec02
--- /dev/null
+++ b/test/execution/literals/array.go
@@ -0,0 +1,78 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 9223372036854775808 -63 false
+// CHECK-NEXT: 11529215046068469760 -60 false
+// CHECK-NEXT: 14411518807585587200 -57 false
+// CHECK-NEXT: 18014398509481984000 -54 false
+// CHECK-NEXT: 11258999068426240000 -50 false
+// CHECK-NEXT: 14073748835532800000 -47 false
+// CHECK-NEXT: 17592186044416000000 -44 false
+// CHECK-NEXT: 10995116277760000000 -40 false
+// CHECK-NEXT: 0 0
+// CHECK-NEXT: 1 0
+// CHECK-NEXT: 2 1
+// CHECK-NEXT: 3 0
+// CHECK-NEXT: 4 2
+// CHECK-NEXT: 5 0
+// CHECK-NEXT: 6 3
+// CHECK-NEXT: 7 0
+// CHECK-NEXT: 8 4
+// CHECK-NEXT: 9 0
+// CHECK-NEXT: 0 1
+// CHECK-NEXT: 1 2
+
+package main
+
+// An extFloat represents an extended floating-point number, with more
+// precision than a float64. It does not try to save bits: the
+// number represented by the structure is mant*(2^exp), with a negative
+// sign if neg is true.
+type extFloat struct {
+ mant uint64
+ exp int
+ neg bool
+}
+
+var smallPowersOfTen = [...]extFloat{
+ {1 << 63, -63, false}, // 1
+ {0xa << 60, -60, false}, // 1e1
+ {0x64 << 57, -57, false}, // 1e2
+ {0x3e8 << 54, -54, false}, // 1e3
+ {0x2710 << 50, -50, false}, // 1e4
+ {0x186a0 << 47, -47, false}, // 1e5
+ {0xf4240 << 44, -44, false}, // 1e6
+ {0x989680 << 40, -40, false}, // 1e7
+}
+
+var arrayWithHoles = [10]int{
+ 2: 1,
+ 4: 2,
+ 6: 3,
+ 8: 4,
+}
+
+type namedInt int32
+
+const N0 namedInt = 0
+const N1 namedInt = 1
+
+var arrayWithNamedIndices = [...]int{
+ N0: 1,
+ N1: 2,
+}
+
+func main() {
+ for i := range smallPowersOfTen {
+ s := smallPowersOfTen[i]
+ println(s.mant, s.exp, s.neg)
+ }
+
+ for i, value := range arrayWithHoles {
+ println(i, value)
+ }
+
+ for i, value := range arrayWithNamedIndices {
+ println(i, value)
+ }
+}
diff --git a/test/execution/literals/func.go b/test/execution/literals/func.go
new file mode 100644
index 0000000..b8dbef3
--- /dev/null
+++ b/test/execution/literals/func.go
@@ -0,0 +1,15 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: true
+// CHECK-NEXT: false
+
+package main
+
+func main() {
+ f := func(x bool) {
+ println(x)
+ }
+ f(true)
+ f(false)
+}
diff --git a/test/execution/literals/map.go b/test/execution/literals/map.go
new file mode 100644
index 0000000..32173f5
--- /dev/null
+++ b/test/execution/literals/map.go
@@ -0,0 +1,24 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: false
+// CHECK-NEXT: 2
+// CHECK-NEXT: 1 0 3
+// CHECK-NEXT: 0.1
+// CHECK-NEXT: 0.2
+// CHECK-NEXT: 0.3
+
+package main
+
+func main() {
+ type IntMap map[int]int
+ m := IntMap{0: 1, 2: 3}
+ println(m == nil)
+ println(len(m))
+ println(m[0], m[1], m[2])
+
+ f32tostr := map[float32]string{0.1: "0.1", 0.2: "0.2", 0.3: "0.3"}
+ println(f32tostr[0.1])
+ println(f32tostr[0.2])
+ println(f32tostr[0.3])
+}
diff --git a/test/execution/literals/slice.go b/test/execution/literals/slice.go
new file mode 100644
index 0000000..dbb02bf
--- /dev/null
+++ b/test/execution/literals/slice.go
@@ -0,0 +1,21 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: abc
+// CHECK-NEXT: 123
+// CHECK-NEXT: abc
+// CHECK-NEXT: 123
+
+package main
+
+func main() {
+ x := []string{"abc", "123"}
+ println(x[0])
+ println(x[1])
+
+ // Elements are composite literals, so the '&' can be elided.
+ type S struct{ string }
+ y := []*S{{"abc"}, {"123"}}
+ println(y[0].string)
+ println(y[1].string)
+}
diff --git a/test/execution/literals/struct.go b/test/execution/literals/struct.go
new file mode 100644
index 0000000..4fede95
--- /dev/null
+++ b/test/execution/literals/struct.go
@@ -0,0 +1,62 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 1 2
+// CHECK-NEXT: 1 2
+// CHECK-NEXT: 0 1 2
+// CHECK-NEXT: 1 2
+// CHECK-NEXT: 3 4
+
+package main
+
+type E struct {
+ e *E
+}
+
+type S struct {
+ *E
+ a, b int
+}
+
+type File struct {
+}
+
+type Reader struct {
+}
+
+type Response struct {
+}
+
+type reader struct {
+ *Reader
+ fd *File
+ resp *Response
+}
+
+type Range32 struct {
+ Lo uint32
+ Hi uint32
+ Stride uint32
+}
+
+func main() {
+ s := &S{nil, 1, 2}
+ println(s.a, s.b)
+ s = &S{a: 1, b: 2}
+ println(s.a, s.b)
+
+ _ = &reader{}
+
+ r := Range32{
+ Lo: 0,
+ Stride: 2,
+ Hi: 1,
+ }
+ println(r.Lo, r.Hi, r.Stride)
+
+ // slice of structs
+ ss := []S{{nil, 1, 2}, {nil, 3, 4}}
+ for _, s := range ss {
+ println(s.a, s.b)
+ }
+}
diff --git a/test/execution/maps/delete.go b/test/execution/maps/delete.go
new file mode 100644
index 0000000..6879974
--- /dev/null
+++ b/test/execution/maps/delete.go
@@ -0,0 +1,19 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 1
+// CHECK-NEXT: 1 1
+// CHECK-NEXT: 0 0
+
+package main
+
+func main() {
+ m := make(map[int]int)
+ delete(m, 0) // no-op
+ m[0] = 1
+ println(len(m))
+ delete(m, 1) // no-op
+ println(len(m), m[0])
+ delete(m, 0) // delete element in map
+ println(len(m), m[0])
+}
diff --git a/test/execution/maps/insert.go b/test/execution/maps/insert.go
new file mode 100644
index 0000000..90b31cc
--- /dev/null
+++ b/test/execution/maps/insert.go
@@ -0,0 +1,29 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0
+// CHECK-NEXT: 0
+// CHECK-NEXT: 1
+// CHECK-NEXT: 456
+// CHECK-NEXT: 1
+// CHECK-NEXT: 789
+
+package main
+
+func main() {
+ {
+ var m map[int]int
+ println(len(m)) // 0
+ println(m[123]) // 0, despite map being nil
+ }
+
+ {
+ m := make(map[int]int)
+ m[123] = 456
+ println(len(m)) // 1
+ println(m[123])
+ m[123] = 789
+ println(len(m)) // 1
+ println(m[123])
+ }
+}
diff --git a/test/execution/maps/lookup.go b/test/execution/maps/lookup.go
new file mode 100644
index 0000000..69d2878
--- /dev/null
+++ b/test/execution/maps/lookup.go
@@ -0,0 +1,30 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0 false
+// CHECK-NEXT: 1 true
+// CHECK-NEXT: 1 true
+// CHECK-NEXT: 1 true
+
+package main
+
+func main() {
+ m := make(map[int]int)
+ v, ok := m[8]
+ println(v, ok)
+ m[8] = 1
+ v, ok = m[8]
+ println(v, ok)
+
+ type S struct{ s1, s2 string }
+ sm := make(map[S]int)
+ sm[S{"ABC", "DEF"}] = 1
+ sv, ok := sm[S{string([]byte{65, 66, 67}), string([]byte{68, 69, 70})}]
+ println(sv, ok)
+
+ type A [2]string
+ am := make(map[A]int)
+ am[A{"ABC", "DEF"}] = 1
+ av, ok := am[A{string([]byte{65, 66, 67}), string([]byte{68, 69, 70})}]
+ println(av, ok)
+}
diff --git a/test/execution/maps/range.go b/test/execution/maps/range.go
new file mode 100644
index 0000000..f3dc03f
--- /dev/null
+++ b/test/execution/maps/range.go
@@ -0,0 +1,48 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 0
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 3
+// CHECK-NEXT: 4
+// CHECK-NEXT: 5
+// CHECK-NEXT: 0 3
+// CHECK-NEXT: 1 4
+// CHECK-NEXT: 2 5
+// CHECK-NEXT: 1
+// CHECK-NEXT: done
+
+package main
+
+func main() {
+ defer println("done")
+ m := make(map[int]int)
+ m[0] = 3
+ m[1] = 4
+ m[2] = 5
+ for k := range m {
+ println(k)
+ }
+ for k, _ := range m {
+ println(k)
+ }
+ for _, v := range m {
+ println(v)
+ }
+ for k, v := range m {
+ println(k, v)
+ }
+
+ // test deletion.
+ i := 0
+ for k, _ := range m {
+ i++
+ delete(m, (k+1)%3)
+ delete(m, (k+2)%3)
+ }
+ println(i)
+}
diff --git a/test/execution/methods/methodvalues.go b/test/execution/methods/methodvalues.go
new file mode 100644
index 0000000..cf8a980
--- /dev/null
+++ b/test/execution/methods/methodvalues.go
@@ -0,0 +1,64 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 246
+// CHECK-NEXT: T2.f()
+// CHECK-NEXT: 10
+// CHECK-NEXT: abc
+
+package main
+
+type T1 struct {
+ value int
+}
+
+func (t *T1) f(m int) int {
+ return m * t.value
+}
+
+func f1() {
+ var t T1
+ var f func(int) int = t.f
+ t.value = 2
+ println(f(123))
+}
+
+type T2 struct{}
+
+func (T2) f() {
+ println("T2.f()")
+}
+
+func f2() {
+ var f func() = T2{}.f
+ f()
+}
+
+type T3 complex128
+
+func (t T3) f() int {
+ return int(real(t))
+}
+
+func f3() {
+ var f func() int = T3(10).f
+ println(f())
+}
+
+type T4 string
+
+func (t T4) f() string {
+ return string(t)
+}
+
+func f4() {
+ var f func() string = T4("abc").f
+ println(f())
+}
+
+func main() {
+ f1()
+ f2()
+ f3()
+ f4()
+}
diff --git a/test/execution/methods/nilrecv.go b/test/execution/methods/nilrecv.go
new file mode 100644
index 0000000..defe2f4
--- /dev/null
+++ b/test/execution/methods/nilrecv.go
@@ -0,0 +1,31 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: true
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+// CHECK-NEXT: false
+
+package main
+
+type T1 int
+
+func (t *T1) t1() { println(t == nil) }
+
+func constNilRecv() {
+ (*T1)(nil).t1()
+}
+
+func nonConstNilRecv() {
+ var v1 T1
+ v1.t1()
+ var v2 *T1
+ v2.t1()
+ v2 = &v1
+ v2.t1()
+}
+
+func main() {
+ constNilRecv()
+ nonConstNilRecv()
+}
diff --git a/test/execution/methods/selectors.go b/test/execution/methods/selectors.go
new file mode 100644
index 0000000..bcd4173
--- /dev/null
+++ b/test/execution/methods/selectors.go
@@ -0,0 +1,45 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: F1
+// CHECK-NEXT: F2
+// CHECK-NEXT: F1
+// CHECK-NEXT: F2
+
+package main
+
+type S1 struct{}
+type S2 struct {
+ S1
+}
+
+func (s S1) F1() {
+ println("F1")
+}
+
+func (s *S2) F2() {
+ println("F2")
+}
+
+func testUnnamedStructMethods() {
+ // Test method lookup on an unnamed struct type.
+ var x struct {
+ S1
+ S2
+ }
+ x.F1()
+ x.F2()
+}
+
+func main() {
+ var s S2
+
+ // Derive pointer-receiver function.
+ f1 := (*S2).F1
+ f1(&s)
+
+ f2 := (*S2).F2
+ f2(&s)
+
+ testUnnamedStructMethods()
+}
diff --git a/test/execution/new.go b/test/execution/new.go
new file mode 100644
index 0000000..24773de
--- /dev/null
+++ b/test/execution/new.go
@@ -0,0 +1,17 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0
+// CHECK-NEXT: 2
+// CHECK-NEXT: 4
+
+package main
+
+func main() {
+ x := new(int)
+ println(*x)
+ *x = 2
+ println(*x)
+ *x = *x * *x
+ println(*x)
+}
diff --git a/test/execution/nil.go b/test/execution/nil.go
new file mode 100644
index 0000000..0aa94e7
--- /dev/null
+++ b/test/execution/nil.go
@@ -0,0 +1,32 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0x0
+// CHECK-NEXT: x is nil
+// CHECK-NEXT: y is nil
+// CHECK-NEXT: z is nil
+
+package main
+
+func main() {
+ var x *int = nil
+ println(x)
+
+ if x == nil {
+ println("x is nil")
+ }
+
+ var y interface{}
+ var z interface{} = y
+ if y == nil {
+ println("y is nil")
+ } else {
+ println("y is not nil")
+ }
+
+ if z == nil {
+ println("z is nil")
+ } else {
+ println("z is not nil")
+ }
+}
diff --git a/test/execution/operators/basics.go b/test/execution/operators/basics.go
new file mode 100644
index 0000000..1942586
--- /dev/null
+++ b/test/execution/operators/basics.go
@@ -0,0 +1,133 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 4096
+// CHECK-NEXT: 256
+// CHECK-NEXT: 0
+// CHECK-NEXT: 0
+// CHECK-NEXT: 4096
+// CHECK-NEXT: 256
+// CHECK-NEXT: 0
+// CHECK-NEXT: 0
+// CHECK-NEXT: 65280
+// CHECK-NEXT: 0
+// CHECK-NEXT: 0
+// CHECK-NEXT: 0
+// CHECK-NEXT: 61184
+// CHECK-NEXT: 65024
+// CHECK-NEXT: 65296
+// CHECK-NEXT: 65281
+// CHECK-NEXT: 61184
+// CHECK-NEXT: 65024
+// CHECK-NEXT: 65296
+// CHECK-NEXT: 65281
+// CHECK-NEXT: -62
+// CHECK-NEXT: -246
+// CHECK-NEXT: +1.224560e+002
+// CHECK-NEXT: 3
+// CHECK-NEXT: 3
+// CHECK-NEXT: 4
+// CHECK-NEXT: 122
+// CHECK-NEXT: -124
+// CHECK-NEXT: 120
+// CHECK-NEXT: 18446744073709547520
+// CHECK-NEXT: false
+// CHECK-NEXT: 2147483648
+// CHECK-NEXT: 9223372036854775808
+// CHECK-NEXT: 2147483648 2147483648
+// CHECK-NEXT: 9223372036854775808 9223372036854775808
+
+package main
+
+import "unsafe"
+
+var global string
+
+var hi = 0xFF00
+var lo = 0xFF00
+
+// borrowed from math package to avoid dependency on standard library
+func float32bits(f float32) uint32 { return *(*uint32)(unsafe.Pointer(&f)) }
+func float64bits(f float64) uint64 { return *(*uint64)(unsafe.Pointer(&f)) }
+
+func main() {
+ println(hi & 0x1000)
+ println(hi & 0x0100)
+ println(hi & 0x0010)
+ println(hi & 0x0001)
+ println(lo & 0x1000)
+ println(lo & 0x0100)
+ println(lo & 0x0010)
+ println(lo & 0x0001)
+ println(hi | lo)
+ println(hi ^ hi)
+ println(hi ^ lo)
+ println(lo ^ lo)
+ println(hi ^ 0x1000)
+ println(hi ^ 0x0100)
+ println(hi ^ 0x0010)
+ println(hi ^ 0x0001)
+ println(lo ^ 0x1000)
+ println(lo ^ 0x0100)
+ println(lo ^ 0x0010)
+ println(lo ^ 0x0001)
+ println(-123 >> 1)
+ println(-123 << 1)
+
+ var f float64 = 123.456
+ f--
+ println(f)
+
+ // context of '&' op is used to type the untyped lhs
+ // operand of the shift expression.
+ shift := uint(2)
+ println(uint64(0xFFFFFFFF) & (1<<shift - 1))
+ println((1<<shift - 1) & uint64(0xFFFFFFFF))
+
+	// the rhs's type is converted to the lhs's
+ println(uint32(1) << uint64(2))
+ {
+ var _uint64 uint64
+ var _uint uint
+ x := _uint64 >> (63 - _uint)
+ if x == 2<<_uint {
+ println("!")
+ }
+ }
+
+ // There was a bug related to compound expressions involving
+ // multiple binary logical operators.
+ var a, b, c int
+ if a == 0 && (b != 0 || c != 0) {
+ println("!")
+ }
+
+ var si int = -123
+ var ui int = 123
+ println(^si)
+ println(^ui)
+ println(ui &^ 3)
+
+ // test case from math/modf.go
+ var x uint64 = 0xFFFFFFFFFFFFFFFF
+ var e uint = 40
+ x &^= 1<<(64-12-e) - 1
+ println(x)
+
+ // compare global to non-global
+ println(new(string) == &global)
+
+ // negative zero
+ var f32 float32
+ var f64 float64
+ var c64 complex64
+ var c128 complex128
+ f32 = -f32
+ f64 = -f64
+ c64 = -c64
+ c128 = -c128
+ println(float32bits(f32))
+ println(float64bits(f64))
+ println(float32bits(real(c64)), float32bits(imag(c64)))
+ println(float64bits(real(c128)), float64bits(imag(c128)))
+}
diff --git a/test/execution/operators/binary_untyped.go b/test/execution/operators/binary_untyped.go
new file mode 100644
index 0000000..f152985
--- /dev/null
+++ b/test/execution/operators/binary_untyped.go
@@ -0,0 +1,17 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: true
+
+package main
+
+func f1(b bool) bool { // identity on bool; never called from main — presumably kept deliberately, TODO confirm
+	return b
+}
+
+func main() {
+ x := false
+ y := x
+ x = !y // x becomes true
+ println(x || y) // untyped-bool '||' result: prints "true" (matches CHECK above)
+}
diff --git a/test/execution/operators/shifts.go b/test/execution/operators/shifts.go
new file mode 100644
index 0000000..728b32b
--- /dev/null
+++ b/test/execution/operators/shifts.go
@@ -0,0 +1,290 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 4294967295
+// CHECK-NEXT: 4294967295
+// CHECK-NEXT: 2147483647
+// CHECK-NEXT: 4294967294
+// CHECK-NEXT: 1073741823
+// CHECK-NEXT: 4294967292
+// CHECK-NEXT: 536870911
+// CHECK-NEXT: 4294967288
+// CHECK-NEXT: 268435455
+// CHECK-NEXT: 4294967280
+// CHECK-NEXT: 134217727
+// CHECK-NEXT: 4294967264
+// CHECK-NEXT: 67108863
+// CHECK-NEXT: 4294967232
+// CHECK-NEXT: 33554431
+// CHECK-NEXT: 4294967168
+// CHECK-NEXT: 16777215
+// CHECK-NEXT: 4294967040
+// CHECK-NEXT: 8388607
+// CHECK-NEXT: 4294966784
+// CHECK-NEXT: 4194303
+// CHECK-NEXT: 4294966272
+// CHECK-NEXT: 2097151
+// CHECK-NEXT: 4294965248
+// CHECK-NEXT: 1048575
+// CHECK-NEXT: 4294963200
+// CHECK-NEXT: 524287
+// CHECK-NEXT: 4294959104
+// CHECK-NEXT: 262143
+// CHECK-NEXT: 4294950912
+// CHECK-NEXT: 131071
+// CHECK-NEXT: 4294934528
+// CHECK-NEXT: 65535
+// CHECK-NEXT: 4294901760
+// CHECK-NEXT: 32767
+// CHECK-NEXT: 4294836224
+// CHECK-NEXT: 16383
+// CHECK-NEXT: 4294705152
+// CHECK-NEXT: 8191
+// CHECK-NEXT: 4294443008
+// CHECK-NEXT: 4095
+// CHECK-NEXT: 4293918720
+// CHECK-NEXT: 2047
+// CHECK-NEXT: 4292870144
+// CHECK-NEXT: 1023
+// CHECK-NEXT: 4290772992
+// CHECK-NEXT: 511
+// CHECK-NEXT: 4286578688
+// CHECK-NEXT: 255
+// CHECK-NEXT: 4278190080
+// CHECK-NEXT: 127
+// CHECK-NEXT: 4261412864
+// CHECK-NEXT: 63
+// CHECK-NEXT: 4227858432
+// CHECK-NEXT: 31
+// CHECK-NEXT: 4160749568
+// CHECK-NEXT: 15
+// CHECK-NEXT: 4026531840
+// CHECK-NEXT: 7
+// CHECK-NEXT: 3758096384
+// CHECK-NEXT: 3
+// CHECK-NEXT: 3221225472
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2147483648
+// CHECK-NEXT: 0
+// CHECK-NEXT: 0
+// CHECK-NEXT: 4026531839
+// CHECK-NEXT: 4026531839
+// CHECK-NEXT: 2013265919
+// CHECK-NEXT: 3758096382
+// CHECK-NEXT: 1006632959
+// CHECK-NEXT: 3221225468
+// CHECK-NEXT: 503316479
+// CHECK-NEXT: 2147483640
+// CHECK-NEXT: 251658239
+// CHECK-NEXT: 4294967280
+// CHECK-NEXT: 125829119
+// CHECK-NEXT: 4294967264
+// CHECK-NEXT: 62914559
+// CHECK-NEXT: 4294967232
+// CHECK-NEXT: 31457279
+// CHECK-NEXT: 4294967168
+// CHECK-NEXT: 15728639
+// CHECK-NEXT: 4294967040
+// CHECK-NEXT: 7864319
+// CHECK-NEXT: 4294966784
+// CHECK-NEXT: 3932159
+// CHECK-NEXT: 4294966272
+// CHECK-NEXT: 1966079
+// CHECK-NEXT: 4294965248
+// CHECK-NEXT: 983039
+// CHECK-NEXT: 4294963200
+// CHECK-NEXT: 491519
+// CHECK-NEXT: 4294959104
+// CHECK-NEXT: 245759
+// CHECK-NEXT: 4294950912
+// CHECK-NEXT: 122879
+// CHECK-NEXT: 4294934528
+// CHECK-NEXT: 61439
+// CHECK-NEXT: 4294901760
+// CHECK-NEXT: 30719
+// CHECK-NEXT: 4294836224
+// CHECK-NEXT: 15359
+// CHECK-NEXT: 4294705152
+// CHECK-NEXT: 7679
+// CHECK-NEXT: 4294443008
+// CHECK-NEXT: 3839
+// CHECK-NEXT: 4293918720
+// CHECK-NEXT: 1919
+// CHECK-NEXT: 4292870144
+// CHECK-NEXT: 959
+// CHECK-NEXT: 4290772992
+// CHECK-NEXT: 479
+// CHECK-NEXT: 4286578688
+// CHECK-NEXT: 239
+// CHECK-NEXT: 4278190080
+// CHECK-NEXT: 119
+// CHECK-NEXT: 4261412864
+// CHECK-NEXT: 59
+// CHECK-NEXT: 4227858432
+// CHECK-NEXT: 29
+// CHECK-NEXT: 4160749568
+// CHECK-NEXT: 14
+// CHECK-NEXT: 4026531840
+// CHECK-NEXT: 7
+// CHECK-NEXT: 3758096384
+// CHECK-NEXT: 3
+// CHECK-NEXT: 3221225472
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2147483648
+// CHECK-NEXT: 0
+// CHECK-NEXT: 0
+// CHECK-NEXT: -1
+// CHECK-NEXT: -1
+// CHECK-NEXT: -1
+// CHECK-NEXT: -2
+// CHECK-NEXT: -1
+// CHECK-NEXT: -4
+// CHECK-NEXT: -1
+// CHECK-NEXT: -8
+// CHECK-NEXT: -1
+// CHECK-NEXT: -16
+// CHECK-NEXT: -1
+// CHECK-NEXT: -32
+// CHECK-NEXT: -1
+// CHECK-NEXT: -64
+// CHECK-NEXT: -1
+// CHECK-NEXT: -128
+// CHECK-NEXT: -1
+// CHECK-NEXT: -256
+// CHECK-NEXT: -1
+// CHECK-NEXT: -512
+// CHECK-NEXT: -1
+// CHECK-NEXT: -1024
+// CHECK-NEXT: -1
+// CHECK-NEXT: -2048
+// CHECK-NEXT: -1
+// CHECK-NEXT: -4096
+// CHECK-NEXT: -1
+// CHECK-NEXT: -8192
+// CHECK-NEXT: -1
+// CHECK-NEXT: -16384
+// CHECK-NEXT: -1
+// CHECK-NEXT: -32768
+// CHECK-NEXT: -1
+// CHECK-NEXT: -65536
+// CHECK-NEXT: -1
+// CHECK-NEXT: -131072
+// CHECK-NEXT: -1
+// CHECK-NEXT: -262144
+// CHECK-NEXT: -1
+// CHECK-NEXT: -524288
+// CHECK-NEXT: -1
+// CHECK-NEXT: -1048576
+// CHECK-NEXT: -1
+// CHECK-NEXT: -2097152
+// CHECK-NEXT: -1
+// CHECK-NEXT: -4194304
+// CHECK-NEXT: -1
+// CHECK-NEXT: -8388608
+// CHECK-NEXT: -1
+// CHECK-NEXT: -16777216
+// CHECK-NEXT: -1
+// CHECK-NEXT: -33554432
+// CHECK-NEXT: -1
+// CHECK-NEXT: -67108864
+// CHECK-NEXT: -1
+// CHECK-NEXT: -134217728
+// CHECK-NEXT: -1
+// CHECK-NEXT: -268435456
+// CHECK-NEXT: -1
+// CHECK-NEXT: -536870912
+// CHECK-NEXT: -1
+// CHECK-NEXT: -1073741824
+// CHECK-NEXT: -1
+// CHECK-NEXT: -2147483648
+// CHECK-NEXT: -1
+// CHECK-NEXT: 0
+// CHECK-NEXT: 1
+// CHECK-NEXT: 1
+// CHECK-NEXT: 0
+// CHECK-NEXT: 2
+// CHECK-NEXT: 0
+// CHECK-NEXT: 4
+// CHECK-NEXT: 0
+// CHECK-NEXT: 8
+// CHECK-NEXT: 0
+// CHECK-NEXT: 16
+// CHECK-NEXT: 0
+// CHECK-NEXT: 32
+// CHECK-NEXT: 0
+// CHECK-NEXT: 64
+// CHECK-NEXT: 0
+// CHECK-NEXT: 128
+// CHECK-NEXT: 0
+// CHECK-NEXT: 256
+// CHECK-NEXT: 0
+// CHECK-NEXT: 512
+// CHECK-NEXT: 0
+// CHECK-NEXT: 1024
+// CHECK-NEXT: 0
+// CHECK-NEXT: 2048
+// CHECK-NEXT: 0
+// CHECK-NEXT: 4096
+// CHECK-NEXT: 0
+// CHECK-NEXT: 8192
+// CHECK-NEXT: 0
+// CHECK-NEXT: 16384
+// CHECK-NEXT: 0
+// CHECK-NEXT: 32768
+// CHECK-NEXT: 0
+// CHECK-NEXT: 65536
+// CHECK-NEXT: 0
+// CHECK-NEXT: 131072
+// CHECK-NEXT: 0
+// CHECK-NEXT: 262144
+// CHECK-NEXT: 0
+// CHECK-NEXT: 524288
+// CHECK-NEXT: 0
+// CHECK-NEXT: 1048576
+// CHECK-NEXT: 0
+// CHECK-NEXT: 2097152
+// CHECK-NEXT: 0
+// CHECK-NEXT: 4194304
+// CHECK-NEXT: 0
+// CHECK-NEXT: 8388608
+// CHECK-NEXT: 0
+// CHECK-NEXT: 16777216
+// CHECK-NEXT: 0
+// CHECK-NEXT: 33554432
+// CHECK-NEXT: 0
+// CHECK-NEXT: 67108864
+// CHECK-NEXT: 0
+// CHECK-NEXT: 134217728
+// CHECK-NEXT: 0
+// CHECK-NEXT: 268435456
+// CHECK-NEXT: 0
+// CHECK-NEXT: 536870912
+// CHECK-NEXT: 0
+// CHECK-NEXT: 1073741824
+// CHECK-NEXT: 0
+// CHECK-NEXT: -2147483648
+// CHECK-NEXT: 0
+// CHECK-NEXT: 0
+
+package main
+
+func testShrUint32(v uint32) { // prints v>>i then v<<i for i = 0..32; shifting by >= width is defined in Go and yields 0 for unsigned values
+ for i := uint(0); i <= 32; i++ {
+ println(v >> i)
+ println(v << i)
+ }
+}
+
+func testShrInt32(v int32) { // signed >> is arithmetic: the sign bit is replicated, so -1 >> i stays -1 for every i
+ for i := uint(0); i <= 32; i++ {
+ println(v >> i)
+ println(v << i)
+ }
+}
+
+func main() {
+ testShrUint32(0xFFFFFFFF)
+ testShrUint32(0xEFFFFFFF)
+ testShrInt32(-1)
+ testShrInt32(1)
+}
diff --git a/test/execution/slices/append.go b/test/execution/slices/append.go
new file mode 100644
index 0000000..50db2ac
--- /dev/null
+++ b/test/execution/slices/append.go
@@ -0,0 +1,254 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 3
+// CHECK-NEXT: 4
+// CHECK-NEXT: 5
+// CHECK-NEXT: 6
+// CHECK-NEXT: 7
+// CHECK-NEXT: 8
+// CHECK-NEXT: 9
+// CHECK-NEXT: 10
+// CHECK-NEXT: 11
+// CHECK-NEXT: 12
+// CHECK-NEXT: 13
+// CHECK-NEXT: 14
+// CHECK-NEXT: 15
+// CHECK-NEXT: 16
+// CHECK-NEXT: 17
+// CHECK-NEXT: 18
+// CHECK-NEXT: 19
+// CHECK-NEXT: 20
+// CHECK-NEXT: 21
+// CHECK-NEXT: 22
+// CHECK-NEXT: 23
+// CHECK-NEXT: 24
+// CHECK-NEXT: 25
+// CHECK-NEXT: 26
+// CHECK-NEXT: 27
+// CHECK-NEXT: 28
+// CHECK-NEXT: 29
+// CHECK-NEXT: 30
+// CHECK-NEXT: 31
+// CHECK-NEXT: 32
+// CHECK-NEXT: 33
+// CHECK-NEXT: 34
+// CHECK-NEXT: 35
+// CHECK-NEXT: 36
+// CHECK-NEXT: 37
+// CHECK-NEXT: 38
+// CHECK-NEXT: 39
+// CHECK-NEXT: 40
+// CHECK-NEXT: 41
+// CHECK-NEXT: 42
+// CHECK-NEXT: 43
+// CHECK-NEXT: 44
+// CHECK-NEXT: 45
+// CHECK-NEXT: 46
+// CHECK-NEXT: 47
+// CHECK-NEXT: 48
+// CHECK-NEXT: 49
+// CHECK-NEXT: 50
+// CHECK-NEXT: 51
+// CHECK-NEXT: 52
+// CHECK-NEXT: 53
+// CHECK-NEXT: 54
+// CHECK-NEXT: 55
+// CHECK-NEXT: 56
+// CHECK-NEXT: 57
+// CHECK-NEXT: 58
+// CHECK-NEXT: 59
+// CHECK-NEXT: 60
+// CHECK-NEXT: 61
+// CHECK-NEXT: 62
+// CHECK-NEXT: 63
+// CHECK-NEXT: 64
+// CHECK-NEXT: 65
+// CHECK-NEXT: 66
+// CHECK-NEXT: 67
+// CHECK-NEXT: 68
+// CHECK-NEXT: 69
+// CHECK-NEXT: 70
+// CHECK-NEXT: 71
+// CHECK-NEXT: 72
+// CHECK-NEXT: 73
+// CHECK-NEXT: 74
+// CHECK-NEXT: 75
+// CHECK-NEXT: 76
+// CHECK-NEXT: 77
+// CHECK-NEXT: 78
+// CHECK-NEXT: 79
+// CHECK-NEXT: 80
+// CHECK-NEXT: 81
+// CHECK-NEXT: 82
+// CHECK-NEXT: 83
+// CHECK-NEXT: 84
+// CHECK-NEXT: 85
+// CHECK-NEXT: 86
+// CHECK-NEXT: 87
+// CHECK-NEXT: 88
+// CHECK-NEXT: 89
+// CHECK-NEXT: 90
+// CHECK-NEXT: 91
+// CHECK-NEXT: 92
+// CHECK-NEXT: 93
+// CHECK-NEXT: 94
+// CHECK-NEXT: 95
+// CHECK-NEXT: 96
+// CHECK-NEXT: 97
+// CHECK-NEXT: 98
+// CHECK-NEXT: 99
+// CHECK-NEXT: 0
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 3
+// CHECK-NEXT: 4
+// CHECK-NEXT: 5
+// CHECK-NEXT: 6
+// CHECK-NEXT: 7
+// CHECK-NEXT: 8
+// CHECK-NEXT: 9
+// CHECK-NEXT: 10
+// CHECK-NEXT: 11
+// CHECK-NEXT: 12
+// CHECK-NEXT: 13
+// CHECK-NEXT: 14
+// CHECK-NEXT: 15
+// CHECK-NEXT: 16
+// CHECK-NEXT: 17
+// CHECK-NEXT: 18
+// CHECK-NEXT: 19
+// CHECK-NEXT: 20
+// CHECK-NEXT: 21
+// CHECK-NEXT: 22
+// CHECK-NEXT: 23
+// CHECK-NEXT: 24
+// CHECK-NEXT: 25
+// CHECK-NEXT: 26
+// CHECK-NEXT: 27
+// CHECK-NEXT: 28
+// CHECK-NEXT: 29
+// CHECK-NEXT: 30
+// CHECK-NEXT: 31
+// CHECK-NEXT: 32
+// CHECK-NEXT: 33
+// CHECK-NEXT: 34
+// CHECK-NEXT: 35
+// CHECK-NEXT: 36
+// CHECK-NEXT: 37
+// CHECK-NEXT: 38
+// CHECK-NEXT: 39
+// CHECK-NEXT: 40
+// CHECK-NEXT: 41
+// CHECK-NEXT: 42
+// CHECK-NEXT: 43
+// CHECK-NEXT: 44
+// CHECK-NEXT: 45
+// CHECK-NEXT: 46
+// CHECK-NEXT: 47
+// CHECK-NEXT: 48
+// CHECK-NEXT: 49
+// CHECK-NEXT: 50
+// CHECK-NEXT: 51
+// CHECK-NEXT: 52
+// CHECK-NEXT: 53
+// CHECK-NEXT: 54
+// CHECK-NEXT: 55
+// CHECK-NEXT: 56
+// CHECK-NEXT: 57
+// CHECK-NEXT: 58
+// CHECK-NEXT: 59
+// CHECK-NEXT: 60
+// CHECK-NEXT: 61
+// CHECK-NEXT: 62
+// CHECK-NEXT: 63
+// CHECK-NEXT: 64
+// CHECK-NEXT: 65
+// CHECK-NEXT: 66
+// CHECK-NEXT: 67
+// CHECK-NEXT: 68
+// CHECK-NEXT: 69
+// CHECK-NEXT: 70
+// CHECK-NEXT: 71
+// CHECK-NEXT: 72
+// CHECK-NEXT: 73
+// CHECK-NEXT: 74
+// CHECK-NEXT: 75
+// CHECK-NEXT: 76
+// CHECK-NEXT: 77
+// CHECK-NEXT: 78
+// CHECK-NEXT: 79
+// CHECK-NEXT: 80
+// CHECK-NEXT: 81
+// CHECK-NEXT: 82
+// CHECK-NEXT: 83
+// CHECK-NEXT: 84
+// CHECK-NEXT: 85
+// CHECK-NEXT: 86
+// CHECK-NEXT: 87
+// CHECK-NEXT: 88
+// CHECK-NEXT: 89
+// CHECK-NEXT: 90
+// CHECK-NEXT: 91
+// CHECK-NEXT: 92
+// CHECK-NEXT: 93
+// CHECK-NEXT: 94
+// CHECK-NEXT: 95
+// CHECK-NEXT: 96
+// CHECK-NEXT: 97
+// CHECK-NEXT: 98
+// CHECK-NEXT: 99
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 3
+// CHECK-NEXT: abcdef
+// CHECK-NEXT: true
+// CHECK-NEXT: true
+// CHECK-NEXT: false false
+// CHECK-NEXT: true true
+// CHECK-NEXT: false false
+
+package main
+
+func stringtobytes() { // append(b, "abc"...) appends the bytes of a string to a []byte
+ var b []byte
+ b = append(b, "abc"...)
+ b = append(b, "def"...)
+ println(string(b)) // "abcdef"
+}
+
+func appendnothing() { // append with no extra elements returns the slice unchanged (nil stays nil)
+ var x []string
+ println(append(x) == nil)
+ x = append(x, "!")
+ println(len(append(x)) == 1)
+}
+
+func appendmulti() { // variadic spread append vs. listing the same elements individually must agree
+ a := append([]bool{}, []bool{false, true, false}...)
+ b := append([]bool{}, false, true, false)
+ for i := range a {
+ println(a[i], b[i])
+ }
+}
+
+func main() {
+ x := []int{}
+ for i := 0; i < 100; i++ { // repeated append forces several capacity growths/reallocations
+ x = append(x, i)
+ }
+ for i := 0; i < len(x); i++ {
+ println(x[i])
+ }
+ y := []int{1, 2, 3}
+ x = append(x, y...)
+ for i := 0; i < len(x); i++ {
+ println(x[i])
+ }
+ stringtobytes()
+ appendnothing()
+ appendmulti()
+}
diff --git a/test/execution/slices/cap.go b/test/execution/slices/cap.go
new file mode 100644
index 0000000..33a34ff
--- /dev/null
+++ b/test/execution/slices/cap.go
@@ -0,0 +1,50 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0 0
+// CHECK-NEXT: 0 0
+// CHECK-NEXT: 0 0
+// CHECK-NEXT: 1 1
+// CHECK-NEXT: 1 1
+// CHECK-NEXT: 1 2
+// CHECK-NEXT: 2 9
+// CHECK-NEXT: 3 9
+// CHECK-NEXT: 999
+// CHECK-NEXT: 999
+// CHECK-NEXT: 1 2
+
+package main
+
+func test(l, c int) { // l == -1 means "leave s nil"; c == -1 means "make with length only"
+ var s []int
+ if l != -1 {
+ if c == -1 {
+ s = make([]int, l)
+ } else {
+ s = make([]int, l, c)
+ }
+ }
+ println(len(s), cap(s))
+}
+
+func main() {
+ test(-1, -1)
+ test(0, -1)
+ test(0, 0)
+ test(1, -1)
+ test(1, 1)
+ test(1, 2)
+
+ // make sure capacity is transferred to slice
+ s := make([]int, 5, 10)
+ s1 := s[1:3] // len 2, cap 9 (capacity runs to the end of s's backing array)
+
+ s2 := append(s1, 999) // fits within cap, so it writes into s's backing array
+ println(len(s2), cap(s2))
+ println(s2[2])
+ println(s[3]) // observes the 999 written through s2 (shared storage)
+
+ s3 := s1[0:1:2] // full slice expression: third index caps the result at 2
+ println(len(s3), cap(s3))
+}
diff --git a/test/execution/slices/compare.go b/test/execution/slices/compare.go
new file mode 100644
index 0000000..93e3071
--- /dev/null
+++ b/test/execution/slices/compare.go
@@ -0,0 +1,26 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: true
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+// CHECK-NEXT: false
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+
+package main
+
+func main() { // slices compare only against nil; an empty made slice is non-nil
+ var s []int
+ println(s == nil)
+ println(s != nil)
+ println(nil == s)
+ println(nil != s)
+ s = make([]int, 0) // len 0 but non-nil
+ println(s == nil)
+ println(s != nil)
+ println(nil == s)
+ println(nil != s)
+}
diff --git a/test/execution/slices/copy.go b/test/execution/slices/copy.go
new file mode 100644
index 0000000..a6bf6c5
--- /dev/null
+++ b/test/execution/slices/copy.go
@@ -0,0 +1,17 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 5
+// CHECK-NEXT: 0
+
+package main
+
+func main() { // copy() returns min(len(dst), len(src)) and must not write past dst's length
+ a := make([]int, 10)
+ b := make([]int, 10)
+ for i, _ := range b { // NOTE(review): 'i, _' is redundant — 'for i := range b' is idiomatic
+ b[i] = 1
+ }
+ println(copy(a[:5], b)) // expect 5
+ println(a[5]) // expect 0
+}
diff --git a/test/execution/slices/index.go b/test/execution/slices/index.go
new file mode 100644
index 0000000..20d213e
--- /dev/null
+++ b/test/execution/slices/index.go
@@ -0,0 +1,14 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0
+
+package main
+
+func blah() []int { // returns a zeroed 1-element slice
+ return make([]int, 1)
+}
+
+func main() {
+ println(blah()[0]) // index directly into a function's return value
+}
diff --git a/test/execution/slices/literal.go b/test/execution/slices/literal.go
new file mode 100644
index 0000000..42adabf
--- /dev/null
+++ b/test/execution/slices/literal.go
@@ -0,0 +1,15 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 3
+
+package main
+
+func main() { // slice composite literal preserves element order
+ x := []int{1, 2, 3}
+ for i := 0; i < len(x); i++ {
+ println(x[i])
+ }
+}
diff --git a/test/execution/slices/make.go b/test/execution/slices/make.go
new file mode 100644
index 0000000..30ecd6c
--- /dev/null
+++ b/test/execution/slices/make.go
@@ -0,0 +1,23 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0 0
+// CHECK-NEXT: 1 0
+// CHECK-NEXT: 2 0
+// CHECK-NEXT: 3 0
+// CHECK-NEXT: 4 0
+// CHECK-NEXT: 5 666
+// CHECK-NEXT: 6 0
+// CHECK-NEXT: 7 0
+// CHECK-NEXT: 8 0
+// CHECK-NEXT: 9 0
+
+package main
+
+func main() { // make zero-initializes; a single store must land at the right index
+ x := make([]int, 10)
+ x[5] = 666
+ for i, val := range x {
+ println(i, val)
+ }
+}
diff --git a/test/execution/slices/sliceexpr.go b/test/execution/slices/sliceexpr.go
new file mode 100644
index 0000000..56a7b52
--- /dev/null
+++ b/test/execution/slices/sliceexpr.go
@@ -0,0 +1,39 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: a
+// CHECK-NEXT: 0 2
+// CHECK-NEXT: 1 3
+// CHECK-NEXT: b
+// CHECK-NEXT: 0 3
+// CHECK-NEXT: 1 4
+// CHECK-NEXT: c
+// CHECK-NEXT: 0 1
+// CHECK-NEXT: 1 2
+// CHECK-NEXT: d
+// CHECK-NEXT: 0 1
+// CHECK-NEXT: 1 2
+// CHECK-NEXT: 2 3
+// CHECK-NEXT: 3 4
+
+package main
+
+func main() { // exercise all four slice-expression forms: [lo:hi], [lo:], [:hi], [:]
+ x := []int{1, 2, 3, 4}
+ println("a")
+ for i, val := range x[1:3] { // elements 2,3 — range index restarts at 0
+ println(i, val)
+ }
+ println("b")
+ for i, val := range x[2:] {
+ println(i, val)
+ }
+ println("c")
+ for i, val := range x[:2] {
+ println(i, val)
+ }
+ println("d")
+ for i, val := range x[:] {
+ println(i, val)
+ }
+}
diff --git a/test/execution/strings/add.go b/test/execution/strings/add.go
new file mode 100644
index 0000000..daf6921
--- /dev/null
+++ b/test/execution/strings/add.go
@@ -0,0 +1,15 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 3 3 6
+// CHECK-NEXT: abc123
+
+package main
+
+func main() { // string concatenation: result length is the sum of the operand lengths
+ a := "abc"
+ b := "123"
+ c := a + b
+ println(len(a), len(b), len(c))
+ println(c)
+}
diff --git a/test/execution/strings/bytes.go b/test/execution/strings/bytes.go
new file mode 100644
index 0000000..a2c450a
--- /dev/null
+++ b/test/execution/strings/bytes.go
@@ -0,0 +1,41 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: testBytesConversion: true
+// CHECK-NEXT: 97
+// CHECK-NEXT: 98
+// CHECK-NEXT: 99
+// CHECK-NEXT: abc
+// CHECK-NEXT: !bc
+// CHECK-NEXT: testBytesCopy: true
+
+package main
+
+type namedByte byte // named byte type: []namedByte(string) conversion must also work
+
+func testBytesConversion() {
+ s := "abc"
+ b := []byte(s) // conversion must copy: b and s must not share storage
+ println("testBytesConversion:", s == string(b))
+ nb := []namedByte(s)
+ for _, v := range nb {
+ println(v)
+ }
+ b[0] = '!' // strings are immutable — mutating b must not affect s
+ println(s)
+ s = string(b) // string(b) copies again...
+ b[0] = 'a' // ...so this later write must not show up in s
+ println(s)
+}
+
+func testBytesCopy() { // copy() accepts a string as the source for a []byte destination
+ s := "abc"
+ b := make([]byte, len(s))
+ copy(b, s)
+ println("testBytesCopy:", string(b) == s)
+}
+
+func main() {
+ testBytesConversion()
+ testBytesCopy()
+}
diff --git a/test/execution/strings/compare.go b/test/execution/strings/compare.go
new file mode 100644
index 0000000..c8e4f09
--- /dev/null
+++ b/test/execution/strings/compare.go
@@ -0,0 +1,54 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: true
+// CHECK-NEXT: false
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+// CHECK-NEXT: false
+// CHECK-NEXT: false
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+// CHECK-NEXT: false
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+// CHECK-NEXT: false
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+// CHECK-NEXT: true
+// CHECK-NEXT: true
+// CHECK-NEXT: true
+// CHECK-NEXT: true
+
+package main
+
+func main() { // lexicographic string comparison, including a prefix case (z extends x)
+ x := "abc"
+ y := "def"
+ z := "abcd"
+
+ println(x == x) // true
+ println(x == y) // false
+ println(x != x) // false
+ println(x != y) // true
+ println(x < x) // false
+ println(x < y) // true
+ println(y < x) // false
+ println(x > x) // false
+ println(x > y) // false
+ println(y > x) // true
+
+ println(x == z) // false — a proper prefix is not equal
+ println(z == x) // false
+ println(x < z) // true — shorter prefix sorts first
+ println(x > z) // false
+ println(z < x) // false
+ println(z > x) // true
+
+ println(x <= x) // true
+ println(x <= y) // true
+ println(x >= x) // true
+ println(y >= x) // true
+}
diff --git a/test/execution/strings/index.go b/test/execution/strings/index.go
new file mode 100644
index 0000000..dca7ddf
--- /dev/null
+++ b/test/execution/strings/index.go
@@ -0,0 +1,11 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 97 98 99
+
+package main
+
+func main() { // indexing a string yields its bytes ('a'==97, 'b'==98, 'c'==99)
+ s := "abc"
+ println(s[0], s[1], s[2])
+}
diff --git a/test/execution/strings/range.go b/test/execution/strings/range.go
new file mode 100644
index 0000000..c68b02a
--- /dev/null
+++ b/test/execution/strings/range.go
@@ -0,0 +1,86 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0 46 1
+// CHECK-NEXT: 0 46
+// CHECK-NEXT: 0 169 1
+// CHECK-NEXT: 0 169
+// CHECK-NEXT: 0 8364 1
+// CHECK-NEXT: 0 8364
+// CHECK-NEXT: 0 66560 1
+// CHECK-NEXT: 0 66560
+// CHECK-NEXT: 0 83 1
+// CHECK-NEXT: 1 97 2
+// CHECK-NEXT: 2 108 3
+// CHECK-NEXT: 3 101 4
+// CHECK-NEXT: 4 32 5
+// CHECK-NEXT: 5 112 6
+// CHECK-NEXT: 6 114 7
+// CHECK-NEXT: 7 105 8
+// CHECK-NEXT: 8 99 9
+// CHECK-NEXT: 9 101 10
+// CHECK-NEXT: 10 58 11
+// CHECK-NEXT: 11 32 12
+// CHECK-NEXT: 12 8364 13
+// CHECK-NEXT: 15 48 14
+// CHECK-NEXT: 16 46 15
+// CHECK-NEXT: 17 57 16
+// CHECK-NEXT: 18 57 17
+// CHECK-NEXT: 0 83
+// CHECK-NEXT: 1 97
+// CHECK-NEXT: 2 108
+// CHECK-NEXT: 3 101
+// CHECK-NEXT: 4 32
+// CHECK-NEXT: 5 112
+// CHECK-NEXT: 6 114
+// CHECK-NEXT: 7 105
+// CHECK-NEXT: 8 99
+// CHECK-NEXT: 9 101
+// CHECK-NEXT: 10 58
+// CHECK-NEXT: 11 32
+// CHECK-NEXT: 12 8364
+// CHECK-NEXT: 15 48
+// CHECK-NEXT: 16 46
+// CHECK-NEXT: 17 57
+// CHECK-NEXT: 18 57
+
+package main
+
+func printchars(s string) { // range over a string decodes UTF-8: i is a byte offset, c is a rune
+ var x int
+ for i, c := range s {
+ // test loop-carried dependence (x++), introducing a Phi node
+ x++
+ println(i, c, x)
+ }
+
+ // now test with plain old assignment
+ var i int
+ var c rune
+ for i, c = range s {
+ println(i, c)
+ if i == len(s)-1 {
+ // test multiple branches to loop header
+ continue
+ }
+ }
+}
+
+func main() {
+ // 1 bytes
+ printchars(".")
+
+ // 2 bytes
+ printchars("©")
+
+ // 3 bytes
+ printchars("€")
+
+ // 4 bytes
+ printchars("𐐀")
+
+ // mixed — byte offsets jump by 3 across the Euro sign
+ printchars("Sale price: €0.99")
+
+ // TODO add test cases for invalid sequences
+}
diff --git a/test/execution/strings/runetostring.go b/test/execution/strings/runetostring.go
new file mode 100644
index 0000000..e6a8c5d
--- /dev/null
+++ b/test/execution/strings/runetostring.go
@@ -0,0 +1,74 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: test( 46 )
+// CHECK-NEXT: .
+// CHECK-NEXT: 46
+// CHECK-NEXT: 0 46
+// CHECK-NEXT: test( 169 )
+// CHECK-NEXT: ©
+// CHECK-NEXT: 194
+// CHECK-NEXT: 169
+// CHECK-NEXT: 0 169
+// CHECK-NEXT: test( 8364 )
+// CHECK-NEXT: €
+// CHECK-NEXT: 226
+// CHECK-NEXT: 130
+// CHECK-NEXT: 172
+// CHECK-NEXT: 0 8364
+// CHECK-NEXT: test( 66560 )
+// CHECK-NEXT: 𐐀
+// CHECK-NEXT: 240
+// CHECK-NEXT: 144
+// CHECK-NEXT: 144
+// CHECK-NEXT: 128
+// CHECK-NEXT: 0 66560
+// CHECK-NEXT: .©€𐐀
+// CHECK-NEXT: 4 4 4
+// CHECK-NEXT: true
+// CHECK-NEXT: true
+// CHECK-NEXT: true
+// CHECK-NEXT: true
+// CHECK-NEXT: true
+// CHECK-NEXT: true
+// CHECK-NEXT: true
+// CHECK-NEXT: true
+
+package main
+
+func test(r rune) { // string(r) must UTF-8 encode: byte indexing shows the encoded bytes, range decodes back to r
+ println("test(", r, ")")
+ s := string(r)
+ println(s)
+ for i := 0; i < len(s); i++ {
+ println(s[i])
+ }
+ for i, r := range s {
+ println(i, r)
+ }
+}
+
+type namedRune rune // named rune type for the []namedRune(string) conversion below
+
+func testslice(r1 []rune) { // []rune -> string -> []rune must round-trip exactly
+ s := string(r1)
+ println(s)
+ r2 := []rune(s)
+ r3 := []namedRune(s)
+ println(len(r1), len(r2), len(r3))
+ if len(r2) == len(r1) && len(r3) == len(r1) {
+ for i := range r2 {
+ println(r1[i] == r2[i])
+ println(r1[i] == rune(r3[i]))
+ }
+ }
+}
+
+func main() {
+ var runes = []rune{'.', '©', '€', '𐐀'} // 1-, 2-, 3-, and 4-byte UTF-8 encodings
+ test(runes[0])
+ test(runes[1])
+ test(runes[2])
+ test(runes[3])
+ testslice(runes)
+}
diff --git a/test/execution/strings/slice.go b/test/execution/strings/slice.go
new file mode 100644
index 0000000..ec1b40c
--- /dev/null
+++ b/test/execution/strings/slice.go
@@ -0,0 +1,17 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: abcdef
+// CHECK-NEXT: bcdef
+// CHECK-NEXT: abc
+// CHECK-NEXT: bcd
+
+package main
+
+func main() { // string slicing: all four expression forms, byte-index based
+ s := "abcdef"
+ println(s[:])
+ println(s[1:])
+ println(s[:3])
+ println(s[1:4])
+}
diff --git a/test/execution/structs/compare.go b/test/execution/structs/compare.go
new file mode 100644
index 0000000..20a8ead
--- /dev/null
+++ b/test/execution/structs/compare.go
@@ -0,0 +1,52 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: true
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+// CHECK-NEXT: false
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+// CHECK-NEXT: true
+// CHECK-NEXT: false
+// CHECK-NEXT: false
+// CHECK-NEXT: true
+
+package main
+
+type S0 struct{} // zero-size struct: all values compare equal
+
+type S1 struct {
+ a int
+}
+
+type S2 struct {
+ a, b int
+}
+
+func testS0() {
+ println(S0{} == S0{})
+ println(S0{} != S0{})
+}
+
+func testS1() { // single-field comparison reduces to comparing the field
+ println(S1{1} == S1{1})
+ println(S1{1} != S1{1})
+ println(S1{1} == S1{2})
+ println(S1{1} != S1{2})
+}
+
+func testS2() { // multi-field comparison: s1 and s2 differ only in field b
+ s1 := S2{1, 2}
+ s2 := S2{1, 3}
+ println(s1 == s1)
+ println(s1 == s2)
+ println(s1 != s1)
+ println(s1 != s2)
+}
+
+func main() {
+ testS0()
+ testS1()
+ testS2()
+}
diff --git a/test/execution/structs/embed.go b/test/execution/structs/embed.go
new file mode 100644
index 0000000..d519794
--- /dev/null
+++ b/test/execution/structs/embed.go
@@ -0,0 +1,58 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: A.test 1
+// CHECK-NEXT: A.testA
+// CHECK-NEXT: A.testA2
+// CHECK-NEXT: B.test 2
+// CHECK-NEXT: A.testA
+// CHECK-NEXT: A.testA2
+// CHECK-NEXT: A.testA
+
+package main
+
+type A struct{ aval int }
+
+func (a *A) test() { // shadowed by B.test when called via a B value
+ println("A.test", a.aval)
+}
+
+func (a *A) testA() {
+ println("A.testA")
+}
+
+func (a A) testA2() { // value receiver, promoted alongside the pointer-receiver methods
+ println("A.testA2")
+}
+
+type B struct {
+ A // embedded: promotes aval, testA, testA2; B.test overrides A.test
+ bval int
+}
+
+func (b B) test() {
+ println("B.test", b.bval)
+}
+
+type C struct {
+ *B // embedded pointer: methods promote through the indirection
+ cval int
+}
+
+func main() {
+ var b B
+ b.aval = 1 // promoted field access through the embedded A
+ b.bval = 2
+ b.A.test() // explicit selection still reaches A's method
+ b.A.testA()
+ b.A.testA2()
+ b.test() // B's own method shadows the promoted one
+ b.testA()
+ b.testA2()
+
+ var c C
+ c.B = &b
+ c.cval = 3
+ c.testA() // pointer-receiver method promoted through *B
+ //c.testA2() // left disabled in the original — presumably value-method promotion through *B was untested/broken here; TODO confirm
+}
diff --git a/test/execution/switch/branch.go b/test/execution/switch/branch.go
new file mode 100644
index 0000000..a7bca9a
--- /dev/null
+++ b/test/execution/switch/branch.go
@@ -0,0 +1,23 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: true
+// CHECK-NEXT: false
+
+package main
+
+func main() {
+ switch true {
+ default:
+ break // exits the switch, so the println below is unreachable (no "default" output)
+ println("default")
+ }
+
+ switch true {
+ case true:
+ println("true")
+ fallthrough // transfers to the NEXT case body without re-testing its expression
+ case false:
+ println("false")
+ }
+}
diff --git a/test/execution/switch/default.go b/test/execution/switch/default.go
new file mode 100644
index 0000000..2ae8509
--- /dev/null
+++ b/test/execution/switch/default.go
@@ -0,0 +1,21 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: default
+// CHECK-NEXT: true
+
+package main
+
+func main() {
+ switch true { // no case matches, so default runs
+ default:
+ println("default")
+ }
+
+ switch { // expressionless switch: default's position is irrelevant, the matching case wins
+ default:
+ println("default")
+ case true:
+ println("true")
+ }
+}
diff --git a/test/execution/switch/empty.go b/test/execution/switch/empty.go
new file mode 100644
index 0000000..4d20679
--- /dev/null
+++ b/test/execution/switch/empty.go
@@ -0,0 +1,16 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: f was called
+
+package main
+
+func f() int {
+ println("f was called")
+ return 123
+}
+
+func main() {
+ switch f() { // a caseless switch must still evaluate its expression for side effects
+ }
+}
diff --git a/test/execution/switch/scope.go b/test/execution/switch/scope.go
new file mode 100644
index 0000000..73bab3c
--- /dev/null
+++ b/test/execution/switch/scope.go
@@ -0,0 +1,20 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 1
+// CHECK-NEXT: 2
+
+package main
+
+func main() {
+ // case clauses have their own scope.
+ switch {
+ case true, false:
+ x := 1 // this x is local to the clause...
+ println(x)
+ fallthrough
+ default:
+ x := 2 // ...so redeclaring x here is legal
+ println(x)
+ }
+}
diff --git a/test/execution/switch/strings.go b/test/execution/switch/strings.go
new file mode 100644
index 0000000..7f0c99d
--- /dev/null
+++ b/test/execution/switch/strings.go
@@ -0,0 +1,21 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: abc
+// CHECK-NEXT: def, abc
+
+package main
+
+func main() { // string-valued switch: single-value and multi-value case lists
+ switch "abc" {
+ case "def":
+ println("def")
+ case "abc":
+ println("abc")
+ }
+
+ switch "abc" {
+ case "def", "abc": // matches on the second value in the list
+ println("def, abc")
+ }
+}
diff --git a/test/execution/switch/type.go b/test/execution/switch/type.go
new file mode 100644
index 0000000..b43231c
--- /dev/null
+++ b/test/execution/switch/type.go
@@ -0,0 +1,72 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: int64 123
+// CHECK-NEXT: default
+// CHECK-NEXT: uint8 or int8
+// CHECK-NEXT: uint8 or int8
+// CHECK-NEXT: N
+
+package main
+
+func test(i interface{}) { // type switch with a binding: x has the case's static type inside each clause
+ switch x := i.(type) {
+ case int64:
+ println("int64", x)
+ // FIXME
+ //case string:
+ // println("string", x)
+ default:
+ println("default")
+ }
+}
+
+type stringer interface {
+ String() string
+}
+
+func printany(i interface{}) { // compile-only here: defined but never called from main — presumably kept to exercise interface cases in a type switch; TODO confirm
+ switch v := i.(type) {
+ case nil:
+ print("nil", v)
+ case stringer:
+ print(v.String())
+ case error:
+ print(v.Error())
+ case int:
+ print(v)
+ case string:
+ print(v)
+ }
+}
+
+func multi(i interface{}) { // multi-type case: the binding-free form, both uint8 and int8 must match it
+ switch i.(type) {
+ case uint8, int8:
+ println("uint8 or int8")
+ default:
+ println("something else")
+ }
+}
+
+type N int
+
+func (n N) String() string { return "N" }
+
+func named() {
+ var x interface{} = N(123)
+ switch x := x.(type) {
+ case N:
+ // Test for bug: previously, type switch was
+ // assigning underlying type of N (int).
+ println(x.String())
+ }
+}
+
+func main() {
+ test(int64(123))
+ test("abc")
+ multi(uint8(123))
+ multi(int8(123))
+ named()
+}
diff --git a/test/execution/types/named.go b/test/execution/types/named.go
new file mode 100644
index 0000000..08fd791
--- /dev/null
+++ b/test/execution/types/named.go
@@ -0,0 +1,37 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 24
+// CHECK-NEXT: 16
+// CHECK-NEXT: 0
+
+package main
+
+import "unsafe"
+
+func f1() { // function-local named struct type: 3 ints -> 24 bytes (per the CHECK above; 64-bit target)
+ type T struct {
+ a, b, c int
+ }
+ var t T
+ println(unsafe.Sizeof(t))
+}
+
+func f2() { // local interface type: two-word interface value -> 16 bytes (per CHECK)
+ type T interface{}
+ var t T
+ t = 1
+ println(unsafe.Sizeof(t))
+}
+
+func f3() { // local empty struct -> size 0
+ type T struct{}
+ var t T
+ println(unsafe.Sizeof(t))
+}
+
+func main() {
+ f1()
+ f2()
+ f3()
+}
diff --git a/test/execution/types/recursive.go b/test/execution/types/recursive.go
new file mode 100644
index 0000000..69ac09b
--- /dev/null
+++ b/test/execution/types/recursive.go
@@ -0,0 +1,29 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 3
+// CHECK-NEXT: 4
+
+package main
+
+type T1 *T1 // self-referential pointer type: a T1 points at another T1
+
+func count(t T1) int { // length of the pointer chain, counting the terminating nil
+ if t == nil {
+ return 1
+ }
+ return 1 + count(*t)
+}
+
+func testSelfPointer() {
+ var a T1 // a = nil
+ var b T1
+ var c T1 = &b
+ *c = &a // i.e. b = &a, so the chain is c -> b -> a -> nil
+ println(count(c)) // 3
+ println(count(&c)) // 4 (one extra link)
+}
+
+func main() {
+ testSelfPointer()
+}
diff --git a/test/execution/unsafe/const_sizeof.go b/test/execution/unsafe/const_sizeof.go
new file mode 100644
index 0000000..ade9d0b
--- /dev/null
+++ b/test/execution/unsafe/const_sizeof.go
@@ -0,0 +1,18 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 8
+// CHECK-NEXT: 8
+
+package main
+
+import "unsafe"
+
+const ptrSize = unsafe.Sizeof((*byte)(nil)) // Sizeof is a compile-time constant expression
+
+var x [ptrSize]int // ...so it is usable as an array length
+
+func main() {
+ println(ptrSize)
+ println(len(x))
+}
diff --git a/test/execution/unsafe/offsetof.go b/test/execution/unsafe/offsetof.go
new file mode 100644
index 0000000..88b313a
--- /dev/null
+++ b/test/execution/unsafe/offsetof.go
@@ -0,0 +1,26 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 0
+// CHECK-NEXT: 4
+// CHECK-NEXT: 8
+// CHECK-NEXT: 16
+
+package main
+
+import "unsafe"
+
+type S struct { // field sizes 2,4,1,8 — offsets 0,4,8,16 (per the CHECK above) show alignment padding after a and c
+ a int16
+ b int32
+ c int8
+ d int64
+}
+
+func main() {
+ var s S
+ println(unsafe.Offsetof(s.a))
+ println(unsafe.Offsetof(s.b))
+ println(unsafe.Offsetof(s.c))
+ println(unsafe.Offsetof(s.d))
+}
diff --git a/test/execution/unsafe/pointer.go b/test/execution/unsafe/pointer.go
new file mode 100644
index 0000000..20dc64f
--- /dev/null
+++ b/test/execution/unsafe/pointer.go
@@ -0,0 +1,21 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 123
+// CHECK-NEXT: 456
+
+package main
+
+import "unsafe"
+
+func main() { // pointer arithmetic via unsafe.Pointer <-> uintptr round trip
+ var i [2]int
+ i[0] = 123
+ i[1] = 456
+ ptr := &i[0]
+ println(*ptr)
+ ptr_i := unsafe.Pointer(ptr)
+ ptr_i = unsafe.Pointer(uintptr(ptr_i) + unsafe.Sizeof(i[0])) // advance by one element
+ ptr = (*int)(ptr_i)
+ println(*ptr) // now reads i[1]
+}
diff --git a/test/execution/unsafe/sizeof_array.go b/test/execution/unsafe/sizeof_array.go
new file mode 100644
index 0000000..2928de7
--- /dev/null
+++ b/test/execution/unsafe/sizeof_array.go
@@ -0,0 +1,18 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 12
+
+package main
+
+import "unsafe"
+
+type uint24 struct { // 3 bytes of data padded to 4 (align 2), so [3]uint24 is 12 (per CHECK)
+ a uint16
+ b uint8
+}
+
+func main() {
+ var a [3]uint24
+ println(unsafe.Sizeof(a))
+}
diff --git a/test/execution/unsafe/sizeof_basic.go b/test/execution/unsafe/sizeof_basic.go
new file mode 100644
index 0000000..a9a467b
--- /dev/null
+++ b/test/execution/unsafe/sizeof_basic.go
@@ -0,0 +1,102 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 1
+// CHECK-NEXT: 8
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 4
+// CHECK-NEXT: 8
+// CHECK-NEXT: 8
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 4
+// CHECK-NEXT: 8
+// CHECK-NEXT: 4
+// CHECK-NEXT: 8
+// CHECK-NEXT: 8
+// CHECK-NEXT: 16
+// CHECK-NEXT: 16
+// CHECK-NEXT: 8
+// CHECK-NEXT: 8
+// CHECK-NEXT: 1
+// CHECK-NEXT: 8
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 4
+// CHECK-NEXT: 8
+// CHECK-NEXT: 8
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 4
+// CHECK-NEXT: 8
+// CHECK-NEXT: 4
+// CHECK-NEXT: 8
+// CHECK-NEXT: 8
+// CHECK-NEXT: 8
+// CHECK-NEXT: 8
+// CHECK-NEXT: 8
+// CHECK-NEXT: 8
+
+package main
+
+import "unsafe"
+
+func main() {
+ var b bool
+ var i int
+ var i8 int8
+ var i16 int16
+ var i32 int32
+ var i64 int64
+ var u uint
+ var u8 uint8
+ var u16 uint16
+ var u32 uint32
+ var u64 uint64
+ var f32 float32
+ var f64 float64
+ var c64 complex64
+ var c128 complex128
+ var s string
+ var p unsafe.Pointer
+ var up uintptr
+
+ println(unsafe.Sizeof(b))
+ println(unsafe.Sizeof(i))
+ println(unsafe.Sizeof(i8))
+ println(unsafe.Sizeof(i16))
+ println(unsafe.Sizeof(i32))
+ println(unsafe.Sizeof(i64))
+ println(unsafe.Sizeof(u))
+ println(unsafe.Sizeof(u8))
+ println(unsafe.Sizeof(u16))
+ println(unsafe.Sizeof(u32))
+ println(unsafe.Sizeof(u64))
+ println(unsafe.Sizeof(f32))
+ println(unsafe.Sizeof(f64))
+ println(unsafe.Sizeof(c64))
+ println(unsafe.Sizeof(c128))
+ println(unsafe.Sizeof(s))
+ println(unsafe.Sizeof(p))
+ println(unsafe.Sizeof(up))
+
+ println(unsafe.Alignof(b))
+ println(unsafe.Alignof(i))
+ println(unsafe.Alignof(i8))
+ println(unsafe.Alignof(i16))
+ println(unsafe.Alignof(i32))
+ println(unsafe.Alignof(i64))
+ println(unsafe.Alignof(u))
+ println(unsafe.Alignof(u8))
+ println(unsafe.Alignof(u16))
+ println(unsafe.Alignof(u32))
+ println(unsafe.Alignof(u64))
+ println(unsafe.Alignof(f32))
+ println(unsafe.Alignof(f64))
+ println(unsafe.Alignof(c64))
+ println(unsafe.Alignof(c128))
+ println(unsafe.Alignof(s))
+ println(unsafe.Alignof(p))
+ println(unsafe.Alignof(up))
+}
diff --git a/test/execution/unsafe/sizeof_struct.go b/test/execution/unsafe/sizeof_struct.go
new file mode 100644
index 0000000..e8612d2
--- /dev/null
+++ b/test/execution/unsafe/sizeof_struct.go
@@ -0,0 +1,19 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 24
+
+package main
+
+import "unsafe"
+
+type a struct {
+ a int16
+ b int32
+ c int8
+ d int64
+}
+
+func main() {
+ println(unsafe.Sizeof(a{}))
+}
diff --git a/test/execution/var.go b/test/execution/var.go
new file mode 100644
index 0000000..71b025d
--- /dev/null
+++ b/test/execution/var.go
@@ -0,0 +1,37 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: woobie
+// CHECK-NEXT: 579 456
+// CHECK-NEXT: 12 +3.450000e+000
+// CHECK-NEXT: -1
+
+package main
+
+func Blah() int {
+ println("woobie")
+ return 123
+}
+
+func F1() (int, float64) {
+ return 12, 3.45
+}
+
+var X = Y + Blah() // == 579
+var Y = 123 + Z // == 456
+
+var X1, Y1 = F1()
+
+const (
+ _ = 333 * iota
+ Z
+)
+
+var I interface{} = -1
+var I1 = I.(int)
+
+func main() {
+ println(X, Y)
+ println(X1, Y1)
+ println(I1)
+}
diff --git a/test/execution/varargs.go b/test/execution/varargs.go
new file mode 100644
index 0000000..ca99416
--- /dev/null
+++ b/test/execution/varargs.go
@@ -0,0 +1,31 @@
+// RUN: llgo -o %t %s
+// RUN: %t 2>&1 | FileCheck %s
+
+// CHECK: 3
+// CHECK-NEXT: 123
+// CHECK-NEXT: 456
+// CHECK-NEXT: 789
+// CHECK-NEXT: 4
+// CHECK-NEXT: 123
+// CHECK-NEXT: 456
+// CHECK-NEXT: 789
+// CHECK-NEXT: 101112
+// CHECK-NEXT: 3
+// CHECK-NEXT: 1
+// CHECK-NEXT: 2
+// CHECK-NEXT: 3
+
+package main
+
+func p(i ...int) {
+ println(len(i))
+ for j := 0; j < len(i); j++ {
+ println(i[j])
+ }
+}
+
+func main() {
+ p(123, 456, 789)
+ p(123, 456, 789, 101112)
+ p([]int{1, 2, 3}...)
+}
diff --git a/test/gllgo/dead.go b/test/gllgo/dead.go
new file mode 100644
index 0000000..31d0de8
--- /dev/null
+++ b/test/gllgo/dead.go
@@ -0,0 +1,6 @@
+// RUN: llgo -O0 -S -o - %s | FileCheck %s
+
+package gotest
+
+// CHECK-NOT: deadfunc
+func deadfunc()
diff --git a/test/irgen/cabi.go b/test/irgen/cabi.go
new file mode 100644
index 0000000..efa4489
--- /dev/null
+++ b/test/irgen/cabi.go
@@ -0,0 +1,23 @@
+// RUN: llgo -S -emit-llvm -o - %s | FileCheck %s
+
+package foo
+
+// CHECK: define void @foo.Test01_SI8(i8 signext)
+func Test01_SI8(x int8) {}
+// CHECK: define void @foo.Test02_UI8(i8 zeroext)
+func Test02_UI8(x uint8) {}
+
+// CHECK: define void @foo.Test03_SI16(i16 signext)
+func Test03_SI16(x int16) {}
+// CHECK: define void @foo.Test04_UI16(i16 zeroext)
+func Test04_UI16(x uint16) {}
+
+// CHECK: define void @foo.Test05_SI32(i32)
+func Test05_SI32(x int32) {}
+// CHECK: define void @foo.Test06_UI32(i32)
+func Test06_UI32(x uint32) {}
+
+// CHECK: define void @foo.Test07_SI64(i64)
+func Test07_SI64(x int64) {}
+// CHECK: define void @foo.Test08_UI64(i64)
+func Test08_UI64(x uint64) {}
diff --git a/test/irgen/mangling.go b/test/irgen/mangling.go
new file mode 100644
index 0000000..aff73cb
--- /dev/null
+++ b/test/irgen/mangling.go
@@ -0,0 +1,7 @@
+// RUN: llgo -fgo-pkgpath=llvm.org/llvm -S -emit-llvm -o - %s | FileCheck %s
+
+package llvm
+
+// CHECK: @llvm_org_llvm.F
+func F() {
+}
diff --git a/test/lit.cfg b/test/lit.cfg
new file mode 100644
index 0000000..e4639ab
--- /dev/null
+++ b/test/lit.cfg
@@ -0,0 +1,15 @@
+import lit.formats
+import os
+import sys
+
+config.name = 'llgo'
+config.suffixes = ['.go']
+config.test_format = lit.formats.ShTest()
+config.test_source_root = config.llvm_src_root + '/tools/llgo/test'
+config.test_exec_root = config.llvm_obj_root + '/tools/llgo/test'
+config.excludes = ['Inputs']
+
+config.substitutions.append((r"\bllgo\b", config.llvm_obj_root + '/bin/llgo -static-libgo'))
+config.substitutions.append((r"\bFileCheck\b", config.llvm_obj_root + '/bin/FileCheck'))
+config.substitutions.append((r"\bcount\b", config.llvm_obj_root + '/bin/count'))
+config.substitutions.append((r"\bnot\b", config.llvm_obj_root + '/bin/not'))
diff --git a/test/lit.site.cfg.in b/test/lit.site.cfg.in
new file mode 100644
index 0000000..2c38386
--- /dev/null
+++ b/test/lit.site.cfg.in
@@ -0,0 +1,4 @@
+config.llvm_src_root = "@LLVM_SOURCE_DIR@"
+config.llvm_obj_root = "@LLVM_BINARY_DIR@"
+
+lit_config.load_config(config, "@CMAKE_CURRENT_SOURCE_DIR@/lit.cfg")
diff --git a/update_third_party.sh b/update_third_party.sh
new file mode 100755
index 0000000..c189d40
--- /dev/null
+++ b/update_third_party.sh
@@ -0,0 +1,110 @@
+#!/bin/sh -e
+
+gofrontendrepo=https://code.google.com/p/gofrontend
+gofrontendrev=82f97044669e
+
+gccrepo=svn://gcc.gnu.org/svn/gcc/trunk
+gccrev=216268
+
+gotoolsrepo=https://code.google.com/p/go.tools
+gotoolsrev=40c49f5c2b64
+
+tempdir=$(mktemp -d /tmp/update_third_party.XXXXXX)
+gofrontenddir=$tempdir/gofrontend
+gotoolsdir=$tempdir/go.tools
+
+rm -rf third_party
+mkdir -p third_party/gofrontend third_party/go.tools
+
+# --------------------- gofrontend ---------------------
+
+hg clone -r $gofrontendrev $gofrontendrepo $gofrontenddir
+
+cp -r $gofrontenddir/LICENSE $gofrontenddir/libgo third_party/gofrontend
+
+# Apply a diff that eliminates use of the unnamed struct extension beyond what
+# -fms-extensions supports.
+(cd third_party/gofrontend && patch -p1) < libgo-noext.diff
+find third_party/gofrontend -name '*.orig' | xargs rm
+
+# Remove GPL licensed files.
+rm \
+ third_party/gofrontend/libgo/testsuite/libgo.testmain/testmain.exp \
+ third_party/gofrontend/libgo/testsuite/lib/libgo.exp \
+ third_party/gofrontend/libgo/testsuite/config/default.exp
+
+# --------------------- gcc ---------------------
+
+# Some dependencies are stored in the gcc repository.
+# TODO(pcc): Ask iant about mirroring these dependencies into gofrontend.
+
+for f in config-ml.in depcomp install-sh ltmain.sh missing ; do
+ svn cat -r $gccrev $gccrepo/$f > third_party/gofrontend/$f
+done
+
+mkdir -p third_party/gofrontend/include third_party/gofrontend/libgcc
+
+# Copy in our versions of GCC files.
+cp include/dwarf2.h third_party/gofrontend/include/
+cp include/filenames.h third_party/gofrontend/include/
+cp include/unwind-pe.h third_party/gofrontend/libgcc/
+cp mvifdiff.sh third_party/gofrontend/move-if-change
+
+cp ../../autoconf/config.guess third_party/gofrontend/
+cp ../../autoconf/config.sub third_party/gofrontend/
+
+for d in libbacktrace libffi ; do
+ svn export -r $gccrev $gccrepo/$d third_party/gofrontend/$d
+done
+
+# Remove GPL licensed files, and files that confuse our license check.
+rm \
+ third_party/gofrontend/libffi/ChangeLog \
+ third_party/gofrontend/libffi/doc/libffi.texi \
+ third_party/gofrontend/libffi/msvcc.sh \
+ third_party/gofrontend/libffi/testsuite/lib/libffi.exp \
+ third_party/gofrontend/libffi/testsuite/libffi.call/call.exp \
+ third_party/gofrontend/libffi/testsuite/libffi.special/special.exp \
+ third_party/gofrontend/libffi/testsuite/config/default.exp
+
+# The build requires these files to exist.
+touch \
+ third_party/gofrontend/include/dwarf2.def \
+ third_party/gofrontend/libffi/doc/libffi.texi
+
+# --------------------- go.tools ---------------------
+
+hg clone -r $gotoolsrev $gotoolsrepo $gotoolsdir
+
+cp -r $gotoolsdir/* third_party/go.tools
+
+# Vendor the go.tools repository.
+find third_party/go.tools -name '*.go' | xargs sed -i -e \
+ 's,"golang.org/x/tools/,"llvm.org/llgo/third_party/go.tools/,g'
+
+# Until the version skew between the "go" tool and the compiler is resolved,
+# we patch out Go 1.4 specific code in go.tools.
+sed -i -e '/go1\.4/ d' third_party/go.tools/go/exact/go13.go
+rm third_party/go.tools/go/exact/go14.go
+
+# --------------------- license check ---------------------
+
+# We don't want any GPL licensed code without an autoconf/libtool
+# exception, or any GPLv3 licensed code.
+
+for i in `grep -lr 'General Public License' third_party` ; do
+ if grep -q 'configuration script generated by Autoconf, you may include it under' $i || \
+ grep -q 'is built using GNU Libtool, you may include this file under the' $i ; then
+ :
+ else
+ echo "$i: license check failed"
+ exit 1
+ fi
+done
+
+if grep -qr GPLv3 third_party ; then
+ echo "`grep -lr GPLv3 third_party`: license check failed"
+ exit 1
+fi
+
+rm -rf $tempdir
diff --git a/utils/benchcomp/README b/utils/benchcomp/README
new file mode 100644
index 0000000..38a8a59
--- /dev/null
+++ b/utils/benchcomp/README
@@ -0,0 +1,20 @@
+These are some quick and dirty tools for measuring the performance impact
+of a change to llgo by sampling the results of running the libgo benchmark
+suite. They can be used to calculate the geo-mean and 95% confidence interval
+using the Student's t-test. The benchcomp program massages the output of the
+Go benchmark tools into a form that can be read by the R program analyze.R
+which runs the statistics.
+
+To use, clpatch this into gofrontend:
+https://codereview.appspot.com/103550047/
+
+then run:
+
+make
+make -C workdir/gofrontend_build/libgo-stage1 bench 2>&1 | tee before.out
+# make changes
+make
+make -C workdir/gofrontend_build/libgo-stage1 bench 2>&1 | tee after.out
+utils/benchcomp/benchcomp benchns before.out after.out | R -f utils/benchcomp/analyze.R
+
+The results should be displayed on stdout.
diff --git a/utils/benchcomp/analyze.R b/utils/benchcomp/analyze.R
new file mode 100644
index 0000000..2efa787
--- /dev/null
+++ b/utils/benchcomp/analyze.R
@@ -0,0 +1,15 @@
+sc <- read.table(file('stdin'))
+scratio <- sc$V2 / sc$V3
+scratio <- scratio[scratio > 0]
+
+# Take the log of the ratio. Our null hypothesis is a normal distribution
+# around zero.
+tt <- t.test(log(scratio))
+tt
+
+# This gives us the geo-mean as we are taking the exponent of the linear mean
+# of logarithms.
+1 - 1/exp(tt$estimate)
+
+# Likewise for the confidence interval.
+1 - 1/exp(tt$conf.int)
diff --git a/utils/benchcomp/main.go b/utils/benchcomp/main.go
new file mode 100644
index 0000000..c7e91c2
--- /dev/null
+++ b/utils/benchcomp/main.go
@@ -0,0 +1,92 @@
+package main
+
+import (
+ "bufio"
+ "debug/elf"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+func symsizes(path string) map[string]float64 {
+ m := make(map[string]float64)
+ f, err := elf.Open(path)
+ if err != nil {
+ panic(err.Error())
+ }
+ syms, err := f.Symbols()
+ if err != nil {
+ panic(err.Error())
+ }
+ for _, sym := range syms {
+ if sym.Section < elf.SectionIndex(len(f.Sections)) && f.Sections[sym.Section].Name == ".text" {
+ m[sym.Name] = float64(sym.Size)
+ }
+ }
+ return m
+}
+
+func benchnums(path, stat string) map[string]float64 {
+ m := make(map[string]float64)
+
+ fh, err := os.Open(path)
+ if err != nil {
+ panic(err.Error())
+ }
+
+ scanner := bufio.NewScanner(fh)
+ for scanner.Scan() {
+ elems := strings.Split(scanner.Text(), "\t")
+ if !strings.HasPrefix(elems[0], "Benchmark") || len(elems) < 3 {
+ continue
+ }
+ var s string
+ for _, elem := range elems[2:] {
+ selems := strings.Split(strings.TrimSpace(elem), " ")
+ if selems[1] == stat {
+ s = selems[0]
+ }
+ }
+ if s != "" {
+ ns, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ panic(scanner.Text() + " ---- " + err.Error())
+ }
+ m[elems[0]] = ns
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ panic(err)
+ }
+
+ return m
+}
+
+func main() {
+ var cmp func(string) map[string]float64
+ switch os.Args[1] {
+ case "symsizes":
+ cmp = symsizes
+
+ case "benchns":
+ cmp = func(path string) map[string]float64 {
+ return benchnums(path, "ns/op")
+ }
+
+ case "benchallocs":
+ cmp = func(path string) map[string]float64 {
+ return benchnums(path, "allocs/op")
+ }
+ }
+
+ syms1 := cmp(os.Args[2])
+ syms2 := cmp(os.Args[3])
+
+ for n, z1 := range syms1 {
+ if z2, ok := syms2[n]; ok && z2 != 0 {
+ fmt.Printf("%s %f %f %f\n", n, z1, z2, z1/z2)
+ }
+ }
+}