all: remove the nacl port (part 2, amd64p32 + toolchain)

This is part two of the nacl removal. Part 1 was CL 199499.

This CL removes amd64p32 support; that support might be useful again in the
future if we ever implement the x32 ABI. It also removes the nacl bits in the
toolchain, along with some remaining stray nacl bits.

Updates #30439

Change-Id: I2475d5bb066d1b474e00e40d95b520e7c2e286e1
Reviewed-on: https://go-review.googlesource.com/c/go/+/200077
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Brad Fitzpatrick · 2019-10-09 16:22:47 +00:00
commit 07b4abd62e (parent 19a7490e56)
70 changed files with 387 additions and 4072 deletions


@@ -94,7 +94,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr
         if cnt%16 != 0 {
             p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
         }
-    } else if !gc.Nacl && !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
+    } else if !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
         if *state&x0 == 0 {
             p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
             *state |= x0
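Aside: zerorange picks a zeroing strategy by size — inline stores for small ranges, a jump into the runtime's Duff's device (DUFFZERO) for mid-sized ranges, and a loop beyond that. NaCl was the platform that could not use Duff's device, which is why the gc.Nacl escape hatch is being deleted here. A minimal sketch of the dispatch, with illustrative thresholds and names (widthReg is an assumption, not the compiler's identifier):

package main

import "fmt"

const widthReg = 8 // assumption: 8-byte machine words, i.e. amd64

// zeroStrategy sketches the size-based dispatch that zerorange performs
// above. Illustrative only: the real function emits Progs, not strings.
func zeroStrategy(cnt int64, isPlan9 bool) string {
	switch {
	case cnt <= 4*widthReg:
		return "inline MOV/MOVUPS stores"
	case !isPlan9 && cnt <= 128*widthReg:
		return "jump into runtime.duffzero (DUFFZERO)"
	default:
		return "REP STOSQ-style loop"
	}
}

func main() {
	fmt.Println(zeroStrategy(32, false))    // inline stores
	fmt.Println(zeroStrategy(512, false))   // Duff's device
	fmt.Println(zeroStrategy(1<<16, false)) // loop
}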


@@ -23,7 +23,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog
         for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
             p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
         }
-    } else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
+    } else if cnt <= int64(128*gc.Widthptr) {
         p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
         p.Reg = arm.REGSP
         p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)


@@ -247,8 +247,6 @@ var Ctxt *obj.Link
 var writearchive bool

-var Nacl bool
-
 var nodfp *Node

 var disable_checknil int


@@ -187,7 +187,6 @@ func Main(archInit func(*Arch)) {
     // pseudo-package used for methods with anonymous receivers
     gopkg = types.NewPkg("go", "")

-    Nacl = objabi.GOOS == "nacl"
     Wasm := objabi.GOARCH == "wasm"

     // Whether the limit for stack-allocated objects is much smaller than normal.


@@ -38,7 +38,6 @@ type Config struct {
     useSSE    bool // Use SSE for non-float operations
     useAvg    bool // Use optimizations that need Avg* operations
     useHmul   bool // Use optimizations that need Hmul* operations
-    nacl      bool // GOOS=nacl
     use387    bool // GO386=387
     SoftFloat bool //
     Race      bool // race detector enabled
@@ -339,7 +338,6 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
     }
     c.ctxt = ctxt
     c.optimize = optimize
-    c.nacl = objabi.GOOS == "nacl"
     c.useSSE = true

     // Don't use Duff's device nor SSE on Plan 9 AMD64, because
@@ -349,17 +347,6 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
         c.useSSE = false
     }
-    if c.nacl {
-        c.noDuffDevice = true // Don't use Duff's device on NaCl
-
-        // Returns clobber BP on nacl/386, so the write
-        // barrier does.
-        opcodeTable[Op386LoweredWB].reg.clobbers |= 1 << 5 // BP
-
-        // ... and SI on nacl/amd64.
-        opcodeTable[OpAMD64LoweredWB].reg.clobbers |= 1 << 6 // SI
-    }
-
     if ctxt.Flag_shared {
         // LoweredWB is secretly a CALL and CALLs on 386 in
         // shared mode get rewritten by obj6.go to go through


@@ -596,32 +596,32 @@
 // into tests for carry flags.
 // ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis
 // mutandis, for UGE and SETAE, and CC and SETCC.
-((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> ((ULT|UGE) (BTL x y))
+((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) -> ((ULT|UGE) (BTL x y))
-((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> ((ULT|UGE) (BTQ x y))
+((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) -> ((ULT|UGE) (BTQ x y))
-((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c) && !config.nacl
+((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c)
     -> ((ULT|UGE) (BTLconst [log2uint32(c)] x))
-((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c) && !config.nacl
+((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c)
     -> ((ULT|UGE) (BTQconst [log2(c)] x))
-((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) && !config.nacl
+((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
     -> ((ULT|UGE) (BTQconst [log2(c)] x))
-(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (SET(B|AE) (BTL x y))
+(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) -> (SET(B|AE) (BTL x y))
-(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (SET(B|AE) (BTQ x y))
+(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) -> (SET(B|AE) (BTQ x y))
-(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c)
     -> (SET(B|AE) (BTLconst [log2uint32(c)] x))
-(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c)
     -> (SET(B|AE) (BTQconst [log2(c)] x))
-(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
     -> (SET(B|AE) (BTQconst [log2(c)] x))

 // SET..store variant
-(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
     -> (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
     -> (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(c)
     -> (SET(B|AE)store [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(c)
     -> (SET(B|AE)store [off] {sym} ptr (BTQconst [log2(c)] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c)
     -> (SET(B|AE)store [off] {sym} ptr (BTQconst [log2(c)] x) mem)

 // Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules
@@ -641,29 +641,29 @@
 (SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) -> (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem)

 // Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
-(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) && !config.nacl -> (BTS(Q|L) x y)
+(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) -> (BTS(Q|L) x y)
-(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) && !config.nacl -> (BTC(Q|L) x y)
+(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) -> (BTC(Q|L) x y)

 // Convert ORconst into BTS, if the code gets smaller, with boundary being
 // (ORL $40,AX is 3 bytes, ORL $80,AX is 6 bytes).
-((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
     -> (BT(S|C)Qconst [log2(c)] x)
-((ORL|XORL)const [c] x) && isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORL|XORL)const [c] x) && isUint32PowerOfTwo(c) && uint64(c) >= 128
     -> (BT(S|C)Lconst [log2uint32(c)] x)
-((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
     -> (BT(S|C)Qconst [log2(c)] x)
-((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(c) && uint64(c) >= 128
     -> (BT(S|C)Lconst [log2uint32(c)] x)

 // Recognize bit clearing: a &^= 1<<b
-(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) && !config.nacl -> (BTR(Q|L) x y)
+(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) -> (BTR(Q|L) x y)
-(ANDQconst [c] x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(ANDQconst [c] x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
     -> (BTRQconst [log2(^c)] x)
-(ANDLconst [c] x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(ANDLconst [c] x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128
     -> (BTRLconst [log2uint32(^c)] x)
-(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
     -> (BTRQconst [log2(^c)] x)
-(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128
     -> (BTRLconst [log2uint32(^c)] x)

 // Special-case bit patterns on first/last bit.
@@ -677,40 +677,40 @@
 // We thus special-case them, by detecting the shift patterns.

 // Special case resetting first/last bit
-(SHL(L|Q)const [1] (SHR(L|Q)const [1] x)) && !config.nacl
+(SHL(L|Q)const [1] (SHR(L|Q)const [1] x))
     -> (BTR(L|Q)const [0] x)
-(SHRLconst [1] (SHLLconst [1] x)) && !config.nacl
+(SHRLconst [1] (SHLLconst [1] x))
     -> (BTRLconst [31] x)
-(SHRQconst [1] (SHLQconst [1] x)) && !config.nacl
+(SHRQconst [1] (SHLQconst [1] x))
     -> (BTRQconst [63] x)

 // Special case testing first/last bit (with double-shift generated by generic.rules)
-((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2
     -> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
-((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2
     -> ((SETB|SETAE|ULT|UGE) (BTQconst [31] x))
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2
     -> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2
     -> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
-((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2
     -> ((SETB|SETAE|ULT|UGE) (BTQconst [0] x))
-((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2
     -> ((SETB|SETAE|ULT|UGE) (BTLconst [0] x))
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2
     -> (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2
     -> (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem)

 // Special-case manually testing last bit with "a>>63 != 0" (without "&1")
-((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2
     -> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
-((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2
     -> ((SETB|SETAE|ULT|UGE) (BTLconst [31] x))
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2
     -> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2
     -> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)

 // Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1)
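These rules lower single-bit tests and updates to the x86 BT family; the !config.nacl guards existed only for NaCl (presumably because its validator did not accept those encodings), so dropping them makes the rules unconditional. For reference, the Go-level patterns they match look roughly like this (a sketch; the exact instructions chosen depend on constants and compiler version):

package main

import "fmt"

// Each body below is the shape the rules above recognize on amd64.
func isSet(x uint64, b uint) bool       { return x&(1<<b) != 0 } // TESTQ(SHLQ) -> BTQ + SETB
func setBit(x uint64, b uint) uint64    { return x | 1<<b }      // ORQ(SHLQ)   -> BTSQ
func toggleBit(x uint64, b uint) uint64 { return x ^ 1<<b }      // XORQ(SHLQ)  -> BTCQ
func clearBit(x uint64, b uint) uint64  { return x &^ (1 << b) } // ANDQ(NOTQ)  -> BTRQ

func main() {
	fmt.Println(isSet(0b1000, 3), setBit(0, 3), toggleBit(0, 3), clearBit(0b1000, 3))
}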


@@ -1246,20 +1246,20 @@
 ((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && objabi.GOARM>=6 -> (REV16 x)

 // use indexed loads and stores
-(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVWloadidx ptr idx mem)
+(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVWloadidx ptr idx mem)
-(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVWstoreidx ptr idx val mem)
+(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVWstoreidx ptr idx val mem)
-(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftLL ptr idx [c] mem)
-(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRL ptr idx [c] mem)
-(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRA ptr idx [c] mem)
-(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftLL ptr idx [c] val mem)
-(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRL ptr idx [c] val mem)
-(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRA ptr idx [c] val mem)
-(MOVBUload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVBUloadidx ptr idx mem)
+(MOVBUload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVBUloadidx ptr idx mem)
-(MOVBload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVBloadidx ptr idx mem)
+(MOVBload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVBloadidx ptr idx mem)
-(MOVBstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVBstoreidx ptr idx val mem)
+(MOVBstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVBstoreidx ptr idx val mem)
-(MOVHUload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVHUloadidx ptr idx mem)
+(MOVHUload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVHUloadidx ptr idx mem)
-(MOVHload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVHloadidx ptr idx mem)
+(MOVHload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVHloadidx ptr idx mem)
-(MOVHstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVHstoreidx ptr idx val mem)
+(MOVHstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVHstoreidx ptr idx val mem)

 // constant folding in indexed loads and stores
 (MOVWloadidx ptr (MOVWconst [c]) mem) -> (MOVWload [c] ptr mem)
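The ARM rules above fold an address computation into the load/store itself (reg+reg and reg+shifted-reg addressing). Under NaCl these forms were disabled, presumably because the sandbox required masking the final address in a register first. Typical Go code that now lowers to the indexed forms unconditionally (a sketch, not a guaranteed assembly listing):

package main

import "fmt"

// b[i] on GOARCH=arm can become a single MOVW load with a shifted index
// (MOVWloadshiftLL) once the rules above fire.
func load(b []uint32, i int) uint32 { return b[i] }

// b[i] = v similarly becomes an indexed store (MOVWstoreshiftLL).
func store(b []uint32, i int, v uint32) { b[i] = v }

func main() {
	b := []uint32{1, 2, 3}
	store(b, 1, 42)
	fmt.Println(load(b, 1))
}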


@@ -625,15 +625,6 @@ func (s *regAllocState) init(f *Func) {
             s.f.fe.Fatalf(src.NoXPos, "arch %s not implemented", s.f.Config.arch)
         }
     }
-    if s.f.Config.nacl {
-        switch s.f.Config.arch {
-        case "arm":
-            s.allocatable &^= 1 << 9 // R9 is "thread pointer" on nacl/arm
-        case "amd64p32":
-            s.allocatable &^= 1 << 5  // BP - reserved for nacl
-            s.allocatable &^= 1 << 15 // R15 - reserved for nacl
-        }
-    }
     if s.f.Config.use387 {
         s.allocatable &^= 1 << 15 // X7 disallowed (one 387 register is used as scratch space during SSE->387 generation in ../x86/387.go)
     }
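The register allocator models the usable register set as a bitmask, one bit per register; reserving a register is a single &^= of its bit, which is all the deleted block did for NaCl's pinned registers (R9 on nacl/arm, BP and R15 on nacl/amd64p32). A self-contained sketch of the idea (register numbers here are illustrative):

package main

import "fmt"

type regMask uint64

func main() {
	var allocatable regMask = 1<<16 - 1 // pretend 16 registers, all allocatable

	const R9, BP, R15 = 9, 5, 15 // bit positions, as in the deleted switch
	allocatable &^= 1 << R9      // nacl/arm: R9 held the "thread pointer"
	allocatable &^= 1 << BP      // nacl/amd64p32: BP reserved by the sandbox
	allocatable &^= 1 << R15     // nacl/amd64p32: R15 held the sandbox base

	fmt.Printf("allocatable = %016b\n", allocatable)
}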

(diff for one file suppressed because it is too large)


@@ -6250,8 +6250,6 @@ func rewriteValueARM_OpARMLessThanU_0(v *Value) bool {
     return false
 }
 func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool {
-    b := v.Block
-    config := b.Func.Config
     // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
     // result: (MOVBUload [off1+off2] {sym} ptr mem)
     for {
@@ -6339,7 +6337,7 @@ func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool {
         return true
     }
     // match: (MOVBUload [0] {sym} (ADD ptr idx) mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
     // result: (MOVBUloadidx ptr idx mem)
     for {
         if v.AuxInt != 0 {
@@ -6353,7 +6351,7 @@ func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool {
         }
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVBUloadidx)
@@ -6491,8 +6489,6 @@ func rewriteValueARM_OpARMMOVBUreg_0(v *Value) bool {
     return false
 }
 func rewriteValueARM_OpARMMOVBload_0(v *Value) bool {
-    b := v.Block
-    config := b.Func.Config
     // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
     // result: (MOVBload [off1+off2] {sym} ptr mem)
     for {
@@ -6580,7 +6576,7 @@ func rewriteValueARM_OpARMMOVBload_0(v *Value) bool {
         return true
     }
     // match: (MOVBload [0] {sym} (ADD ptr idx) mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
     // result: (MOVBloadidx ptr idx mem)
     for {
         if v.AuxInt != 0 {
@@ -6594,7 +6590,7 @@ func rewriteValueARM_OpARMMOVBload_0(v *Value) bool {
         }
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVBloadidx)
@@ -6721,8 +6717,6 @@ func rewriteValueARM_OpARMMOVBreg_0(v *Value) bool {
     return false
 }
 func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool {
-    b := v.Block
-    config := b.Func.Config
     // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
     // result: (MOVBstore [off1+off2] {sym} ptr val mem)
     for {
@@ -6872,7 +6866,7 @@ func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool {
         return true
     }
     // match: (MOVBstore [0] {sym} (ADD ptr idx) val mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
     // result: (MOVBstoreidx ptr idx val mem)
     for {
         if v.AuxInt != 0 {
@@ -6887,7 +6881,7 @@ func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool {
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
         val := v.Args[1]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVBstoreidx)
@@ -7350,7 +7344,7 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool {
         return true
     }
     // match: (MOVHUload [0] {sym} (ADD ptr idx) mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
     // result: (MOVHUloadidx ptr idx mem)
     for {
         if v.AuxInt != 0 {
@@ -7364,7 +7358,7 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool {
         }
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVHUloadidx)
@@ -7525,8 +7519,6 @@ func rewriteValueARM_OpARMMOVHUreg_0(v *Value) bool {
     return false
 }
 func rewriteValueARM_OpARMMOVHload_0(v *Value) bool {
-    b := v.Block
-    config := b.Func.Config
     // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
     // result: (MOVHload [off1+off2] {sym} ptr mem)
     for {
@@ -7614,7 +7606,7 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool {
         return true
     }
     // match: (MOVHload [0] {sym} (ADD ptr idx) mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
     // result: (MOVHloadidx ptr idx mem)
     for {
         if v.AuxInt != 0 {
@@ -7628,7 +7620,7 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool {
         }
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVHloadidx)
@@ -7801,8 +7793,6 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool {
     return false
 }
 func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool {
-    b := v.Block
-    config := b.Func.Config
     // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
     // result: (MOVHstore [off1+off2] {sym} ptr val mem)
     for {
@@ -7912,7 +7902,7 @@ func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool {
         return true
     }
     // match: (MOVHstore [0] {sym} (ADD ptr idx) val mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
     // result: (MOVHstoreidx ptr idx val mem)
     for {
         if v.AuxInt != 0 {
@@ -7927,7 +7917,7 @@ func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool {
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
         val := v.Args[1]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVHstoreidx)
@@ -8069,7 +8059,7 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool {
         return true
     }
     // match: (MOVWload [0] {sym} (ADD ptr idx) mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
     // result: (MOVWloadidx ptr idx mem)
     for {
         if v.AuxInt != 0 {
@@ -8083,7 +8073,7 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool {
         }
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVWloadidx)
@@ -8093,7 +8083,7 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool {
         return true
     }
     // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
     // result: (MOVWloadshiftLL ptr idx [c] mem)
     for {
         if v.AuxInt != 0 {
@@ -8108,7 +8098,7 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool {
         c := v_0.AuxInt
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVWloadshiftLL)
@@ -8119,7 +8109,7 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool {
         return true
     }
     // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
     // result: (MOVWloadshiftRL ptr idx [c] mem)
     for {
         if v.AuxInt != 0 {
@@ -8134,7 +8124,7 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool {
         c := v_0.AuxInt
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVWloadshiftRL)
@@ -8145,7 +8135,7 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool {
         return true
     }
     // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
     // result: (MOVWloadshiftRA ptr idx [c] mem)
     for {
         if v.AuxInt != 0 {
@@ -8160,7 +8150,7 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool {
         c := v_0.AuxInt
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVWloadshiftRA)
@@ -8524,8 +8514,6 @@ func rewriteValueARM_OpARMMOVWreg_0(v *Value) bool {
     return false
 }
 func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool {
-    b := v.Block
-    config := b.Func.Config
     // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
     // result: (MOVWstore [off1+off2] {sym} ptr val mem)
     for {
@@ -8595,7 +8583,7 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool {
         return true
     }
     // match: (MOVWstore [0] {sym} (ADD ptr idx) val mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
     // result: (MOVWstoreidx ptr idx val mem)
     for {
         if v.AuxInt != 0 {
@@ -8610,7 +8598,7 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool {
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
         val := v.Args[1]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVWstoreidx)
@@ -8621,7 +8609,7 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool {
         return true
     }
     // match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
     // result: (MOVWstoreshiftLL ptr idx [c] val mem)
     for {
         if v.AuxInt != 0 {
@@ -8637,7 +8625,7 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool {
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
         val := v.Args[1]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVWstoreshiftLL)
@@ -8649,7 +8637,7 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool {
         return true
     }
     // match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
    // result: (MOVWstoreshiftRL ptr idx [c] val mem)
     for {
         if v.AuxInt != 0 {
@@ -8665,7 +8653,7 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool {
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
         val := v.Args[1]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVWstoreshiftRL)
@@ -8677,7 +8665,7 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool {
         return true
     }
     // match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem)
-    // cond: sym == nil && !config.nacl
+    // cond: sym == nil
     // result: (MOVWstoreshiftRA ptr idx [c] val mem)
     for {
         if v.AuxInt != 0 {
@@ -8693,7 +8681,7 @@ func rewriteValueARM_OpARMMOVWstore_0(v *Value) bool {
         idx := v_0.Args[1]
         ptr := v_0.Args[0]
         val := v.Args[1]
-        if !(sym == nil && !config.nacl) {
+        if !(sym == nil) {
             break
         }
         v.reset(OpARMMOVWstoreshiftRA)


@@ -23,7 +23,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog
         for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
             p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i)
         }
-    } else if !gc.Nacl && cnt <= int64(128*gc.Widthreg) {
+    } else if cnt <= int64(128*gc.Widthreg) {
         p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
         p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(gc.Widthreg)))
         p.To.Sym = gc.Duffzero


@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-// js and nacl do not support inter-process file locking.
+// js does not support inter-process file locking.
 // +build !js

 package lockedfile_test


@@ -92,8 +92,7 @@ func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, s
     if a.buildID != "" {
         gcargs = append(gcargs, "-buildid", a.buildID)
     }
-    platform := cfg.Goos + "/" + cfg.Goarch
-    if p.Internal.OmitDebug || platform == "nacl/amd64p32" || cfg.Goos == "plan9" || cfg.Goarch == "wasm" {
+    if p.Internal.OmitDebug || cfg.Goos == "plan9" || cfg.Goarch == "wasm" {
         gcargs = append(gcargs, "-dwarf=false")
     }
     if strings.HasPrefix(runtimeVersion, "go1") && !strings.Contains(os.Args[0], "go_bootstrap") {


@@ -365,276 +365,6 @@ var (
 // The code that shifts the value << 28 has the responsibility
 // for XORing with C_SCOND_XOR too.

-// asmoutnacl assembles the instruction p. It replaces asmout for NaCl.
-// It returns the total number of bytes put in out, and it can change
-// p->pc if extra padding is necessary.
-// In rare cases, asmoutnacl might split p into two instructions.
-// origPC is the PC for this Prog (no padding is taken into account).
-func (c *ctxt5) asmoutnacl(origPC int32, p *obj.Prog, o *Optab, out []uint32) int {
-    size := int(o.size)
-
-    // instruction specific
-    switch p.As {
-    default:
-        if out != nil {
-            c.asmout(p, o, out)
-        }
-
-    case ADATABUNDLE, // align to 16-byte boundary
-        ADATABUNDLEEND: // zero width instruction, just to align next instruction to 16-byte boundary
-        p.Pc = (p.Pc + 15) &^ 15
-
-        if out != nil {
-            c.asmout(p, o, out)
-        }
-
-    case obj.AUNDEF,
-        APLD:
-        size = 4
-
-        if out != nil {
-            switch p.As {
-            case obj.AUNDEF:
-                out[0] = 0xe7fedef0 // NACL_INSTR_ARM_ABORT_NOW (UDF #0xEDE0)
-
-            case APLD:
-                out[0] = 0xe1a01001 // (MOVW R1, R1)
-            }
-        }
-
-    case AB, ABL:
-        if p.To.Type != obj.TYPE_MEM {
-            if out != nil {
-                c.asmout(p, o, out)
-            }
-        } else {
-            if p.To.Offset != 0 || size != 4 || p.To.Reg > REG_R15 || p.To.Reg < REG_R0 {
-                c.ctxt.Diag("unsupported instruction: %v", p)
-            }
-            if p.Pc&15 == 12 {
-                p.Pc += 4
-            }
-            if out != nil {
-                out[0] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03c0013f | (uint32(p.To.Reg)&15)<<12 | (uint32(p.To.Reg)&15)<<16 // BIC $0xc000000f, Rx
-                if p.As == AB {
-                    out[1] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x012fff10 | (uint32(p.To.Reg)&15)<<0 // BX Rx
-                } else { // ABL
-                    out[1] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x012fff30 | (uint32(p.To.Reg)&15)<<0 // BLX Rx
-                }
-            }
-
-            size = 8
-        }
-
-        // align the last instruction (the actual BL) to the last instruction in a bundle
-        if p.As == ABL {
-            if p.To.Sym == deferreturn {
-                p.Pc = ((int64(origPC) + 15) &^ 15) + 16 - int64(size)
-            } else {
-                p.Pc += (16 - ((p.Pc + int64(size)) & 15)) & 15
-            }
-        }
-
-    case ALDREX,
-        ALDREXD,
-        AMOVB,
-        AMOVBS,
-        AMOVBU,
-        AMOVD,
-        AMOVF,
-        AMOVH,
-        AMOVHS,
-        AMOVHU,
-        AMOVM,
-        AMOVW,
-        ASTREX,
-        ASTREXD:
-        if p.To.Type == obj.TYPE_REG && p.To.Reg == REG_R15 && p.From.Reg == REG_R13 { // MOVW.W x(R13), PC
-            if out != nil {
-                c.asmout(p, o, out)
-            }
-            if size == 4 {
-                if out != nil {
-                    // Note: 5c and 5g reg.c know that DIV/MOD smashes R12
-                    // so that this return instruction expansion is valid.
-                    out[0] = out[0] &^ 0x3000                                         // change PC to R12
-                    out[1] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03ccc13f // BIC $0xc000000f, R12
-                    out[2] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x012fff1c // BX R12
-                }
-
-                size += 8
-                if (p.Pc+int64(size))&15 == 4 {
-                    p.Pc += 4
-                }
-                break
-            } else {
-                // if the instruction used more than 4 bytes, then it must have used a very large
-                // offset to update R13, so we need to additionally mask R13.
-                if out != nil {
-                    out[size/4-1] &^= 0x3000                                                 // change PC to R12
-                    out[size/4] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03cdd103   // BIC $0xc0000000, R13
-                    out[size/4+1] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03ccc13f // BIC $0xc000000f, R12
-                    out[size/4+2] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x012fff1c // BX R12
-                }
-
-                // p->pc+size is only ok at 4 or 12 mod 16.
-                if (p.Pc+int64(size))%8 == 0 {
-                    p.Pc += 4
-                }
-                size += 12
-                break
-            }
-        }
-
-        if p.To.Type == obj.TYPE_REG && p.To.Reg == REG_R15 {
-            c.ctxt.Diag("unsupported instruction (move to another register and use indirect jump instead): %v", p)
-        }
-
-        if p.To.Type == obj.TYPE_MEM && p.To.Reg == REG_R13 && (p.Scond&C_WBIT != 0) && size > 4 {
-            // function prolog with very large frame size: MOVW.W R14,-100004(R13)
-            // split it into two instructions:
-            //     ADD $-100004, R13
-            //     MOVW R14, 0(R13)
-            q := c.newprog()
-
-            p.Scond &^= C_WBIT
-            *q = *p
-            a := &p.To
-            var a2 *obj.Addr
-            if p.To.Type == obj.TYPE_MEM {
-                a2 = &q.To
-            } else {
-                a2 = &q.From
-            }
-            nocache(q)
-            nocache(p)
-
-            // insert q after p
-            q.Link = p.Link
-
-            p.Link = q
-            q.Pcond = nil
-
-            // make p into ADD $X, R13
-            p.As = AADD
-
-            p.From = *a
-            p.From.Reg = 0
-            p.From.Type = obj.TYPE_CONST
-            p.To = obj.Addr{}
-            p.To.Type = obj.TYPE_REG
-            p.To.Reg = REG_R13
-
-            // make q into p but load/store from 0(R13)
-            q.Spadj = 0
-
-            *a2 = obj.Addr{}
-            a2.Type = obj.TYPE_MEM
-            a2.Reg = REG_R13
-            a2.Sym = nil
-            a2.Offset = 0
-            size = int(c.oplook(p).size)
-            break
-        }
-
-        if (p.To.Type == obj.TYPE_MEM && p.To.Reg != REG_R9) || // MOVW Rx, X(Ry), y != 9
-            (p.From.Type == obj.TYPE_MEM && p.From.Reg != REG_R9) { // MOVW X(Rx), Ry, x != 9
-            var a *obj.Addr
-            if p.To.Type == obj.TYPE_MEM {
-                a = &p.To
-            } else {
-                a = &p.From
-            }
-            reg := int(a.Reg)
-            if size == 4 {
-                // if addr.reg == 0, then it is probably load from x(FP) with small x, no need to modify.
-                if reg == 0 {
-                    if out != nil {
-                        c.asmout(p, o, out)
-                    }
-                } else {
-                    if out != nil {
-                        out[0] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03c00103 | (uint32(reg)&15)<<16 | (uint32(reg)&15)<<12 // BIC $0xc0000000, Rx
-                    }
-                    if p.Pc&15 == 12 {
-                        p.Pc += 4
-                    }
-                    size += 4
-                    if out != nil {
-                        c.asmout(p, o, out[1:])
-                    }
-                }
-
-                break
-            } else {
-                // if a load/store instruction takes more than 1 word to implement, then
-                // we need to separate the instruction into two:
-                // 1. explicitly load the address into R11.
-                // 2. load/store from R11.
-                // This won't handle .W/.P, so we should reject such code.
-                if p.Scond&(C_PBIT|C_WBIT) != 0 {
-                    c.ctxt.Diag("unsupported instruction (.P/.W): %v", p)
-                }
-                q := c.newprog()
-                *q = *p
-                var a2 *obj.Addr
-                if p.To.Type == obj.TYPE_MEM {
-                    a2 = &q.To
-                } else {
-                    a2 = &q.From
-                }
-                nocache(q)
-                nocache(p)
-
-                // insert q after p
-                q.Link = p.Link
-
-                p.Link = q
-                q.Pcond = nil
-
-                // make p into MOVW $X(R), R11
-                p.As = AMOVW
-
-                p.From = *a
-                p.From.Type = obj.TYPE_ADDR
-                p.To = obj.Addr{}
-                p.To.Type = obj.TYPE_REG
-                p.To.Reg = REG_R11
-
-                // make q into p but load/store from 0(R11)
-                *a2 = obj.Addr{}
-
-                a2.Type = obj.TYPE_MEM
-                a2.Reg = REG_R11
-                a2.Sym = nil
-                a2.Offset = 0
-                size = int(c.oplook(p).size)
-                break
-            }
-        } else if out != nil {
-            c.asmout(p, o, out)
-        }
-    }
-
-    // destination register specific
-    if p.To.Type == obj.TYPE_REG {
-        switch p.To.Reg {
-        case REG_R9:
-            c.ctxt.Diag("invalid instruction, cannot write to R9: %v", p)
-
-        case REG_R13:
-            if out != nil {
-                out[size/4] = 0xe3cdd103 // BIC $0xc0000000, R13
-            }
-            if (p.Pc+int64(size))&15 == 0 {
-                p.Pc += 4
-            }
-            size += 4
-        }
-    }
-
-    return size
-}
-
 func checkSuffix(c *ctxt5, p *obj.Prog, o *Optab) {
     if p.Scond&C_SBIT != 0 && o.scond&C_SBIT == 0 {
         c.ctxt.Diag("invalid .S suffix: %v", p)
@@ -685,13 +415,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
         p.Pc = int64(pc)
         o = c.oplook(p)
-        if ctxt.Headtype != objabi.Hnacl {
-            m = int(o.size)
-        } else {
-            m = c.asmoutnacl(pc, p, o, nil)
-            pc = int32(p.Pc) // asmoutnacl might change pc for alignment
-            o = c.oplook(p)  // asmoutnacl might change p in rare cases
-        }
+        m = int(o.size)

         if m%4 != 0 || p.Pc%4 != 0 {
             ctxt.Diag("!pc invalid: %v size=%d", p, m)
@@ -783,12 +507,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
             }
         */
             opc = int32(p.Pc)
-            if ctxt.Headtype != objabi.Hnacl {
-                m = int(o.size)
-            } else {
-                m = c.asmoutnacl(pc, p, o, nil)
-            }
+            m = int(o.size)
             if p.Pc != int64(opc) {
                 bflag = 1
             }
@@ -844,15 +563,8 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
         c.pc = p.Pc
         o = c.oplook(p)
         opc = int32(p.Pc)
-        if ctxt.Headtype != objabi.Hnacl {
-            c.asmout(p, o, out[:])
-            m = int(o.size)
-        } else {
-            m = c.asmoutnacl(pc, p, o, out[:])
-            if int64(opc) != p.Pc {
-                ctxt.Diag("asmoutnacl broken: pc changed (%d->%d) in last stage: %v", opc, int32(p.Pc), p)
-            }
-        }
+        c.asmout(p, o, out[:])
+        m = int(o.size)

         if m%4 != 0 || p.Pc%4 != 0 {
             ctxt.Diag("final stage: pc invalid: %v size=%d", p, m)
@@ -934,14 +646,6 @@ func (c *ctxt5) flushpool(p *obj.Prog, skip int, force int) bool {
     } else if force == 0 && (p.Pc+int64(12+c.pool.size)-int64(c.pool.start) < 2048) { // 12 take into account the maximum nacl literal pool alignment padding size
         return false
     }
-    if c.ctxt.Headtype == objabi.Hnacl && c.pool.size%16 != 0 {
-        // if pool is not multiple of 16 bytes, add an alignment marker
-        q := c.newprog()
-        q.As = ADATABUNDLEEND
-        c.elitrl.Link = q
-        c.elitrl = q
-    }

     // The line number for constant pool entries doesn't really matter.
     // We set it to the line number of the preceding instruction so that
@@ -1002,22 +706,6 @@ func (c *ctxt5) addpool(p *obj.Prog, a *obj.Addr) {
         }
     }

-    if c.ctxt.Headtype == objabi.Hnacl && c.pool.size%16 == 0 {
-        // start a new data bundle
-        q := c.newprog()
-        q.As = ADATABUNDLE
-        q.Pc = int64(c.pool.size)
-        c.pool.size += 4
-        if c.blitrl == nil {
-            c.blitrl = q
-            c.pool.start = uint32(p.Pc)
-        } else {
-            c.elitrl.Link = q
-        }
-
-        c.elitrl = q
-    }
-
     q := c.newprog()
     *q = *t
     q.Pc = int64(c.pool.size)
@@ -1286,15 +974,11 @@ func (c *ctxt5) aclass(a *obj.Addr) int {
             if uint32(c.instoffset) <= 0xffff && objabi.GOARM == 7 {
                 return C_SCON
             }
-            if c.ctxt.Headtype != objabi.Hnacl {
-                // Don't split instructions on NaCl. The validator is not
-                // happy with it. See Issue 20595.
-                if x, y := immrot2a(uint32(c.instoffset)); x != 0 && y != 0 {
-                    return C_RCON2A
-                }
-                if y, x := immrot2s(uint32(c.instoffset)); x != 0 && y != 0 {
-                    return C_RCON2S
-                }
+            if x, y := immrot2a(uint32(c.instoffset)); x != 0 && y != 0 {
+                return C_RCON2A
+            }
+            if y, x := immrot2s(uint32(c.instoffset)); x != 0 && y != 0 {
+                return C_RCON2S
             }
             return C_LCON
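All of the machinery deleted from this file implemented NaCl's software fault isolation on ARM: computed branches and stack updates had to be masked (BIC $0xc000000f keeps a target inside the sandbox's address range and 16-byte aligned), instructions were grouped into 16-byte bundles, and large load/store offsets were split through scratch registers. A small sketch of the masking arithmetic the BIC encodings above performed (values are illustrative):

package main

import "fmt"

// maskBranchTarget mimics BIC $0xc000000f, Rx: clear the top two bits
// (stay inside the sandbox's 1 GB address range) and the low four bits
// (land on a 16-byte bundle boundary).
func maskBranchTarget(addr uint32) uint32 { return addr &^ 0xc000000f }

func main() {
	fmt.Printf("%#x -> %#x\n", uint32(0xdeadbeef), maskBranchTarget(0xdeadbeef)) // 0xdeadbeef -> 0x1eadbee0
}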


@@ -1834,12 +1834,6 @@ func fillnop(p []byte, n int) {
     }
 }

-func naclpad(ctxt *obj.Link, s *obj.LSym, c int32, pad int32) int32 {
-    s.Grow(int64(c) + int64(pad))
-    fillnop(s.P[c:], int(pad))
-    return c + pad
-}
-
 func spadjop(ctxt *obj.Link, l, q obj.As) obj.As {
     if ctxt.Arch.Family != sys.AMD64 || ctxt.Arch.PtrSize == 4 {
         return l
@@ -1905,39 +1899,6 @@ func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
     s.P = s.P[:0]
     c = 0
     for p := s.Func.Text; p != nil; p = p.Link {
-        if ctxt.Headtype == objabi.Hnacl && p.Isize > 0 {
-
-            // pad everything to avoid crossing 32-byte boundary
-            if c>>5 != (c+int32(p.Isize)-1)>>5 {
-                c = naclpad(ctxt, s, c, -c&31)
-            }
-
-            // pad call deferreturn to start at 32-byte boundary
-            // so that subtracting 5 in jmpdefer will jump back
-            // to that boundary and rerun the call.
-            if p.As == obj.ACALL && p.To.Sym == deferreturn {
-                c = naclpad(ctxt, s, c, -c&31)
-            }
-
-            // pad call to end at 32-byte boundary
-            if p.As == obj.ACALL {
-                c = naclpad(ctxt, s, c, -(c+int32(p.Isize))&31)
-            }
-
-            // the linker treats REP and STOSQ as different instructions
-            // but in fact the REP is a prefix on the STOSQ.
-            // make sure REP has room for 2 more bytes, so that
-            // padding will not be inserted before the next instruction.
-            if (p.As == AREP || p.As == AREPN) && c>>5 != (c+3-1)>>5 {
-                c = naclpad(ctxt, s, c, -c&31)
-            }
-
-            // same for LOCK.
-            // various instructions follow; the longest is 4 bytes.
-            // give ourselves 8 bytes so as to avoid surprises.
-            if p.As == ALOCK && c>>5 != (c+8-1)>>5 {
-                c = naclpad(ctxt, s, c, -c&31)
-            }
-        }
-
         if (p.Back&branchLoopHead != 0) && c&(loopAlign-1) != 0 {
             // pad with NOPs
@@ -1978,11 +1939,6 @@ func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
             m := ab.Len()
             if int(p.Isize) != m {
                 p.Isize = uint8(m)
-
-                // When building for NaCl, we currently need
-                // at least 2 rounds to ensure proper 32-byte alignment.
-                if ctxt.Headtype == objabi.Hnacl {
-                    reAssemble = true
-                }
             }

             s.Grow(p.Pc + int64(m))
@@ -2003,10 +1959,6 @@ func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
         }
     }

-    if ctxt.Headtype == objabi.Hnacl {
-        c = naclpad(ctxt, s, c, -c&31)
-    }
-
     s.Size = int64(c)

     if false { /* debug['a'] > 1 */
@@ -2041,8 +1993,6 @@ func instinit(ctxt *obj.Link) {
     switch ctxt.Headtype {
     case objabi.Hplan9:
         plan9privates = ctxt.Lookup("_privates")
-    case objabi.Hnacl:
-        deferreturn = ctxt.LookupABI("runtime.deferreturn", obj.ABIInternal)
     }

     for i := range avxOptab {
@@ -4878,8 +4828,7 @@ func (ab *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) {
         default:
             log.Fatalf("unknown TLS base location for %v", ctxt.Headtype)

-        case objabi.Hlinux,
-            objabi.Hnacl, objabi.Hfreebsd:
+        case objabi.Hlinux, objabi.Hfreebsd:
             if ctxt.Flag_shared {
                 // Note that this is not generating the same insns as the other cases.
                 // MOV TLS, dst
@@ -5175,166 +5124,9 @@ func isbadbyte(a *obj.Addr) bool {
     return a.Type == obj.TYPE_REG && (REG_BP <= a.Reg && a.Reg <= REG_DI || REG_BPB <= a.Reg && a.Reg <= REG_DIB)
 }

-var naclret = []uint8{
-    0x5e, // POPL SI
-    // 0x8b, 0x7d, 0x00, // MOVL (BP), DI - catch return to invalid address, for debugging
-    0x83,
-    0xe6,
-    0xe0, // ANDL $~31, SI
-    0x4c,
-    0x01,
-    0xfe, // ADDQ R15, SI
-    0xff,
-    0xe6, // JMP SI
-}
-
-var naclret8 = []uint8{
-    0x5d, // POPL BP
-    // 0x8b, 0x7d, 0x00, // MOVL (BP), DI - catch return to invalid address, for debugging
-    0x83,
-    0xe5,
-    0xe0, // ANDL $~31, BP
-    0xff,
-    0xe5, // JMP BP
-}
-
-var naclspfix = []uint8{0x4c, 0x01, 0xfc} // ADDQ R15, SP
-var naclbpfix = []uint8{0x4c, 0x01, 0xfd} // ADDQ R15, BP
-
-var naclmovs = []uint8{
-    0x89,
-    0xf6, // MOVL SI, SI
-    0x49,
-    0x8d,
-    0x34,
-    0x37, // LEAQ (R15)(SI*1), SI
-    0x89,
-    0xff, // MOVL DI, DI
-    0x49,
-    0x8d,
-    0x3c,
-    0x3f, // LEAQ (R15)(DI*1), DI
-}
-
-var naclstos = []uint8{
-    0x89,
-    0xff, // MOVL DI, DI
-    0x49,
-    0x8d,
-    0x3c,
-    0x3f, // LEAQ (R15)(DI*1), DI
-}
-
-func (ab *AsmBuf) nacltrunc(ctxt *obj.Link, reg int) {
-    if reg >= REG_R8 {
-        ab.Put1(0x45)
-    }
-    reg = (reg - REG_AX) & 7
-    ab.Put2(0x89, byte(3<<6|reg<<3|reg))
-}
-
 func (ab *AsmBuf) asmins(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) {
     ab.Reset()

-    if ctxt.Headtype == objabi.Hnacl && ctxt.Arch.Family == sys.I386 {
-        switch p.As {
-        case obj.ARET:
-            ab.Put(naclret8)
-            return
-
-        case obj.ACALL,
-            obj.AJMP:
-            if p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_DI {
-                ab.Put3(0x83, byte(0xe0|(p.To.Reg-REG_AX)), 0xe0)
-            }
-
-        case AINT:
-            ab.Put1(0xf4)
-            return
-        }
-    }
-
-    if ctxt.Headtype == objabi.Hnacl && ctxt.Arch.Family == sys.AMD64 {
-        if p.As == AREP {
-            ab.rep = true
-            return
-        }
-
-        if p.As == AREPN {
-            ab.repn = true
-            return
-        }
-
-        if p.As == ALOCK {
-            ab.lock = true
-            return
-        }
-
-        if p.As != ALEAQ && p.As != ALEAL {
-            if p.From.Index != REG_NONE && p.From.Scale > 0 {
-                ab.nacltrunc(ctxt, int(p.From.Index))
-            }
-            if p.To.Index != REG_NONE && p.To.Scale > 0 {
-                ab.nacltrunc(ctxt, int(p.To.Index))
-            }
-        }
-
-        switch p.As {
-        case obj.ARET:
-            ab.Put(naclret)
-            return
-
-        case obj.ACALL,
-            obj.AJMP:
-            if p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_DI {
-                // ANDL $~31, reg
-                ab.Put3(0x83, byte(0xe0|(p.To.Reg-REG_AX)), 0xe0)
-                // ADDQ R15, reg
-                ab.Put3(0x4c, 0x01, byte(0xf8|(p.To.Reg-REG_AX)))
-            }
-
-            if p.To.Type == obj.TYPE_REG && REG_R8 <= p.To.Reg && p.To.Reg <= REG_R15 {
-                // ANDL $~31, reg
-                ab.Put4(0x41, 0x83, byte(0xe0|(p.To.Reg-REG_R8)), 0xe0)
-                // ADDQ R15, reg
-                ab.Put3(0x4d, 0x01, byte(0xf8|(p.To.Reg-REG_R8)))
-            }
-
-        case AINT:
-            ab.Put1(0xf4)
-            return
-
-        case ASCASB,
-            ASCASW,
-            ASCASL,
-            ASCASQ,
-            ASTOSB,
-            ASTOSW,
-            ASTOSL,
-            ASTOSQ:
-            ab.Put(naclstos)
-
-        case AMOVSB, AMOVSW, AMOVSL, AMOVSQ:
-            ab.Put(naclmovs)
-        }
-
-        if ab.rep {
-            ab.Put1(0xf3)
-            ab.rep = false
-        }
-
-        if ab.repn {
-            ab.Put1(0xf2)
-            ab.repn = false
-        }
-
-        if ab.lock {
-            ab.Put1(0xf0)
-            ab.lock = false
-        }
-    }
-
     ab.rexflag = 0
     ab.vexflag = false
     ab.evexflag = false
@@ -5393,15 +5185,6 @@ func (ab *AsmBuf) asmins(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) {
         }
     }

-    if ctxt.Arch.Family == sys.AMD64 && ctxt.Headtype == objabi.Hnacl && p.As != ACMPL && p.As != ACMPQ && p.To.Type == obj.TYPE_REG {
-        switch p.To.Reg {
-        case REG_SP:
-            ab.Put(naclspfix)
-        case REG_BP:
-            ab.Put(naclbpfix)
-        }
-    }
 }

 // unpackOps4 extracts 4 operands from p.

@@ -48,7 +48,6 @@ func CanUse1InsnTLS(ctxt *obj.Link) bool {
     if ctxt.Arch.Family == sys.I386 {
         switch ctxt.Headtype {
         case objabi.Hlinux,
-            objabi.Hnacl,
             objabi.Hplan9,
             objabi.Hwindows:
             return false
@@ -208,14 +207,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
         }
     }

-    if ctxt.Headtype == objabi.Hnacl && ctxt.Arch.Family == sys.AMD64 {
-        if p.GetFrom3() != nil {
-            nacladdr(ctxt, p, p.GetFrom3())
-        }
-        nacladdr(ctxt, p, &p.From)
-        nacladdr(ctxt, p, &p.To)
-    }
-
     // Rewrite float constants to values stored in memory.
     switch p.As {
     // Convert AMOVSS $(0), Xx to AXORPS Xx, Xx
@@ -568,38 +559,6 @@ func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
     obj.Nopout(p)
 }

-func nacladdr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
-    if p.As == ALEAL || p.As == ALEAQ {
-        return
-    }
-
-    if a.Reg == REG_BP {
-        ctxt.Diag("invalid address: %v", p)
-        return
-    }
-
-    if a.Reg == REG_TLS {
-        a.Reg = REG_BP
-    }
-    if a.Type == obj.TYPE_MEM && a.Name == obj.NAME_NONE {
-        switch a.Reg {
-        // all ok
-        case REG_BP, REG_SP, REG_R15:
-            break
-
-        default:
-            if a.Index != REG_NONE {
-                ctxt.Diag("invalid address %v", p)
-            }
-            a.Index = a.Reg
-            if a.Index != REG_NONE {
-                a.Scale = 1
-            }
-            a.Reg = REG_R15
-        }
-    }
-}
-
 func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
     if cursym.Func.Text == nil || cursym.Func.Text.Link == nil {
         return
@@ -762,13 +721,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
     p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // g_panic
     p.To.Type = obj.TYPE_REG
     p.To.Reg = REG_BX
-    if ctxt.Headtype == objabi.Hnacl && ctxt.Arch.Family == sys.AMD64 {
-        p.As = AMOVL
-        p.From.Type = obj.TYPE_MEM
-        p.From.Reg = REG_R15
-        p.From.Scale = 1
-        p.From.Index = REG_CX
-    }
     if ctxt.Arch.Family == sys.I386 {
         p.As = AMOVL
     }
@@ -780,7 +732,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
     p.From.Reg = REG_BX
     p.To.Type = obj.TYPE_REG
     p.To.Reg = REG_BX
-    if ctxt.Headtype == objabi.Hnacl || ctxt.Arch.Family == sys.I386 {
+    if ctxt.Arch.Family == sys.I386 {
         p.As = ATESTL
     }
@@ -807,7 +759,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
     p.From.Offset = int64(autoffset) + int64(ctxt.Arch.RegSize)
     p.To.Type = obj.TYPE_REG
     p.To.Reg = REG_DI
-    if ctxt.Headtype == objabi.Hnacl || ctxt.Arch.Family == sys.I386 {
+    if ctxt.Arch.Family == sys.I386 {
         p.As = ALEAL
     }
@@ -822,13 +774,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
     p.From.Offset = 0 // Panic.argp
     p.To.Type = obj.TYPE_REG
     p.To.Reg = REG_DI
-    if ctxt.Headtype == objabi.Hnacl && ctxt.Arch.Family == sys.AMD64 {
-        p.As = ACMPL
-        p.From.Type = obj.TYPE_MEM
-        p.From.Reg = REG_R15
-        p.From.Scale = 1
-        p.From.Index = REG_BX
-    }
     if ctxt.Arch.Family == sys.I386 {
         p.As = ACMPL
     }
@@ -847,13 +792,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
     p.To.Type = obj.TYPE_MEM
     p.To.Reg = REG_BX
     p.To.Offset = 0 // Panic.argp
-    if ctxt.Headtype == objabi.Hnacl && ctxt.Arch.Family == sys.AMD64 {
-        p.As = AMOVL
-        p.To.Type = obj.TYPE_MEM
-        p.To.Reg = REG_R15
-        p.To.Scale = 1
-        p.To.Index = REG_BX
-    }
     if ctxt.Arch.Family == sys.I386 {
         p.As = AMOVL
     }
@@ -988,14 +926,6 @@ func isZeroArgRuntimeCall(s *obj.LSym) bool {
 }

 func indir_cx(ctxt *obj.Link, a *obj.Addr) {
-    if ctxt.Headtype == objabi.Hnacl && ctxt.Arch.Family == sys.AMD64 {
-        a.Type = obj.TYPE_MEM
-        a.Reg = REG_R15
-        a.Index = REG_CX
-        a.Scale = 1
-        return
-    }
-
     a.Type = obj.TYPE_MEM
     a.Reg = REG_CX
 }
@@ -1040,7 +970,7 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA
     mov := AMOVQ
     sub := ASUBQ

-    if ctxt.Headtype == objabi.Hnacl || ctxt.Arch.Family == sys.I386 {
+    if ctxt.Arch.Family == sys.I386 {
         cmp = ACMPL
         lea = ALEAL
         mov = AMOVL

View File

@ -42,7 +42,6 @@ const (
Hfreebsd
Hjs
Hlinux
Hnacl
Hnetbsd
Hopenbsd
Hplan9
@ -65,8 +64,6 @@ func (h *HeadType) Set(s string) error {
*h = Hjs
case "linux", "android":
*h = Hlinux
case "nacl":
*h = Hnacl
case "netbsd":
*h = Hnetbsd
case "openbsd":
@ -97,8 +94,6 @@ func (h *HeadType) String() string {
return "js"
case Hlinux:
return "linux"
case Hnacl:
return "nacl"
case Hnetbsd:
return "netbsd"
case Hopenbsd:

View File

@ -127,7 +127,7 @@ func init() {
}
func Framepointer_enabled(goos, goarch string) bool {
return framepointer_enabled != 0 && (goarch == "amd64" && goos != "nacl" || goarch == "arm64" && goos == "linux")
return framepointer_enabled != 0 && (goarch == "amd64" || goarch == "arm64" && goos == "linux")
}
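// (Operator precedence note: && binds tighter than ||, so the new condition
// enables frame pointers on all amd64 targets, and on arm64 only for linux.)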
func addexp(s string) {

View File

@ -731,8 +731,7 @@ func asmb2(ctxt *ld.Link) {
objabi.Hsolaris:
ld.Flag8 = true /* 64-bit addresses */
case objabi.Hnacl,
case objabi.Hwindows:
objabi.Hwindows:
break
}
@ -758,8 +757,7 @@ func asmb2(ctxt *ld.Link) {
objabi.Hnetbsd,
objabi.Hopenbsd,
objabi.Hdragonfly,
objabi.Hsolaris,
objabi.Hsolaris:
objabi.Hnacl:
symo = int64(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
symo = ld.Rnd(symo, int64(*ld.FlagRound))
@ -838,8 +836,7 @@ func asmb2(ctxt *ld.Link) {
objabi.Hnetbsd,
objabi.Hopenbsd,
objabi.Hdragonfly,
objabi.Hsolaris,
objabi.Hsolaris:
objabi.Hnacl:
ld.Asmbelf(ctxt, symo)
case objabi.Hwindows:

View File

@ -113,18 +113,6 @@ func archinit(ctxt *ld.Link) {
*ld.FlagRound = 4096
}
case objabi.Hnacl:
ld.Elfinit(ctxt)
*ld.FlagW = true // disable dwarf, which gets confused and is useless anyway
ld.HEADR = 0x10000
ld.Funcalign = 32
if *ld.FlagTextAddr == -1 {
*ld.FlagTextAddr = 0x20000
}
if *ld.FlagRound == -1 {
*ld.FlagRound = 0x10000
}
case objabi.Hwindows: /* PE executable */
// ld.HEADR, ld.FlagTextAddr, ld.FlagRound are set in ld.Peinit
return

View File

@ -894,8 +894,7 @@ func asmb2(ctxt *ld.Link) {
case objabi.Hlinux,
objabi.Hfreebsd,
objabi.Hnetbsd,
objabi.Hopenbsd,
objabi.Hopenbsd:
objabi.Hnacl:
ld.Asmbelf(ctxt, int64(symo))
case objabi.Hdarwin:

View File

@ -100,17 +100,6 @@ func archinit(ctxt *ld.Link) {
*ld.FlagRound = 0x10000
}
case objabi.Hnacl:
ld.Elfinit(ctxt)
ld.HEADR = 0x10000
ld.Funcalign = 16
if *ld.FlagTextAddr == -1 {
*ld.FlagTextAddr = 0x20000
}
if *ld.FlagRound == -1 {
*ld.FlagRound = 0x10000
}
case objabi.Hdarwin: /* apple MACH */
ld.HEADR = ld.INITIAL_MACHO_HEADR
if *ld.FlagTextAddr == -1 {

View File

@ -928,8 +928,7 @@ func asmb2(ctxt *ld.Link) {
case objabi.Hlinux,
objabi.Hfreebsd,
objabi.Hnetbsd,
objabi.Hopenbsd,
objabi.Hopenbsd:
objabi.Hnacl:
ld.Asmbelf(ctxt, int64(symo))
case objabi.Hdarwin:

View File

@ -105,16 +105,5 @@ func archinit(ctxt *ld.Link) {
if *ld.FlagRound == -1 {
*ld.FlagRound = 4096
}
case objabi.Hnacl:
ld.Elfinit(ctxt)
ld.HEADR = 0x10000
ld.Funcalign = 16
if *ld.FlagTextAddr == -1 {
*ld.FlagTextAddr = 0x20000
}
if *ld.FlagRound == -1 {
*ld.FlagRound = 0x10000
}
}
}

View File

@ -2168,9 +2168,6 @@ func (ctxt *Link) address() []*sym.Segment {
}
Segtext.Length = va - uint64(*FlagTextAddr)
if ctxt.HeadType == objabi.Hnacl {
va += 32 // room for the "halt sled"
}
if len(Segrodata.Sections) > 0 {
// align to page boundary so as not to mix

View File

@ -1822,9 +1822,8 @@ func Asmbelf(ctxt *Link, symo int64) {
/*
* PHDR must be in a loaded segment. Adjust the text
* segment boundaries downwards to include it.
* Except on NaCl where it must not be loaded.
*/
if ctxt.HeadType != objabi.Hnacl {
{
o := int64(Segtext.Vaddr - pph.vaddr)
Segtext.Vaddr -= uint64(o)
Segtext.Length += uint64(o)

View File

@ -70,38 +70,23 @@ func (ctxt *Link) computeTLSOffset() {
case objabi.Hplan9, objabi.Hwindows, objabi.Hjs, objabi.Haix:
break
/*
* ELF uses TLS offset negative from FS.
* Translate 0(FS) and 8(FS) into -16(FS) and -8(FS).
* Known to low-level assembly in package runtime and runtime/cgo.
*/
case objabi.Hlinux,
objabi.Hfreebsd,
objabi.Hnetbsd,
objabi.Hopenbsd,
objabi.Hdragonfly,
objabi.Hsolaris:
/*
* ELF uses TLS offset negative from FS.
* Translate 0(FS) and 8(FS) into -16(FS) and -8(FS).
* Known to low-level assembly in package runtime and runtime/cgo.
*/
ctxt.Tlsoffset = -1 * ctxt.Arch.PtrSize
case objabi.Hnacl:
case objabi.Hdarwin:
switch ctxt.Arch.Family {
default:
log.Fatalf("unknown thread-local storage offset for nacl/%s", ctxt.Arch.Name)
case sys.ARM:
ctxt.Tlsoffset = 0
case sys.AMD64:
ctxt.Tlsoffset = 0
case sys.I386:
ctxt.Tlsoffset = -8
}
/*
* OS X system constants - offset from 0(GS) to our TLS.
*/
case objabi.Hdarwin:
switch ctxt.Arch.Family {
default:
log.Fatalf("unknown thread-local storage offset for darwin/%s", ctxt.Arch.Name)

View File

@ -285,8 +285,7 @@ func asmb2(ctxt *ld.Link) {
case objabi.Hlinux,
objabi.Hfreebsd,
objabi.Hnetbsd,
objabi.Hopenbsd,
objabi.Hopenbsd:
objabi.Hnacl:
ld.Asmbelf(ctxt, int64(symo))
}

View File

@ -94,16 +94,5 @@ func archinit(ctxt *ld.Link) {
if *ld.FlagRound == -1 {
*ld.FlagRound = 0x10000
}
case objabi.Hnacl:
ld.Elfinit(ctxt)
ld.HEADR = 0x10000
ld.Funcalign = 16
if *ld.FlagTextAddr == -1 {
*ld.FlagTextAddr = 0x20000
}
if *ld.FlagRound == -1 {
*ld.FlagRound = 0x10000
}
}
}

View File

@ -1183,8 +1183,7 @@ func asmb2(ctxt *ld.Link) {
case objabi.Hlinux,
objabi.Hfreebsd,
objabi.Hnetbsd,
objabi.Hopenbsd,
objabi.Hopenbsd:
objabi.Hnacl:
ld.Asmbelf(ctxt, int64(symo))
case objabi.Haix:

View File

@ -100,17 +100,6 @@ func archinit(ctxt *ld.Link) {
*ld.FlagRound = 0x10000
}
case objabi.Hnacl:
ld.Elfinit(ctxt)
ld.HEADR = 0x10000
ld.Funcalign = 16
if *ld.FlagTextAddr == -1 {
*ld.FlagTextAddr = 0x20000
}
if *ld.FlagRound == -1 {
*ld.FlagRound = 0x10000
}
case objabi.Haix:
ld.Xcoffinit(ctxt)
}

View File

@ -760,8 +760,7 @@ func asmb2(ctxt *ld.Link) {
case objabi.Hlinux,
objabi.Hfreebsd,
objabi.Hnetbsd,
objabi.Hopenbsd,
objabi.Hopenbsd:
objabi.Hnacl:
ld.Asmbelf(ctxt, int64(symo))
case objabi.Hwindows:

View File

@ -106,17 +106,6 @@ func archinit(ctxt *ld.Link) {
*ld.FlagRound = 4096
}
case objabi.Hnacl:
ld.Elfinit(ctxt)
ld.HEADR = 0x10000
ld.Funcalign = 32
if *ld.FlagTextAddr == -1 {
*ld.FlagTextAddr = 0x20000
}
if *ld.FlagRound == -1 {
*ld.FlagRound = 0x10000
}
case objabi.Hwindows: /* PE executable */
// ld.HEADR, ld.FlagTextAddr, ld.FlagRound are set in ld.Peinit
return

View File

@ -1,184 +0,0 @@
// Original source:
// http://www.zorinaq.com/papers/md5-amd64.html
// http://www.zorinaq.com/papers/md5-amd64.tar.bz2
//
// Translated from Perl generating GNU assembly into
// #defines generating 6a assembly by the Go Authors.
//
// Restrictions to make code safe for Native Client:
// replace BP with R11, reloaded before use at return.
// replace R15 with R11.
#include "textflag.h"
// MD5 optimized for AMD64.
//
// Author: Marc Bevand <bevand_m (at) epita.fr>
// Licence: I hereby disclaim the copyright on this code and place it
// in the public domain.
TEXT ·block(SB),NOSPLIT,$0-16
MOVL dig+0(FP), R11
MOVL p+4(FP), SI
MOVL p_len+8(FP), DX
SHRQ $6, DX
SHLQ $6, DX
LEAQ (SI)(DX*1), DI
MOVL (0*4)(R11), AX
MOVL (1*4)(R11), BX
MOVL (2*4)(R11), CX
MOVL (3*4)(R11), DX
CMPQ SI, DI
JEQ end
loop:
MOVL AX, R12
MOVL BX, R13
MOVL CX, R14
MOVL DX, R11
MOVL (0*4)(SI), R8
MOVL DX, R9
#define ROUND1(a, b, c, d, index, const, shift) \
XORL c, R9; \
LEAL const(a)(R8*1), a; \
ANDL b, R9; \
XORL d, R9; \
MOVL (index*4)(SI), R8; \
ADDL R9, a; \
ROLL $shift, a; \
MOVL c, R9; \
ADDL b, a
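// For reference (illustrative Go, not from the original file): R9 holds d on
// entry, so the boolean ops above compute ((d^c)&b)^d, an algebraic rewrite
// of MD5's round-1 function F(b,c,d) = (b&c) | (^b&d):
//	func md5F(b, c, d uint32) uint32 { return ((d ^ c) & b) ^ d }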
ROUND1(AX,BX,CX,DX, 1,0xd76aa478, 7);
ROUND1(DX,AX,BX,CX, 2,0xe8c7b756,12);
ROUND1(CX,DX,AX,BX, 3,0x242070db,17);
ROUND1(BX,CX,DX,AX, 4,0xc1bdceee,22);
ROUND1(AX,BX,CX,DX, 5,0xf57c0faf, 7);
ROUND1(DX,AX,BX,CX, 6,0x4787c62a,12);
ROUND1(CX,DX,AX,BX, 7,0xa8304613,17);
ROUND1(BX,CX,DX,AX, 8,0xfd469501,22);
ROUND1(AX,BX,CX,DX, 9,0x698098d8, 7);
ROUND1(DX,AX,BX,CX,10,0x8b44f7af,12);
ROUND1(CX,DX,AX,BX,11,0xffff5bb1,17);
ROUND1(BX,CX,DX,AX,12,0x895cd7be,22);
ROUND1(AX,BX,CX,DX,13,0x6b901122, 7);
ROUND1(DX,AX,BX,CX,14,0xfd987193,12);
ROUND1(CX,DX,AX,BX,15,0xa679438e,17);
ROUND1(BX,CX,DX,AX, 0,0x49b40821,22);
MOVL (1*4)(SI), R8
MOVL DX, R9
MOVL DX, R10
#define ROUND2(a, b, c, d, index, const, shift) \
NOTL R9; \
LEAL const(a)(R8*1),a; \
ANDL b, R10; \
ANDL c, R9; \
MOVL (index*4)(SI),R8; \
ORL R9, R10; \
MOVL c, R9; \
ADDL R10, a; \
MOVL c, R10; \
ROLL $shift, a; \
ADDL b, a
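// Likewise (illustrative Go): with R9 = R10 = d on entry, ROUND2 computes
// (d&b) | (^d&c), MD5's round-2 function G(b,c,d) = (b&d) | (c&^d):
//	func md5G(b, c, d uint32) uint32 { return (d & b) | (^d & c) }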
ROUND2(AX,BX,CX,DX, 6,0xf61e2562, 5);
ROUND2(DX,AX,BX,CX,11,0xc040b340, 9);
ROUND2(CX,DX,AX,BX, 0,0x265e5a51,14);
ROUND2(BX,CX,DX,AX, 5,0xe9b6c7aa,20);
ROUND2(AX,BX,CX,DX,10,0xd62f105d, 5);
ROUND2(DX,AX,BX,CX,15, 0x2441453, 9);
ROUND2(CX,DX,AX,BX, 4,0xd8a1e681,14);
ROUND2(BX,CX,DX,AX, 9,0xe7d3fbc8,20);
ROUND2(AX,BX,CX,DX,14,0x21e1cde6, 5);
ROUND2(DX,AX,BX,CX, 3,0xc33707d6, 9);
ROUND2(CX,DX,AX,BX, 8,0xf4d50d87,14);
ROUND2(BX,CX,DX,AX,13,0x455a14ed,20);
ROUND2(AX,BX,CX,DX, 2,0xa9e3e905, 5);
ROUND2(DX,AX,BX,CX, 7,0xfcefa3f8, 9);
ROUND2(CX,DX,AX,BX,12,0x676f02d9,14);
ROUND2(BX,CX,DX,AX, 0,0x8d2a4c8a,20);
MOVL (5*4)(SI), R8
MOVL CX, R9
#define ROUND3(a, b, c, d, index, const, shift) \
LEAL const(a)(R8*1),a; \
MOVL (index*4)(SI),R8; \
XORL d, R9; \
XORL b, R9; \
ADDL R9, a; \
ROLL $shift, a; \
MOVL b, R9; \
ADDL b, a
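// Likewise (illustrative Go): with R9 = c on entry, ROUND3 computes the
// round-3 parity function H(b,c,d) = b ^ c ^ d:
//	func md5H(b, c, d uint32) uint32 { return b ^ c ^ d }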
ROUND3(AX,BX,CX,DX, 8,0xfffa3942, 4);
ROUND3(DX,AX,BX,CX,11,0x8771f681,11);
ROUND3(CX,DX,AX,BX,14,0x6d9d6122,16);
ROUND3(BX,CX,DX,AX, 1,0xfde5380c,23);
ROUND3(AX,BX,CX,DX, 4,0xa4beea44, 4);
ROUND3(DX,AX,BX,CX, 7,0x4bdecfa9,11);
ROUND3(CX,DX,AX,BX,10,0xf6bb4b60,16);
ROUND3(BX,CX,DX,AX,13,0xbebfbc70,23);
ROUND3(AX,BX,CX,DX, 0,0x289b7ec6, 4);
ROUND3(DX,AX,BX,CX, 3,0xeaa127fa,11);
ROUND3(CX,DX,AX,BX, 6,0xd4ef3085,16);
ROUND3(BX,CX,DX,AX, 9, 0x4881d05,23);
ROUND3(AX,BX,CX,DX,12,0xd9d4d039, 4);
ROUND3(DX,AX,BX,CX,15,0xe6db99e5,11);
ROUND3(CX,DX,AX,BX, 2,0x1fa27cf8,16);
ROUND3(BX,CX,DX,AX, 0,0xc4ac5665,23);
MOVL (0*4)(SI), R8
MOVL $0xffffffff, R9
XORL DX, R9
#define ROUND4(a, b, c, d, index, const, shift) \
LEAL const(a)(R8*1),a; \
ORL b, R9; \
XORL c, R9; \
ADDL R9, a; \
MOVL (index*4)(SI),R8; \
MOVL $0xffffffff, R9; \
ROLL $shift, a; \
XORL c, R9; \
ADDL b, a
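// Likewise (illustrative Go): R9 is primed with ^d, so ROUND4 computes the
// round-4 function I(b,c,d) = c ^ (b | ^d):
//	func md5I(b, c, d uint32) uint32 { return c ^ (b | ^d) }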
ROUND4(AX,BX,CX,DX, 7,0xf4292244, 6);
ROUND4(DX,AX,BX,CX,14,0x432aff97,10);
ROUND4(CX,DX,AX,BX, 5,0xab9423a7,15);
ROUND4(BX,CX,DX,AX,12,0xfc93a039,21);
ROUND4(AX,BX,CX,DX, 3,0x655b59c3, 6);
ROUND4(DX,AX,BX,CX,10,0x8f0ccc92,10);
ROUND4(CX,DX,AX,BX, 1,0xffeff47d,15);
ROUND4(BX,CX,DX,AX, 8,0x85845dd1,21);
ROUND4(AX,BX,CX,DX,15,0x6fa87e4f, 6);
ROUND4(DX,AX,BX,CX, 6,0xfe2ce6e0,10);
ROUND4(CX,DX,AX,BX,13,0xa3014314,15);
ROUND4(BX,CX,DX,AX, 4,0x4e0811a1,21);
ROUND4(AX,BX,CX,DX,11,0xf7537e82, 6);
ROUND4(DX,AX,BX,CX, 2,0xbd3af235,10);
ROUND4(CX,DX,AX,BX, 9,0x2ad7d2bb,15);
ROUND4(BX,CX,DX,AX, 0,0xeb86d391,21);
ADDL R12, AX
ADDL R13, BX
ADDL R14, CX
ADDL R11, DX
ADDQ $64, SI
CMPQ SI, DI
JB loop
end:
MOVL dig+0(FP), R11
MOVL AX, (0*4)(R11)
MOVL BX, (1*4)(R11)
MOVL CX, (2*4)(R11)
MOVL DX, (3*4)(R11)
RET

View File

@ -1,216 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// SHA-1 block routine. See sha1block.go for Go equivalent.
//
// There are 80 rounds of 4 types:
// - rounds 0-15 are type 1 and load data (ROUND1 macro).
// - rounds 16-19 are type 1 and do not load data (ROUND1x macro).
// - rounds 20-39 are type 2 and do not load data (ROUND2 macro).
// - rounds 40-59 are type 3 and do not load data (ROUND3 macro).
// - rounds 60-79 are type 4 and do not load data (ROUND4 macro).
//
// Each round loads or shuffles the data, then computes a per-round
// function of b, c, d, and then mixes the result into and rotates the
// five registers a, b, c, d, e holding the intermediate results.
//
// The register rotation is implemented by rotating the arguments to
// the round macros instead of by explicit move instructions.
//
// amd64p32 version.
// To ensure safety for Native Client, avoids use of BP and R15
// as well as two-register addressing modes.
#define LOAD(index) \
MOVL (index*4)(SI), R10; \
BSWAPL R10; \
MOVL R10, (index*4)(SP)
#define SHUFFLE(index) \
MOVL (((index)&0xf)*4)(SP), R10; \
XORL (((index-3)&0xf)*4)(SP), R10; \
XORL (((index-8)&0xf)*4)(SP), R10; \
XORL (((index-14)&0xf)*4)(SP), R10; \
ROLL $1, R10; \
MOVL R10, (((index)&0xf)*4)(SP)
#define FUNC1(a, b, c, d, e) \
MOVL d, R9; \
XORL c, R9; \
ANDL b, R9; \
XORL d, R9
#define FUNC2(a, b, c, d, e) \
MOVL b, R9; \
XORL c, R9; \
XORL d, R9
#define FUNC3(a, b, c, d, e) \
MOVL b, R8; \
ORL c, R8; \
ANDL d, R8; \
MOVL b, R9; \
ANDL c, R9; \
ORL R8, R9
#define FUNC4 FUNC2
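// For reference (illustrative Go, not from the original file), the classic
// SHA-1 round functions these macros implement:
//	func f1(b, c, d uint32) uint32 { return (b & c) | (^b & d) }      // FUNC1, "choose", computed as ((d^c)&b)^d
//	func f2(b, c, d uint32) uint32 { return b ^ c ^ d }               // FUNC2 and FUNC4, "parity"
//	func f3(b, c, d uint32) uint32 { return (b & c) | ((b | c) & d) } // FUNC3, "majority"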
#define MIX(a, b, c, d, e, const) \
ROLL $30, b; \
ADDL R9, e; \
MOVL a, R8; \
ROLL $5, R8; \
LEAL const(e)(R10*1), e; \
ADDL R8, e
#define ROUND1(a, b, c, d, e, index) \
LOAD(index); \
FUNC1(a, b, c, d, e); \
MIX(a, b, c, d, e, 0x5A827999)
#define ROUND1x(a, b, c, d, e, index) \
SHUFFLE(index); \
FUNC1(a, b, c, d, e); \
MIX(a, b, c, d, e, 0x5A827999)
#define ROUND2(a, b, c, d, e, index) \
SHUFFLE(index); \
FUNC2(a, b, c, d, e); \
MIX(a, b, c, d, e, 0x6ED9EBA1)
#define ROUND3(a, b, c, d, e, index) \
SHUFFLE(index); \
FUNC3(a, b, c, d, e); \
MIX(a, b, c, d, e, 0x8F1BBCDC)
#define ROUND4(a, b, c, d, e, index) \
SHUFFLE(index); \
FUNC4(a, b, c, d, e); \
MIX(a, b, c, d, e, 0xCA62C1D6)
TEXT ·block(SB),NOSPLIT,$64-16
MOVL dig+0(FP), R14
MOVL p_base+4(FP), SI
MOVL p_len+8(FP), DX
SHRQ $6, DX
SHLQ $6, DX
LEAQ (SI)(DX*1), DI
MOVL (0*4)(R14), AX
MOVL (1*4)(R14), BX
MOVL (2*4)(R14), CX
MOVL (3*4)(R14), DX
MOVL (4*4)(R14), R13
CMPQ SI, DI
JEQ end
loop:
#define BP R13 /* keep diff from sha1block_amd64.s small */
ROUND1(AX, BX, CX, DX, BP, 0)
ROUND1(BP, AX, BX, CX, DX, 1)
ROUND1(DX, BP, AX, BX, CX, 2)
ROUND1(CX, DX, BP, AX, BX, 3)
ROUND1(BX, CX, DX, BP, AX, 4)
ROUND1(AX, BX, CX, DX, BP, 5)
ROUND1(BP, AX, BX, CX, DX, 6)
ROUND1(DX, BP, AX, BX, CX, 7)
ROUND1(CX, DX, BP, AX, BX, 8)
ROUND1(BX, CX, DX, BP, AX, 9)
ROUND1(AX, BX, CX, DX, BP, 10)
ROUND1(BP, AX, BX, CX, DX, 11)
ROUND1(DX, BP, AX, BX, CX, 12)
ROUND1(CX, DX, BP, AX, BX, 13)
ROUND1(BX, CX, DX, BP, AX, 14)
ROUND1(AX, BX, CX, DX, BP, 15)
ROUND1x(BP, AX, BX, CX, DX, 16)
ROUND1x(DX, BP, AX, BX, CX, 17)
ROUND1x(CX, DX, BP, AX, BX, 18)
ROUND1x(BX, CX, DX, BP, AX, 19)
ROUND2(AX, BX, CX, DX, BP, 20)
ROUND2(BP, AX, BX, CX, DX, 21)
ROUND2(DX, BP, AX, BX, CX, 22)
ROUND2(CX, DX, BP, AX, BX, 23)
ROUND2(BX, CX, DX, BP, AX, 24)
ROUND2(AX, BX, CX, DX, BP, 25)
ROUND2(BP, AX, BX, CX, DX, 26)
ROUND2(DX, BP, AX, BX, CX, 27)
ROUND2(CX, DX, BP, AX, BX, 28)
ROUND2(BX, CX, DX, BP, AX, 29)
ROUND2(AX, BX, CX, DX, BP, 30)
ROUND2(BP, AX, BX, CX, DX, 31)
ROUND2(DX, BP, AX, BX, CX, 32)
ROUND2(CX, DX, BP, AX, BX, 33)
ROUND2(BX, CX, DX, BP, AX, 34)
ROUND2(AX, BX, CX, DX, BP, 35)
ROUND2(BP, AX, BX, CX, DX, 36)
ROUND2(DX, BP, AX, BX, CX, 37)
ROUND2(CX, DX, BP, AX, BX, 38)
ROUND2(BX, CX, DX, BP, AX, 39)
ROUND3(AX, BX, CX, DX, BP, 40)
ROUND3(BP, AX, BX, CX, DX, 41)
ROUND3(DX, BP, AX, BX, CX, 42)
ROUND3(CX, DX, BP, AX, BX, 43)
ROUND3(BX, CX, DX, BP, AX, 44)
ROUND3(AX, BX, CX, DX, BP, 45)
ROUND3(BP, AX, BX, CX, DX, 46)
ROUND3(DX, BP, AX, BX, CX, 47)
ROUND3(CX, DX, BP, AX, BX, 48)
ROUND3(BX, CX, DX, BP, AX, 49)
ROUND3(AX, BX, CX, DX, BP, 50)
ROUND3(BP, AX, BX, CX, DX, 51)
ROUND3(DX, BP, AX, BX, CX, 52)
ROUND3(CX, DX, BP, AX, BX, 53)
ROUND3(BX, CX, DX, BP, AX, 54)
ROUND3(AX, BX, CX, DX, BP, 55)
ROUND3(BP, AX, BX, CX, DX, 56)
ROUND3(DX, BP, AX, BX, CX, 57)
ROUND3(CX, DX, BP, AX, BX, 58)
ROUND3(BX, CX, DX, BP, AX, 59)
ROUND4(AX, BX, CX, DX, BP, 60)
ROUND4(BP, AX, BX, CX, DX, 61)
ROUND4(DX, BP, AX, BX, CX, 62)
ROUND4(CX, DX, BP, AX, BX, 63)
ROUND4(BX, CX, DX, BP, AX, 64)
ROUND4(AX, BX, CX, DX, BP, 65)
ROUND4(BP, AX, BX, CX, DX, 66)
ROUND4(DX, BP, AX, BX, CX, 67)
ROUND4(CX, DX, BP, AX, BX, 68)
ROUND4(BX, CX, DX, BP, AX, 69)
ROUND4(AX, BX, CX, DX, BP, 70)
ROUND4(BP, AX, BX, CX, DX, 71)
ROUND4(DX, BP, AX, BX, CX, 72)
ROUND4(CX, DX, BP, AX, BX, 73)
ROUND4(BX, CX, DX, BP, AX, 74)
ROUND4(AX, BX, CX, DX, BP, 75)
ROUND4(BP, AX, BX, CX, DX, 76)
ROUND4(DX, BP, AX, BX, CX, 77)
ROUND4(CX, DX, BP, AX, BX, 78)
ROUND4(BX, CX, DX, BP, AX, 79)
#undef BP
ADDL (0*4)(R14), AX
ADDL (1*4)(R14), BX
ADDL (2*4)(R14), CX
ADDL (3*4)(R14), DX
ADDL (4*4)(R14), R13
MOVL AX, (0*4)(R14)
MOVL BX, (1*4)(R14)
MOVL CX, (2*4)(R14)
MOVL DX, (3*4)(R14)
MOVL R13, (4*4)(R14)
ADDQ $64, SI
CMPQ SI, DI
JB loop
end:
RET

View File

@ -1,37 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package crc32
import "internal/cpu"
// This file contains the code to call the SSE 4.2 version of the Castagnoli
// CRC.
// castagnoliSSE42 is defined in crc32_amd64p32.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42(crc uint32, p []byte) uint32
func archAvailableCastagnoli() bool {
return cpu.X86.HasSSE42
}
func archInitCastagnoli() {
if !cpu.X86.HasSSE42 {
panic("not available")
}
// No initialization necessary.
}
func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
if !cpu.X86.HasSSE42 {
panic("not available")
}
return castagnoliSSE42(crc, p)
}
func archAvailableIEEE() bool { return false }
func archInitIEEE() { panic("not available") }
func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") }
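For orientation, a minimal sketch of how these package-internal hooks were consumed (illustrative usage, not code from this CL; the pre/post bit inversion happens inside the assembly):

	if archAvailableCastagnoli() {
		archInitCastagnoli()
		sum := archUpdateCastagnoli(0, []byte("hello")) // CRC-32C of "hello"
		_ = sum
	}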

View File

@ -1,53 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// func castagnoliSSE42(crc uint32, p []byte) uint32
TEXT ·castagnoliSSE42(SB),NOSPLIT,$0
MOVL crc+0(FP), AX // CRC value
MOVL p+4(FP), SI // data pointer
MOVL p_len+8(FP), CX // len(p)
NOTL AX
/* If there's less than 8 bytes to process, we do it byte-by-byte. */
CMPQ CX, $8
JL cleanup
/* Process individual bytes until the input is 8-byte aligned. */
startup:
MOVQ SI, BX
ANDQ $7, BX
JZ aligned
CRC32B (SI), AX
DECQ CX
INCQ SI
JMP startup
aligned:
/* The input is now 8-byte aligned and we can process 8-byte chunks. */
CMPQ CX, $8
JL cleanup
CRC32Q (SI), AX
ADDQ $8, SI
SUBQ $8, CX
JMP aligned
cleanup:
/* We may have some bytes left over that we process one at a time. */
CMPQ CX, $0
JE done
CRC32B (SI), AX
INCQ SI
DECQ CX
JMP cleanup
done:
NOTL AX
MOVL AX, ret+16(FP)
RET

View File

@ -1,142 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "go_asm.h"
#include "textflag.h"
TEXT ·Compare(SB),NOSPLIT,$0-28
MOVL a_base+0(FP), SI
MOVL a_len+4(FP), BX
MOVL b_base+12(FP), DI
MOVL b_len+16(FP), DX
CALL cmpbody<>(SB)
MOVL AX, ret+24(FP)
RET
TEXT runtime·cmpstring(SB),NOSPLIT,$0-20
MOVL a_base+0(FP), SI
MOVL a_len+4(FP), BX
MOVL b_base+8(FP), DI
MOVL b_len+12(FP), DX
CALL cmpbody<>(SB)
MOVL AX, ret+16(FP)
RET
// input:
// SI = a
// DI = b
// BX = alen
// DX = blen
// output:
// AX = 1/0/-1
TEXT cmpbody<>(SB),NOSPLIT,$0-0
CMPQ SI, DI
JEQ allsame
CMPQ BX, DX
MOVQ DX, R8
CMOVQLT BX, R8 // R8 = min(alen, blen) = # of bytes to compare
CMPQ R8, $8
JB small
loop:
CMPQ R8, $16
JBE _0through16
MOVOU (SI), X0
MOVOU (DI), X1
PCMPEQB X0, X1
PMOVMSKB X1, AX
XORQ $0xffff, AX // convert EQ to NE
JNE diff16 // branch if at least one byte is not equal
ADDQ $16, SI
ADDQ $16, DI
SUBQ $16, R8
JMP loop
// AX = bit mask of differences
diff16:
BSFQ AX, BX // index of first byte that differs
XORQ AX, AX
ADDQ BX, SI
MOVB (SI), CX
ADDQ BX, DI
CMPB CX, (DI)
SETHI AX
LEAQ -1(AX*2), AX // convert 1/0 to +1/-1
RET
// 0 through 16 bytes left, alen>=8, blen>=8
_0through16:
CMPQ R8, $8
JBE _0through8
MOVQ (SI), AX
MOVQ (DI), CX
CMPQ AX, CX
JNE diff8
_0through8:
ADDQ R8, SI
ADDQ R8, DI
MOVQ -8(SI), AX
MOVQ -8(DI), CX
CMPQ AX, CX
JEQ allsame
// AX and CX contain parts of a and b that differ.
diff8:
BSWAPQ AX // reverse order of bytes
BSWAPQ CX
XORQ AX, CX
BSRQ CX, CX // index of highest bit difference
SHRQ CX, AX // move a's bit to bottom
ANDQ $1, AX // mask bit
LEAQ -1(AX*2), AX // 1/0 => +1/-1
RET
// 0-7 bytes in common
small:
LEAQ (R8*8), CX // bytes left -> bits left
NEGQ CX // - bits left (== 64 - bits left mod 64)
JEQ allsame
// load bytes of a into high bytes of SI
CMPB SI, $0xf8
JA si_high
MOVQ (SI), SI
JMP si_finish
si_high:
ADDQ R8, SI
MOVQ -8(SI), SI
SHRQ CX, SI
si_finish:
SHLQ CX, SI
// load bytes of b into high bytes of DI
CMPB DI, $0xf8
JA di_high
MOVQ (DI), DI
JMP di_finish
di_high:
ADDQ R8, DI
MOVQ -8(DI), DI
SHRQ CX, DI
di_finish:
SHLQ CX, DI
BSWAPQ SI // reverse order of bytes
BSWAPQ DI
XORQ SI, DI // find bit differences
JEQ allsame
BSRQ DI, CX // index of highest bit difference
SHRQ CX, SI // move a's bit to bottom
ANDQ $1, SI // mask bit
LEAQ -1(SI*2), AX // 1/0 => +1/-1
RET
allsame:
XORQ AX, AX
XORQ CX, CX
CMPQ BX, DX
SETGT AX // 1 if alen > blen
SETEQ CX // 1 if alen == blen
LEAQ -1(CX)(AX*2), AX // 1,0,-1 result
RET
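For reference, the contract cmpbody implements, written as plain Go (a sketch, not the removed code):

	// compare orders a and b lexicographically, returning -1, 0, or +1.
	func compare(a, b []byte) int {
		n := len(a)
		if len(b) < n {
			n = len(b)
		}
		for i := 0; i < n; i++ {
			if a[i] != b[i] {
				if a[i] < b[i] {
					return -1
				}
				return 1
			}
		}
		switch {
		case len(a) < len(b):
			return -1
		case len(a) > len(b):
			return 1
		}
		return 0
	}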

View File

@ -1,132 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "go_asm.h"
#include "textflag.h"
// memequal(a, b unsafe.Pointer, size uintptr) bool
TEXT runtime·memequal(SB),NOSPLIT,$0-17
MOVL a+0(FP), SI
MOVL b+4(FP), DI
CMPL SI, DI
JEQ eq
MOVL size+8(FP), BX
CALL memeqbody<>(SB)
MOVB AX, ret+16(FP)
RET
eq:
MOVB $1, ret+16(FP)
RET
// memequal_varlen(a, b unsafe.Pointer) bool
TEXT runtime·memequal_varlen(SB),NOSPLIT,$0-9
MOVL a+0(FP), SI
MOVL b+4(FP), DI
CMPL SI, DI
JEQ eq
MOVL 4(DX), BX // compiler stores size at offset 4 in the closure
CALL memeqbody<>(SB)
MOVB AX, ret+8(FP)
RET
eq:
MOVB $1, ret+8(FP)
RET
// a in SI
// b in DI
// count in BX
TEXT memeqbody<>(SB),NOSPLIT,$0-0
XORQ AX, AX
CMPQ BX, $8
JB small
// 64 bytes at a time using xmm registers
hugeloop:
CMPQ BX, $64
JB bigloop
MOVOU (SI), X0
MOVOU (DI), X1
MOVOU 16(SI), X2
MOVOU 16(DI), X3
MOVOU 32(SI), X4
MOVOU 32(DI), X5
MOVOU 48(SI), X6
MOVOU 48(DI), X7
PCMPEQB X1, X0
PCMPEQB X3, X2
PCMPEQB X5, X4
PCMPEQB X7, X6
PAND X2, X0
PAND X6, X4
PAND X4, X0
PMOVMSKB X0, DX
ADDQ $64, SI
ADDQ $64, DI
SUBQ $64, BX
CMPL DX, $0xffff
JEQ hugeloop
RET
// 8 bytes at a time using 64-bit register
bigloop:
CMPQ BX, $8
JBE leftover
MOVQ (SI), CX
MOVQ (DI), DX
ADDQ $8, SI
ADDQ $8, DI
SUBQ $8, BX
CMPQ CX, DX
JEQ bigloop
RET
// remaining 0-8 bytes
leftover:
ADDQ BX, SI
ADDQ BX, DI
MOVQ -8(SI), CX
MOVQ -8(DI), DX
CMPQ CX, DX
SETEQ AX
RET
small:
CMPQ BX, $0
JEQ equal
LEAQ 0(BX*8), CX
NEGQ CX
CMPB SI, $0xf8
JA si_high
// load at SI won't cross a page boundary.
MOVQ (SI), SI
JMP si_finish
si_high:
// address ends in 11111xxx. Load up to the bytes we want, move to correct position.
MOVQ BX, DX
ADDQ SI, DX
MOVQ -8(DX), SI
SHRQ CX, SI
si_finish:
// same for DI.
CMPB DI, $0xf8
JA di_high
MOVQ (DI), DI
JMP di_finish
di_high:
MOVQ BX, DX
ADDQ DI, DX
MOVQ -8(DX), DI
SHRQ CX, DI
di_finish:
SUBQ SI, DI
SHLQ CX, DI
equal:
SETEQ AX
RET

View File

@ -1,113 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "go_asm.h"
#include "textflag.h"
TEXT ·IndexByte(SB),NOSPLIT,$0-20
MOVL b_base+0(FP), SI
MOVL b_len+4(FP), BX
MOVB c+12(FP), AL
CALL indexbytebody<>(SB)
MOVL AX, ret+16(FP)
RET
TEXT ·IndexByteString(SB),NOSPLIT,$0-20
MOVL s_base+0(FP), SI
MOVL s_len+4(FP), BX
MOVB c+8(FP), AL
CALL indexbytebody<>(SB)
MOVL AX, ret+16(FP)
RET
// input:
// SI: data
// BX: data len
// AL: byte sought
// output:
// AX
TEXT indexbytebody<>(SB),NOSPLIT,$0
MOVL SI, DI
CMPL BX, $16
JLT small
// round up to first 16-byte boundary
TESTL $15, SI
JZ aligned
MOVL SI, CX
ANDL $~15, CX
ADDL $16, CX
// search the beginning
SUBL SI, CX
REPN; SCASB
JZ success
// DI is 16-byte aligned; get ready to search using SSE instructions
aligned:
// round down to last 16-byte boundary
MOVL BX, R11
ADDL SI, R11
ANDL $~15, R11
// shuffle X0 around so that each byte contains c
MOVD AX, X0
PUNPCKLBW X0, X0
PUNPCKLBW X0, X0
PSHUFL $0, X0, X0
JMP condition
sse:
// move the next 16-byte chunk of the buffer into X1
MOVO (DI), X1
// compare bytes in X0 to X1
PCMPEQB X0, X1
// take the top bit of each byte in X1 and put the result in DX
PMOVMSKB X1, DX
TESTL DX, DX
JNZ ssesuccess
ADDL $16, DI
condition:
CMPL DI, R11
JNE sse
// search the end
MOVL SI, CX
ADDL BX, CX
SUBL R11, CX
// if CX == 0, the zero flag will be set and we'll end up
// returning a false success
JZ failure
REPN; SCASB
JZ success
failure:
MOVL $-1, AX
RET
// handle lengths < 16
small:
MOVL BX, CX
REPN; SCASB
JZ success
MOVL $-1, AX
RET
// we've found the chunk containing the byte
// now just figure out which specific byte it is
ssesuccess:
// get the index of the least significant set bit
BSFW DX, DX
SUBL SI, DI
ADDL DI, DX
MOVL DX, AX
RET
success:
SUBL SI, DI
SUBL $1, DI
MOVL DI, AX
RET
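For reference, the scalar behavior the REPN; SCASB and SSE paths above accelerate (a sketch, not the removed code):

	// indexByte returns the index of the first c in b, or -1 if absent.
	func indexByte(b []byte, c byte) int {
		for i := 0; i < len(b); i++ {
			if b[i] == c {
				return i
			}
		}
		return -1
	}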

View File

@ -1,7 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
const GOARCH = "amd64p32"

View File

@ -1,40 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !math_big_pure_go
#include "textflag.h"
TEXT ·mulWW(SB),NOSPLIT,$0
JMP ·mulWW_g(SB)
TEXT ·divWW(SB),NOSPLIT,$0
JMP ·divWW_g(SB)
TEXT ·addVV(SB),NOSPLIT,$0
JMP ·addVV_g(SB)
TEXT ·subVV(SB),NOSPLIT,$0
JMP ·subVV_g(SB)
TEXT ·addVW(SB),NOSPLIT,$0
JMP ·addVW_g(SB)
TEXT ·subVW(SB),NOSPLIT,$0
JMP ·subVW_g(SB)
TEXT ·shlVU(SB),NOSPLIT,$0
JMP ·shlVU_g(SB)
TEXT ·shrVU(SB),NOSPLIT,$0
JMP ·shrVU_g(SB)
TEXT ·mulAddVWW(SB),NOSPLIT,$0
JMP ·mulAddVWW_g(SB)
TEXT ·addMulVVW(SB),NOSPLIT,$0
JMP ·addMulVVW_g(SB)
TEXT ·divWVW(SB),NOSPLIT,$0
JMP ·divWVW_g(SB)

View File

@ -1,5 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "dim_amd64.s"

View File

@ -1,5 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "exp_amd64.s"

View File

@ -1,5 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "floor_amd64.s"

View File

@ -1,5 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "hypot_amd64.s"

View File

@ -1,5 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "log_amd64.s"

View File

@ -1,5 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "sqrt_amd64.s"

View File

@ -1,5 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "stubs_amd64.s"

View File

@ -1,36 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
#include "funcdata.h"
// makeFuncStub is the code half of the function returned by MakeFunc.
// See the comment on the declaration of makeFuncStub in makefunc.go
// for more details.
// No argsize here, gc generates argsize info at call site.
TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16
NO_LOCAL_POINTERS
MOVL DX, 0(SP)
LEAL argframe+0(FP), CX
MOVL CX, 4(SP)
MOVB $0, 12(SP)
LEAL 12(SP), AX
MOVL AX, 8(SP)
CALL ·callReflect(SB)
RET
// methodValueCall is the code half of the function returned by makeMethodValue.
// See the comment on the declaration of methodValueCall in makefunc.go
// for more details.
// No argsize here, gc generates argsize info at call site.
TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16
NO_LOCAL_POINTERS
MOVL DX, 0(SP)
LEAL argframe+0(FP), CX
MOVL CX, 4(SP)
MOVB $0, 12(SP)
LEAL 12(SP), AX
MOVL AX, 8(SP)
CALL ·callMethod(SB)
RET

View File

@ -2168,11 +2168,6 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
var ptrdata uintptr
var overflowPad uintptr
// On NaCl, pad if needed to make overflow end at the proper struct alignment.
// On other systems, align > ptrSize is not possible.
if runtime.GOARCH == "amd64p32" && (ktyp.align > ptrSize || etyp.align > ptrSize) {
overflowPad = ptrSize
}
size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
panic("reflect: bad size computation in MapOf")

View File

@ -1,759 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "go_asm.h"
#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"
TEXT runtime·rt0_go(SB),NOSPLIT,$0
// copy arguments forward on an even stack
MOVL SP, CX
MOVL 8(CX), AX // argc
MOVL 12(CX), BX // argv
SUBL $128, CX // plenty of scratch
ANDL $~15, CX
MOVL CX, SP
MOVL AX, 16(SP)
MOVL BX, 24(SP)
// create istack out of the given (operating system) stack.
MOVL $runtime·g0(SB), DI
LEAL (-64*1024+104)(SP), BX
MOVL BX, g_stackguard0(DI)
MOVL BX, g_stackguard1(DI)
MOVL BX, (g_stack+stack_lo)(DI)
MOVL SP, (g_stack+stack_hi)(DI)
// find out information about the processor we're on
MOVL $0, AX
CPUID
CMPL AX, $0
JE nocpuinfo
CMPL BX, $0x756E6547 // "Genu"
JNE notintel
CMPL DX, $0x49656E69 // "ineI"
JNE notintel
CMPL CX, $0x6C65746E // "ntel"
JNE notintel
MOVB $1, runtime·isIntel(SB)
notintel:
// Load EAX=1 cpuid flags
MOVL $1, AX
CPUID
MOVL AX, runtime·processorVersionInfo(SB)
nocpuinfo:
LEAL runtime·m0+m_tls(SB), DI
CALL runtime·settls(SB)
// store through it, to make sure it works
get_tls(BX)
MOVQ $0x123, g(BX)
MOVQ runtime·m0+m_tls(SB), AX
CMPQ AX, $0x123
JEQ 2(PC)
CALL runtime·abort(SB)
ok:
// set the per-goroutine and per-mach "registers"
get_tls(BX)
LEAL runtime·g0(SB), CX
MOVL CX, g(BX)
LEAL runtime·m0(SB), AX
// save m->g0 = g0
MOVL CX, m_g0(AX)
// save m0 to g0->m
MOVL AX, g_m(CX)
CLD // convention is D is always left cleared
CALL runtime·check(SB)
MOVL 16(SP), AX // copy argc
MOVL AX, 0(SP)
MOVL 24(SP), AX // copy argv
MOVL AX, 4(SP)
CALL runtime·args(SB)
CALL runtime·osinit(SB)
CALL runtime·schedinit(SB)
// create a new goroutine to start program
MOVL $runtime·mainPC(SB), AX // entry
MOVL $0, 0(SP)
MOVL AX, 4(SP)
CALL runtime·newproc(SB)
// start this M
CALL runtime·mstart(SB)
MOVL $0xf1, 0xf1 // crash
RET
DATA runtime·mainPC+0(SB)/4,$runtime·main(SB)
GLOBL runtime·mainPC(SB),RODATA,$4
TEXT runtime·breakpoint(SB),NOSPLIT,$0-0
INT $3
RET
TEXT runtime·asminit(SB),NOSPLIT,$0-0
// No per-thread init.
RET
/*
* go-routine
*/
// void gosave(Gobuf*)
// save state in Gobuf; setjmp
TEXT runtime·gosave(SB), NOSPLIT, $0-4
MOVL buf+0(FP), AX // gobuf
LEAL buf+0(FP), BX // caller's SP
MOVL BX, gobuf_sp(AX)
MOVL 0(SP), BX // caller's PC
MOVL BX, gobuf_pc(AX)
MOVQ $0, gobuf_ret(AX)
// Assert ctxt is zero. See func save.
MOVL gobuf_ctxt(AX), BX
TESTL BX, BX
JZ 2(PC)
CALL runtime·badctxt(SB)
get_tls(CX)
MOVL g(CX), BX
MOVL BX, gobuf_g(AX)
RET
// void gogo(Gobuf*)
// restore state from Gobuf; longjmp
TEXT runtime·gogo(SB), NOSPLIT, $8-4
MOVL buf+0(FP), BX // gobuf
MOVL gobuf_g(BX), DX
MOVL 0(DX), CX // make sure g != nil
get_tls(CX)
MOVL DX, g(CX)
MOVL gobuf_sp(BX), SP // restore SP
MOVL gobuf_ctxt(BX), DX
MOVQ gobuf_ret(BX), AX
MOVL $0, gobuf_sp(BX) // clear to help garbage collector
MOVQ $0, gobuf_ret(BX)
MOVL $0, gobuf_ctxt(BX)
MOVL gobuf_pc(BX), BX
JMP BX
// func mcall(fn func(*g))
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
TEXT runtime·mcall(SB), NOSPLIT, $0-4
MOVL fn+0(FP), DI
get_tls(CX)
MOVL g(CX), AX // save state in g->sched
MOVL 0(SP), BX // caller's PC
MOVL BX, (g_sched+gobuf_pc)(AX)
LEAL fn+0(FP), BX // caller's SP
MOVL BX, (g_sched+gobuf_sp)(AX)
MOVL AX, (g_sched+gobuf_g)(AX)
// switch to m->g0 & its stack, call fn
MOVL g(CX), BX
MOVL g_m(BX), BX
MOVL m_g0(BX), SI
CMPL SI, AX // if g == m->g0 call badmcall
JNE 3(PC)
MOVL $runtime·badmcall(SB), AX
JMP AX
MOVL SI, g(CX) // g = m->g0
MOVL (g_sched+gobuf_sp)(SI), SP // sp = m->g0->sched.sp
PUSHQ AX
MOVL DI, DX
MOVL 0(DI), DI
CALL DI
POPQ AX
MOVL $runtime·badmcall2(SB), AX
JMP AX
RET
// systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives
// at the top of the system stack because the one at the top of
// the system stack terminates the stack walk (see topofstack()).
TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
RET
// func systemstack(fn func())
TEXT runtime·systemstack(SB), NOSPLIT, $0-4
MOVL fn+0(FP), DI // DI = fn
get_tls(CX)
MOVL g(CX), AX // AX = g
MOVL g_m(AX), BX // BX = m
CMPL AX, m_gsignal(BX)
JEQ noswitch
MOVL m_g0(BX), DX // DX = g0
CMPL AX, DX
JEQ noswitch
CMPL AX, m_curg(BX)
JNE bad
// switch stacks
// save our state in g->sched. Pretend to
// be systemstack_switch if the G stack is scanned.
MOVL $runtime·systemstack_switch(SB), SI
MOVL SI, (g_sched+gobuf_pc)(AX)
MOVL SP, (g_sched+gobuf_sp)(AX)
MOVL AX, (g_sched+gobuf_g)(AX)
// switch to g0
MOVL DX, g(CX)
MOVL (g_sched+gobuf_sp)(DX), SP
// call target function
MOVL DI, DX
MOVL 0(DI), DI
CALL DI
// switch back to g
get_tls(CX)
MOVL g(CX), AX
MOVL g_m(AX), BX
MOVL m_curg(BX), AX
MOVL AX, g(CX)
MOVL (g_sched+gobuf_sp)(AX), SP
MOVL $0, (g_sched+gobuf_sp)(AX)
RET
noswitch:
// already on m stack, just call directly
// Using a tail call here cleans up tracebacks since we won't stop
// at an intermediate systemstack.
MOVL DI, DX
MOVL 0(DI), DI
JMP DI
bad:
// Not g0, not curg. Must be gsignal, but that's not allowed.
// Hide call from linker nosplit analysis.
MOVL $runtime·badsystemstack(SB), AX
CALL AX
INT $3
/*
* support for morestack
*/
// Called during function prolog when more stack is needed.
//
// The traceback routines see morestack on a g0 as being
// the top of a stack (for example, morestack calling newstack
// calling the scheduler calling newm calling gc), so we must
// record an argument size. For that purpose, it has no arguments.
TEXT runtime·morestack(SB),NOSPLIT,$0-0
get_tls(CX)
MOVL g(CX), BX
MOVL g_m(BX), BX
// Cannot grow scheduler stack (m->g0).
MOVL m_g0(BX), SI
CMPL g(CX), SI
JNE 3(PC)
CALL runtime·badmorestackg0(SB)
MOVL 0, AX
// Cannot grow signal stack (m->gsignal).
MOVL m_gsignal(BX), SI
CMPL g(CX), SI
JNE 3(PC)
CALL runtime·badmorestackgsignal(SB)
MOVL 0, AX
// Called from f.
// Set m->morebuf to f's caller.
NOP SP // tell vet SP changed - stop checking offsets
MOVL 8(SP), AX // f's caller's PC
MOVL AX, (m_morebuf+gobuf_pc)(BX)
LEAL 16(SP), AX // f's caller's SP
MOVL AX, (m_morebuf+gobuf_sp)(BX)
get_tls(CX)
MOVL g(CX), SI
MOVL SI, (m_morebuf+gobuf_g)(BX)
// Set g->sched to context in f.
MOVL 0(SP), AX // f's PC
MOVL AX, (g_sched+gobuf_pc)(SI)
MOVL SI, (g_sched+gobuf_g)(SI)
LEAL 8(SP), AX // f's SP
MOVL AX, (g_sched+gobuf_sp)(SI)
MOVL DX, (g_sched+gobuf_ctxt)(SI)
// Call newstack on m->g0's stack.
MOVL m_g0(BX), BX
MOVL BX, g(CX)
MOVL (g_sched+gobuf_sp)(BX), SP
CALL runtime·newstack(SB)
MOVL $0, 0x1003 // crash if newstack returns
RET
// morestack trampolines
TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack(SB)
// reflectcall: call a function with the given argument list
// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
#define DISPATCH(NAME,MAXSIZE) \
CMPL CX, $MAXSIZE; \
JA 3(PC); \
MOVL $NAME(SB), AX; \
JMP AX
// Note: can't just "JMP NAME(SB)" - bad inlining results.
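// In Go terms (an illustrative note): the DISPATCH chain picks the smallest
// runtime·call<N> with N >= argsize, so every reflect call runs in a frame
// of one of these fixed sizes.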
TEXT ·reflectcall(SB), NOSPLIT, $0-20
MOVLQZX argsize+12(FP), CX
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
DISPATCH(runtime·call64, 64)
DISPATCH(runtime·call128, 128)
DISPATCH(runtime·call256, 256)
DISPATCH(runtime·call512, 512)
DISPATCH(runtime·call1024, 1024)
DISPATCH(runtime·call2048, 2048)
DISPATCH(runtime·call4096, 4096)
DISPATCH(runtime·call8192, 8192)
DISPATCH(runtime·call16384, 16384)
DISPATCH(runtime·call32768, 32768)
DISPATCH(runtime·call65536, 65536)
DISPATCH(runtime·call131072, 131072)
DISPATCH(runtime·call262144, 262144)
DISPATCH(runtime·call524288, 524288)
DISPATCH(runtime·call1048576, 1048576)
DISPATCH(runtime·call2097152, 2097152)
DISPATCH(runtime·call4194304, 4194304)
DISPATCH(runtime·call8388608, 8388608)
DISPATCH(runtime·call16777216, 16777216)
DISPATCH(runtime·call33554432, 33554432)
DISPATCH(runtime·call67108864, 67108864)
DISPATCH(runtime·call134217728, 134217728)
DISPATCH(runtime·call268435456, 268435456)
DISPATCH(runtime·call536870912, 536870912)
DISPATCH(runtime·call1073741824, 1073741824)
MOVL $runtime·badreflectcall(SB), AX
JMP AX
#define CALLFN(NAME,MAXSIZE) \
TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
NO_LOCAL_POINTERS; \
/* copy arguments to stack */ \
MOVL argptr+8(FP), SI; \
MOVL argsize+12(FP), CX; \
MOVL SP, DI; \
REP;MOVSB; \
/* call function */ \
MOVL f+4(FP), DX; \
MOVL (DX), AX; \
CALL AX; \
/* copy return values back */ \
MOVL argtype+0(FP), DX; \
MOVL argptr+8(FP), DI; \
MOVL argsize+12(FP), CX; \
MOVL retoffset+16(FP), BX; \
MOVL SP, SI; \
ADDL BX, DI; \
ADDL BX, SI; \
SUBL BX, CX; \
CALL callRet<>(SB); \
RET
// callRet copies return values back at the end of call*. This is a
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
TEXT callRet<>(SB), NOSPLIT, $16-0
MOVL DX, 0(SP)
MOVL DI, 4(SP)
MOVL SI, 8(SP)
MOVL CX, 12(SP)
CALL runtime·reflectcallmove(SB)
RET
CALLFN(·call16, 16)
CALLFN(·call32, 32)
CALLFN(·call64, 64)
CALLFN(·call128, 128)
CALLFN(·call256, 256)
CALLFN(·call512, 512)
CALLFN(·call1024, 1024)
CALLFN(·call2048, 2048)
CALLFN(·call4096, 4096)
CALLFN(·call8192, 8192)
CALLFN(·call16384, 16384)
CALLFN(·call32768, 32768)
CALLFN(·call65536, 65536)
CALLFN(·call131072, 131072)
CALLFN(·call262144, 262144)
CALLFN(·call524288, 524288)
CALLFN(·call1048576, 1048576)
CALLFN(·call2097152, 2097152)
CALLFN(·call4194304, 4194304)
CALLFN(·call8388608, 8388608)
CALLFN(·call16777216, 16777216)
CALLFN(·call33554432, 33554432)
CALLFN(·call67108864, 67108864)
CALLFN(·call134217728, 134217728)
CALLFN(·call268435456, 268435456)
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVL cycles+0(FP), AX
again:
PAUSE
SUBL $1, AX
JNZ again
RET
TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
// Stores are already ordered on x86, so this is just a
// compile barrier.
RET
// void jmpdefer(fn, sp);
// called from deferreturn.
// 1. pop the caller
// 2. sub 5 bytes from the caller's return
// 3. jmp to the argument
TEXT runtime·jmpdefer(SB), NOSPLIT, $0-8
MOVL fv+0(FP), DX
MOVL argp+4(FP), BX
LEAL -8(BX), SP // caller sp after CALL
SUBL $5, (SP) // return to CALL again
MOVL 0(DX), BX
JMP BX // but first run the deferred function
// func asmcgocall(fn, arg unsafe.Pointer) int32
// Not implemented.
TEXT runtime·asmcgocall(SB),NOSPLIT,$0-12
MOVL 0, AX // crash
MOVL $0, ret+8(FP) // for vet
RET
// cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
// Not implemented.
TEXT runtime·cgocallback(SB),NOSPLIT,$0-16
MOVL 0, AX
RET
// cgocallback_gofunc(FuncVal*, void *frame, uintptr framesize)
// Not implemented.
TEXT ·cgocallback_gofunc(SB),NOSPLIT,$0-16
MOVL 0, AX
RET
// void setg(G*); set g. for use by needm.
// Not implemented.
TEXT runtime·setg(SB), NOSPLIT, $0-4
MOVL 0, AX
RET
TEXT runtime·abort(SB),NOSPLIT,$0-0
INT $3
loop:
JMP loop
// check that SP is in range [g->stack.lo, g->stack.hi)
TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
get_tls(CX)
MOVL g(CX), AX
CMPL (g_stack+stack_hi)(AX), SP
JHI 2(PC)
MOVL 0, AX
CMPL SP, (g_stack+stack_lo)(AX)
JHI 2(PC)
MOVL 0, AX
RET
// int64 runtime·cputicks(void)
TEXT runtime·cputicks(SB),NOSPLIT,$0-0
RDTSC
SHLQ $32, DX
ADDQ DX, AX
MOVQ AX, ret+0(FP)
RET
// hash function using AES hardware instructions
// For now, our one amd64p32 system (NaCl) does not
// support using AES instructions, so have not bothered to
// write the implementations. Can copy and adjust the ones
// in asm_amd64.s when the time comes.
TEXT runtime·memhash(SB),NOSPLIT,$0-20
JMP runtime·memhashFallback(SB)
TEXT runtime·strhash(SB),NOSPLIT,$0-12
JMP runtime·strhashFallback(SB)
TEXT runtime·memhash32(SB),NOSPLIT,$0-12
JMP runtime·memhash32Fallback(SB)
TEXT runtime·memhash64(SB),NOSPLIT,$0-12
JMP runtime·memhash64Fallback(SB)
TEXT runtime·return0(SB), NOSPLIT, $0
MOVL $0, AX
RET
// The top-most function running on a goroutine
// returns to goexit+PCQuantum.
TEXT runtime·goexit(SB),NOSPLIT,$0-0
BYTE $0x90 // NOP
CALL runtime·goexit1(SB) // does not return
// traceback from goexit1 must hit code range of goexit
BYTE $0x90 // NOP
TEXT ·checkASM(SB),NOSPLIT,$0-1
MOVB $1, ret+0(FP)
RET
// gcWriteBarrier performs a heap pointer write and informs the GC.
//
// gcWriteBarrier does NOT follow the Go ABI. It takes two arguments:
// - DI is the destination of the write
// - AX is the value being written at DI
// It clobbers FLAGS and SI. It does not clobber any other general-purpose registers,
// but may clobber others (e.g., SSE registers).
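// Illustrative Go shape of the fast path below (not from the original file):
//	buf.next += 8
//	*(buf.next - 8) = val   // the pointer being written (AX)
//	*(buf.next - 4) = *slot // the pointer being overwritten (loaded from DI)
//	if buf.next == buf.end { wbBufFlush(slot, val) }
//	*slot = val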
TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$88
// Save the registers clobbered by the fast path. This is slightly
// faster than having the caller spill these.
MOVQ R14, 72(SP)
MOVQ R13, 80(SP)
// TODO: Consider passing g.m.p in as an argument so they can be shared
// across a sequence of write barriers.
get_tls(R13)
MOVL g(R13), R13
MOVL g_m(R13), R13
MOVL m_p(R13), R13
MOVL (p_wbBuf+wbBuf_next)(R13), R14
// Increment wbBuf.next position.
LEAL 8(R14), R14
MOVL R14, (p_wbBuf+wbBuf_next)(R13)
CMPL R14, (p_wbBuf+wbBuf_end)(R13)
// Record the write.
MOVL AX, -8(R14) // Record value
MOVL (DI), R13 // TODO: This turns bad writes into bad reads.
MOVL R13, -4(R14) // Record *slot
// Is the buffer full? (flags set in CMPL above)
JEQ flush
ret:
MOVQ 72(SP), R14
MOVQ 80(SP), R13
// Do the write.
MOVL AX, (DI)
RET // Clobbers SI on NaCl
flush:
// Save all general purpose registers since these could be
// clobbered by wbBufFlush and were not saved by the caller.
// It is possible for wbBufFlush to clobber other registers
// (e.g., SSE registers), but the compiler takes care of saving
// those in the caller if necessary. This strikes a balance
// with registers that are likely to be used.
//
// We don't have type information for these, but all code under
// here is NOSPLIT, so nothing will observe these.
//
// TODO: We could strike a different balance; e.g., saving X0
// and not saving GP registers that are less likely to be used.
MOVL DI, 0(SP) // Also first argument to wbBufFlush
MOVL AX, 4(SP) // Also second argument to wbBufFlush
MOVQ BX, 8(SP)
MOVQ CX, 16(SP)
MOVQ DX, 24(SP)
// DI already saved
// SI is always clobbered on nacl
// BP is reserved on nacl
MOVQ R8, 32(SP)
MOVQ R9, 40(SP)
MOVQ R10, 48(SP)
MOVQ R11, 56(SP)
MOVQ R12, 64(SP)
// R13 already saved
// R14 already saved
// R15 is reserved on nacl
// This takes arguments DI and AX
CALL runtime·wbBufFlush(SB)
MOVL 0(SP), DI
MOVL 4(SP), AX
MOVQ 8(SP), BX
MOVQ 16(SP), CX
MOVQ 24(SP), DX
MOVQ 32(SP), R8
MOVQ 40(SP), R9
MOVQ 48(SP), R10
MOVQ 56(SP), R11
MOVQ 64(SP), R12
JMP ret
// Note: these functions use a special calling convention to save generated code space.
// Arguments are passed in registers, but the space for those arguments are allocated
// in the caller's stack frame. These stubs write the args into that stack space and
// then tail call to the corresponding runtime handler.
// The tail call makes these stubs disappear in backtraces.
TEXT runtime·panicIndex(SB),NOSPLIT,$0-8
MOVL AX, x+0(FP)
MOVL CX, y+4(FP)
JMP runtime·goPanicIndex(SB)
TEXT runtime·panicIndexU(SB),NOSPLIT,$0-8
MOVL AX, x+0(FP)
MOVL CX, y+4(FP)
JMP runtime·goPanicIndexU(SB)
TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-8
MOVL CX, x+0(FP)
MOVL DX, y+4(FP)
JMP runtime·goPanicSliceAlen(SB)
TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-8
MOVL CX, x+0(FP)
MOVL DX, y+4(FP)
JMP runtime·goPanicSliceAlenU(SB)
TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-8
MOVL CX, x+0(FP)
MOVL DX, y+4(FP)
JMP runtime·goPanicSliceAcap(SB)
TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-8
MOVL CX, x+0(FP)
MOVL DX, y+4(FP)
JMP runtime·goPanicSliceAcapU(SB)
TEXT runtime·panicSliceB(SB),NOSPLIT,$0-8
MOVL AX, x+0(FP)
MOVL CX, y+4(FP)
JMP runtime·goPanicSliceB(SB)
TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-8
MOVL AX, x+0(FP)
MOVL CX, y+4(FP)
JMP runtime·goPanicSliceBU(SB)
TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-8
MOVL DX, x+0(FP)
MOVL BX, y+4(FP)
JMP runtime·goPanicSlice3Alen(SB)
TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-8
MOVL DX, x+0(FP)
MOVL BX, y+4(FP)
JMP runtime·goPanicSlice3AlenU(SB)
TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-8
MOVL DX, x+0(FP)
MOVL BX, y+4(FP)
JMP runtime·goPanicSlice3Acap(SB)
TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-8
MOVL DX, x+0(FP)
MOVL BX, y+4(FP)
JMP runtime·goPanicSlice3AcapU(SB)
TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-8
MOVL CX, x+0(FP)
MOVL DX, y+4(FP)
JMP runtime·goPanicSlice3B(SB)
TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-8
MOVL CX, x+0(FP)
MOVL DX, y+4(FP)
JMP runtime·goPanicSlice3BU(SB)
TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-8
MOVL AX, x+0(FP)
MOVL CX, y+4(FP)
JMP runtime·goPanicSlice3C(SB)
TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-8
MOVL AX, x+0(FP)
MOVL CX, y+4(FP)
JMP runtime·goPanicSlice3CU(SB)
// Extended versions for 64-bit indexes.
TEXT runtime·panicExtendIndex(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL AX, lo+4(FP)
MOVL CX, y+8(FP)
JMP runtime·goPanicExtendIndex(SB)
TEXT runtime·panicExtendIndexU(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL AX, lo+4(FP)
MOVL CX, y+8(FP)
JMP runtime·goPanicExtendIndexU(SB)
TEXT runtime·panicExtendSliceAlen(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL CX, lo+4(FP)
MOVL DX, y+8(FP)
JMP runtime·goPanicExtendSliceAlen(SB)
TEXT runtime·panicExtendSliceAlenU(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL CX, lo+4(FP)
MOVL DX, y+8(FP)
JMP runtime·goPanicExtendSliceAlenU(SB)
TEXT runtime·panicExtendSliceAcap(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL CX, lo+4(FP)
MOVL DX, y+8(FP)
JMP runtime·goPanicExtendSliceAcap(SB)
TEXT runtime·panicExtendSliceAcapU(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL CX, lo+4(FP)
MOVL DX, y+8(FP)
JMP runtime·goPanicExtendSliceAcapU(SB)
TEXT runtime·panicExtendSliceB(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL AX, lo+4(FP)
MOVL CX, y+8(FP)
JMP runtime·goPanicExtendSliceB(SB)
TEXT runtime·panicExtendSliceBU(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL AX, lo+4(FP)
MOVL CX, y+8(FP)
JMP runtime·goPanicExtendSliceBU(SB)
TEXT runtime·panicExtendSlice3Alen(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL DX, lo+4(FP)
MOVL BX, y+8(FP)
JMP runtime·goPanicExtendSlice3Alen(SB)
TEXT runtime·panicExtendSlice3AlenU(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL DX, lo+4(FP)
MOVL BX, y+8(FP)
JMP runtime·goPanicExtendSlice3AlenU(SB)
TEXT runtime·panicExtendSlice3Acap(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL DX, lo+4(FP)
MOVL BX, y+8(FP)
JMP runtime·goPanicExtendSlice3Acap(SB)
TEXT runtime·panicExtendSlice3AcapU(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL DX, lo+4(FP)
MOVL BX, y+8(FP)
JMP runtime·goPanicExtendSlice3AcapU(SB)
TEXT runtime·panicExtendSlice3B(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL CX, lo+4(FP)
MOVL DX, y+8(FP)
JMP runtime·goPanicExtendSlice3B(SB)
TEXT runtime·panicExtendSlice3BU(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL CX, lo+4(FP)
MOVL DX, y+8(FP)
JMP runtime·goPanicExtendSlice3BU(SB)
TEXT runtime·panicExtendSlice3C(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL AX, lo+4(FP)
MOVL CX, y+8(FP)
JMP runtime·goPanicExtendSlice3C(SB)
TEXT runtime·panicExtendSlice3CU(SB),NOSPLIT,$0-12
MOVL SI, hi+0(FP)
MOVL AX, lo+4(FP)
MOVL CX, y+8(FP)
JMP runtime·goPanicExtendSlice3CU(SB)

View File

@ -1,13 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
/*
* void crosscall2(void (*fn)(void*, int32), void*, int32)
* Save registers and call fn with two arguments.
*/
TEXT crosscall2(SB),NOSPLIT,$0
INT $3
RET

View File

@ -1,71 +0,0 @@
package runtime
const (
// These values are referred to in the source code
// but really don't matter. Even so, use the standard numbers.
_SIGQUIT = 3
_SIGTRAP = 5
_SIGSEGV = 11
_SIGPROF = 27
)
type timespec struct {
tv_sec int64
tv_nsec int32
}
//go:nosplit
func (ts *timespec) setNsec(ns int64) {
ts.tv_sec = ns / 1e9
ts.tv_nsec = int32(ns % 1e9)
}
type excregs386 struct {
eax uint32
ecx uint32
edx uint32
ebx uint32
esp uint32
ebp uint32
esi uint32
edi uint32
eip uint32
eflags uint32
}
type excregsamd64 struct {
rax uint64
rcx uint64
rdx uint64
rbx uint64
rsp uint64
rbp uint64
rsi uint64
rdi uint64
r8 uint64
r9 uint64
r10 uint64
r11 uint64
r12 uint64
r13 uint64
r14 uint64
r15 uint64
rip uint64
rflags uint32
}
type exccontext struct {
size uint32
portable_context_offset uint32
portable_context_size uint32
arch uint32
regs_size uint32
reserved [11]uint32
regs excregsamd64
}
type excportablecontext struct {
pc uint32
sp uint32
fp uint32
}

View File

@ -1,159 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-17
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
LOCK
CMPXCHGL CX, 0(BX)
SETEQ ret+16(FP)
RET
TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-17
JMP runtime∕internal∕atomic·Cas(SB)
TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17
JMP runtime∕internal∕atomic·Cas(SB)
TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-12
JMP runtime∕internal∕atomic·Load(SB)
TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-12
JMP runtime∕internal∕atomic·Load(SB)
TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-8
JMP runtime∕internal∕atomic·Store(SB)
TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
JMP runtime∕internal∕atomic·Load64(SB)
TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-24
JMP runtime∕internal∕atomic·Xadd64(SB)
// bool runtime∕internal∕atomic·cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
// if(*val == *old){
// *val = new;
// return 1;
// } else {
// return 0;
// }
TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
MOVL ptr+0(FP), BX
MOVQ old+8(FP), AX
MOVQ new+16(FP), CX
LOCK
CMPXCHGQ CX, 0(BX)
SETEQ ret+24(FP)
RET
// bool Casp1(void **val, void *old, void *new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-17
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
LOCK
CMPXCHGL CX, 0(BX)
SETEQ ret+16(FP)
RET
// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
// *val += delta;
// return *val;
TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL delta+4(FP), AX
MOVL AX, CX
LOCK
XADDL AX, 0(BX)
ADDL CX, AX
MOVL AX, ret+8(FP)
RET
TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
MOVL ptr+0(FP), BX
MOVQ delta+8(FP), AX
MOVQ AX, CX
LOCK
XADDQ AX, 0(BX)
ADDQ CX, AX
MOVQ AX, ret+16(FP)
RET
TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-12
JMP runtime∕internal∕atomic·Xadd(SB)
TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL new+4(FP), AX
XCHGL AX, 0(BX)
MOVL AX, ret+8(FP)
RET
TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
MOVL ptr+0(FP), BX
MOVQ new+8(FP), AX
TESTL $7, BX
JZ 2(PC)
MOVL 0, BX // crash when unaligned
XCHGQ AX, 0(BX)
MOVQ AX, ret+16(FP)
RET
TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-12
JMP runtime∕internal∕atomic·Xchg(SB)
TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
RET
TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
RET
TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-8
JMP runtime∕internal∕atomic·Store(SB)
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
MOVL ptr+0(FP), BX
MOVQ val+8(FP), AX
XCHGQ AX, 0(BX)
RET
// void runtime∕internal∕atomic·Or8(byte volatile*, byte);
TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-5
MOVL ptr+0(FP), BX
MOVB val+4(FP), AX
LOCK
ORB AX, 0(BX)
RET
// void runtime∕internal∕atomic·And8(byte volatile*, byte);
TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-5
MOVL ptr+0(FP), BX
MOVB val+4(FP), AX
LOCK
ANDB AX, 0(BX)
RET
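A typical runtime-side use of Cas (an illustrative sketch; atomic stands for runtime/internal/atomic, and the lock word is hypothetical):

	// lock spins until it atomically flips *l from 0 (free) to 1 (held).
	func lock(l *uint32) {
		for !atomic.Cas(l, 0, 1) {
			// spin; a real runtime lock would yield or sleep here
		}
	}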

View File

@ -1,16 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sys
const (
ArchFamily = AMD64
BigEndian = false
DefaultPhysPageSize = 65536*GoosNacl + 4096*(1-GoosNacl)
PCQuantum = 1
Int64Align = 8
MinFrameSize = 0
)
type Uintreg uint64

View File

@ -1,31 +0,0 @@
// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
// +build amd64p32
package sys
const GOARCH = `amd64p32`
const Goarch386 = 0
const GoarchAmd64 = 0
const GoarchAmd64p32 = 1
const GoarchArm = 0
const GoarchArmbe = 0
const GoarchArm64 = 0
const GoarchArm64be = 0
const GoarchPpc64 = 0
const GoarchPpc64le = 0
const GoarchMips = 0
const GoarchMipsle = 0
const GoarchMips64 = 0
const GoarchMips64le = 0
const GoarchMips64p32 = 0
const GoarchMips64p32le = 0
const GoarchPpc = 0
const GoarchRiscv = 0
const GoarchRiscv64 = 0
const GoarchS390 = 0
const GoarchS390x = 0
const GoarchSparc = 0
const GoarchSparc64 = 0
const GoarchWasm = 0

View File

@ -1,24 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-8
MOVL ptr+0(FP), DI
MOVL n+4(FP), CX
MOVQ CX, BX
ANDQ $3, BX
SHRQ $2, CX
MOVQ $0, AX
CLD
REP
STOSL
MOVQ BX, CX
REP
STOSB
// Note: we zero only 4 bytes at a time so that the tail is at most
// 3 bytes. That guarantees that we aren't zeroing pointers with STOSB.
// See issue 13160.
RET
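
The closing comment is the key constraint: pointers on amd64p32 are 4 bytes, so clearing in 4-byte units with a tail of at most 3 bytes means the byte stores can never land inside a pointer slot. A Go-level sketch of that split (illustrative only; the real routine is the REP STOSL/STOSB assembly above):

package main

import "fmt"

// memclr clears n bytes as n/4 word stores plus an n%4 byte tail, so the
// byte stores never partially overwrite a 4-byte pointer (see issue 13160).
func memclr(b []byte) {
	words, tail := len(b)/4, len(b)%4
	for i := 0; i < words*4; i += 4 {
		b[i], b[i+1], b[i+2], b[i+3] = 0, 0, 0, 0 // REP STOSL analogue
	}
	for i := len(b) - tail; i < len(b); i++ {
		b[i] = 0 // REP STOSB analogue, at most 3 bytes
	}
}

func main() {
	b := []byte{1, 2, 3, 4, 5, 6, 7}
	memclr(b)
	fmt.Println(b) // [0 0 0 0 0 0 0]
}
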

View File

@ -1,53 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// This could use MOVSQ, but we use MOVSL so that if an object ends in
// a 4 byte pointer, we copy it as a unit instead of byte by byte.
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove(SB), NOSPLIT, $0-12
MOVL to+0(FP), DI
MOVL from+4(FP), SI
MOVL n+8(FP), BX
CMPL SI, DI
JLS back
forward:
MOVL BX, CX
SHRL $2, CX
ANDL $3, BX
REP; MOVSL
MOVL BX, CX
REP; MOVSB
RET
back:
MOVL SI, CX
ADDL BX, CX
CMPL CX, DI
JLS forward
ADDL BX, DI
ADDL BX, SI
STD
MOVL BX, CX
SHRL $2, CX
ANDL $3, BX
SUBL $4, DI
SUBL $4, SI
REP; MOVSL
ADDL $3, DI
ADDL $3, SI
MOVL BX, CX
REP; MOVSB
CLD
// Note: we copy only 4 bytes at a time so that the tail is at most
// 3 bytes. That guarantees that we aren't copying pointers with MOVSB.
// See issue 13160.
RET
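
The forward/back split exists because the regions may overlap: copying forward when the destination starts inside the source would overwrite bytes before they are read. A sketch of the direction choice, expressed on offsets within one buffer (move is an illustrative helper, not the runtime API):

package main

import "fmt"

// move copies n bytes inside buf from offset src to offset dst, picking
// the direction the way runtime·memmove does: backward only when the
// ranges overlap with dst above src.
func move(buf []byte, dst, src, n int) {
	if dst <= src || dst >= src+n { // no dangerous overlap: copy forward
		for i := 0; i < n; i++ {
			buf[dst+i] = buf[src+i]
		}
		return
	}
	for i := n - 1; i >= 0; i-- { // overlapping, dst > src: copy backward
		buf[dst+i] = buf[src+i]
	}
}

func main() {
	b := []byte("abcdef")
	move(b, 2, 0, 4)       // overlapping shift right by two
	fmt.Println(string(b)) // ababcd
}
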

View File

@ -3,7 +3,7 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Fake network poller for wasm/js. // Fake network poller for wasm/js.
// Should never be used, because NaCl and wasm/js network connections do not honor "SetNonblock". // Should never be used, because wasm/js network connections do not honor "SetNonblock".
// +build js,wasm // +build js,wasm

View File

@ -1,30 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// NaCl entry on 32-bit x86 has DI pointing at the arg block, which contains:
//
// 0(DI) - cleanup function pointer, always 0
// 4(DI) - envc
// 8(DI) - argc
// 12(DI) - argv, then 0, then envv, then 0, then auxv
// NaCl entry here is almost the same, except that there
// is no saved caller PC, so 0(FP) is -8(FP) and so on.
TEXT _rt0_amd64p32_nacl(SB),NOSPLIT,$16
MOVL DI, 0(SP)
CALL runtime·nacl_sysinfo(SB)
MOVL 0(SP), DI
MOVL 8(DI), AX
LEAL 12(DI), BX
MOVL AX, 0(SP)
MOVL BX, 4(SP)
CALL main(SB)
INT $3
TEXT main(SB),NOSPLIT,$0
// Uncomment for fake time like on Go Playground.
//MOVQ $1257894000000000000, AX
//MOVQ AX, runtime·faketime(SB)
JMP runtime·rt0_go(SB)
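
For reference, the arg block described in the comment above, modeled as a Go struct. This is purely illustrative — the real layout is raw words at DI:

package main

import "fmt"

// argBlock models the startup block the entry point walks: the code above
// loads argc from 8(DI) and takes 12(DI) as the start of argv.
type argBlock struct {
	cleanup uintptr   // 0(DI), always 0
	envc    uint32    // 4(DI)
	argc    uint32    // 8(DI)
	vectors []uintptr // 12(DI): argv, 0, envv, 0, auxv
}

func main() {
	b := argBlock{envc: 1, argc: 2, vectors: []uintptr{0x1000, 0x1010, 0, 0x2000, 0}}
	fmt.Println(b.argc, b.vectors[:b.argc]) // the two argv pointers
}
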

View File

@ -1,53 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
func nacl_sysinfo(di uint32) // cross-assembly-file call; declared for vet
type sigctxt struct {
info *siginfo
ctxt unsafe.Pointer
}
//go:nosplit
//go:nowritebarrierrec
func (c *sigctxt) regs() *excregsamd64 {
return &(*exccontext)(c.ctxt).regs
}
func (c *sigctxt) rax() uint64 { return c.regs().rax }
func (c *sigctxt) rbx() uint64 { return c.regs().rbx }
func (c *sigctxt) rcx() uint64 { return c.regs().rcx }
func (c *sigctxt) rdx() uint64 { return c.regs().rdx }
func (c *sigctxt) rdi() uint64 { return c.regs().rdi }
func (c *sigctxt) rsi() uint64 { return c.regs().rsi }
func (c *sigctxt) rbp() uint64 { return c.regs().rbp }
func (c *sigctxt) rsp() uint64 { return c.regs().rsp }
func (c *sigctxt) r8() uint64 { return c.regs().r8 }
func (c *sigctxt) r9() uint64 { return c.regs().r9 }
func (c *sigctxt) r10() uint64 { return c.regs().r10 }
func (c *sigctxt) r11() uint64 { return c.regs().r11 }
func (c *sigctxt) r12() uint64 { return c.regs().r12 }
func (c *sigctxt) r13() uint64 { return c.regs().r13 }
func (c *sigctxt) r14() uint64 { return c.regs().r14 }
func (c *sigctxt) r15() uint64 { return c.regs().r15 }
//go:nosplit
//go:nowritebarrierrec
func (c *sigctxt) rip() uint64 { return c.regs().rip }
func (c *sigctxt) rflags() uint64 { return uint64(c.regs().rflags) }
func (c *sigctxt) cs() uint64 { return ^uint64(0) }
func (c *sigctxt) fs() uint64 { return ^uint64(0) }
func (c *sigctxt) gs() uint64 { return ^uint64(0) }
func (c *sigctxt) sigcode() uint64 { return ^uint64(0) }
func (c *sigctxt) sigaddr() uint64 { return 0 }
func (c *sigctxt) set_rip(x uint64) { c.regs().rip = x }
func (c *sigctxt) set_rsp(x uint64) { c.regs().rsp = x }
func (c *sigctxt) set_sigcode(x uint64) {}
func (c *sigctxt) set_sigaddr(x uint64) {}

View File

@ -1,482 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "go_asm.h"
#include "go_tls.h"
#include "textflag.h"
#include "syscall_nacl.h"
#define NACL_SYSCALL(code) \
MOVL $(0x10000 + ((code)<<5)), AX; CALL AX
TEXT runtime·settls(SB),NOSPLIT,$0
MOVL DI, TLS // really BP
RET
TEXT runtime·exit(SB),NOSPLIT,$0
MOVL code+0(FP), DI
NACL_SYSCALL(SYS_exit)
RET
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-4
MOVL wait+0(FP), DI
// SYS_thread_exit will clear *wait when the stack is free.
NACL_SYSCALL(SYS_thread_exit)
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$0
MOVL name+0(FP), DI
MOVL mode+4(FP), SI
MOVL perm+8(FP), DX
NACL_SYSCALL(SYS_open)
MOVL AX, ret+16(FP)
RET
TEXT runtime·closefd(SB),NOSPLIT,$0
MOVL fd+0(FP), DI
NACL_SYSCALL(SYS_close)
MOVL AX, ret+8(FP)
RET
TEXT runtime·read(SB),NOSPLIT,$0
MOVL fd+0(FP), DI
MOVL p+4(FP), SI
MOVL n+8(FP), DX
NACL_SYSCALL(SYS_read)
MOVL AX, ret+16(FP)
RET
TEXT syscall·naclWrite(SB), NOSPLIT, $24-20
MOVL arg1+0(FP), DI
MOVL arg2+4(FP), SI
MOVL arg3+8(FP), DX
MOVL DI, 0(SP)
MOVL SI, 4(SP)
MOVL DX, 8(SP)
CALL runtime·write(SB)
MOVL 16(SP), AX
MOVL AX, ret+16(FP)
RET
TEXT runtime·write(SB),NOSPLIT,$16-20
// If using fake time and writing to stdout or stderr,
// emit playback header before actual data.
MOVQ runtime·faketime(SB), AX
CMPQ AX, $0
JEQ write
MOVL fd+0(FP), DI
CMPL DI, $1
JEQ playback
CMPL DI, $2
JEQ playback
write:
// Ordinary write.
MOVL fd+0(FP), DI
MOVL p+4(FP), SI
MOVL n+8(FP), DX
NACL_SYSCALL(SYS_write)
MOVL AX, ret+16(FP)
RET
// Write with playback header.
// First, lock to avoid interleaving writes.
playback:
MOVL $1, BX
XCHGL runtime·writelock(SB), BX
CMPL BX, $0
JNE playback
MOVQ runtime·lastfaketime(SB), CX
MOVL runtime·lastfaketimefd(SB), BX
CMPL DI, BX
JE samefd
// If the current fd doesn't match the fd of the previous write,
// ensure that the timestamp is strictly greater. That way, we can
// recover the original order even if we read the fds separately.
INCQ CX
MOVL DI, runtime·lastfaketimefd(SB)
samefd:
CMPQ AX, CX
CMOVQLT CX, AX
MOVQ AX, runtime·lastfaketime(SB)
// Playback header: 0 0 P B <8-byte time> <4-byte data length>
MOVL $(('B'<<24) | ('P'<<16)), 0(SP)
BSWAPQ AX
MOVQ AX, 4(SP)
MOVL n+8(FP), DX
BSWAPL DX
MOVL DX, 12(SP)
MOVL fd+0(FP), DI
MOVL SP, SI
MOVL $16, DX
NACL_SYSCALL(SYS_write)
// Write actual data.
MOVL fd+0(FP), DI
MOVL p+4(FP), SI
MOVL n+8(FP), DX
NACL_SYSCALL(SYS_write)
// Unlock.
MOVL $0, runtime·writelock(SB)
MOVL AX, ret+16(FP)
RET
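
The playback path wraps each write in the 16-byte header spelled out in the comment: a "\x00\x00PB" magic, a big-endian 8-byte timestamp (the BSWAPQ), and a big-endian 4-byte length (the BSWAPL), followed by the data. A sketch of the same framing in Go (frame is an illustrative helper):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// frame prepends the playback header the assembly builds on the stack:
// 0x00 0x00 'P' 'B', then the time and payload length in big-endian.
func frame(t uint64, p []byte) []byte {
	var b bytes.Buffer
	b.WriteString("\x00\x00PB")
	binary.Write(&b, binary.BigEndian, t)
	binary.Write(&b, binary.BigEndian, uint32(len(p)))
	b.Write(p)
	return b.Bytes()
}

func main() {
	fmt.Printf("% x\n", frame(1257894000000000000, []byte("hi")))
}
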
TEXT runtime·nacl_exception_stack(SB),NOSPLIT,$0
MOVL p+0(FP), DI
MOVL size+4(FP), SI
NACL_SYSCALL(SYS_exception_stack)
MOVL AX, ret+8(FP)
RET
TEXT runtime·nacl_exception_handler(SB),NOSPLIT,$0
MOVL fn+0(FP), DI
MOVL arg+4(FP), SI
NACL_SYSCALL(SYS_exception_handler)
MOVL AX, ret+8(FP)
RET
TEXT runtime·nacl_sem_create(SB),NOSPLIT,$0
MOVL flag+0(FP), DI
NACL_SYSCALL(SYS_sem_create)
MOVL AX, ret+8(FP)
RET
TEXT runtime·nacl_sem_wait(SB),NOSPLIT,$0
MOVL sem+0(FP), DI
NACL_SYSCALL(SYS_sem_wait)
MOVL AX, ret+8(FP)
RET
TEXT runtime·nacl_sem_post(SB),NOSPLIT,$0
MOVL sem+0(FP), DI
NACL_SYSCALL(SYS_sem_post)
MOVL AX, ret+8(FP)
RET
TEXT runtime·nacl_mutex_create(SB),NOSPLIT,$0
MOVL flag+0(FP), DI
NACL_SYSCALL(SYS_mutex_create)
MOVL AX, ret+8(FP)
RET
TEXT runtime·nacl_mutex_lock(SB),NOSPLIT,$0
MOVL mutex+0(FP), DI
NACL_SYSCALL(SYS_mutex_lock)
MOVL AX, ret+8(FP)
RET
TEXT runtime·nacl_mutex_trylock(SB),NOSPLIT,$0
MOVL mutex+0(FP), DI
NACL_SYSCALL(SYS_mutex_trylock)
MOVL AX, ret+8(FP)
RET
TEXT runtime·nacl_mutex_unlock(SB),NOSPLIT,$0
MOVL mutex+0(FP), DI
NACL_SYSCALL(SYS_mutex_unlock)
MOVL AX, ret+8(FP)
RET
TEXT runtime·nacl_cond_create(SB),NOSPLIT,$0
MOVL flag+0(FP), DI
NACL_SYSCALL(SYS_cond_create)
MOVL AX, ret+8(FP)
RET
TEXT runtime·nacl_cond_wait(SB),NOSPLIT,$0
MOVL cond+0(FP), DI
MOVL n+4(FP), SI
NACL_SYSCALL(SYS_cond_wait)
MOVL AX, ret+8(FP)
RET
TEXT runtime·nacl_cond_signal(SB),NOSPLIT,$0
MOVL cond+0(FP), DI
NACL_SYSCALL(SYS_cond_signal)
MOVL AX, ret+8(FP)
RET
TEXT runtime·nacl_cond_broadcast(SB),NOSPLIT,$0
MOVL cond+0(FP), DI
NACL_SYSCALL(SYS_cond_broadcast)
MOVL AX, ret+8(FP)
RET
TEXT runtime·nacl_cond_timed_wait_abs(SB),NOSPLIT,$0
MOVL cond+0(FP), DI
MOVL lock+4(FP), SI
MOVL ts+8(FP), DX
NACL_SYSCALL(SYS_cond_timed_wait_abs)
MOVL AX, ret+16(FP)
RET
TEXT runtime·nacl_thread_create(SB),NOSPLIT,$0
MOVL fn+0(FP), DI
MOVL stk+4(FP), SI
MOVL tls+8(FP), DX
MOVL xx+12(FP), CX
NACL_SYSCALL(SYS_thread_create)
MOVL AX, ret+16(FP)
RET
TEXT runtime·mstart_nacl(SB),NOSPLIT,$0
NACL_SYSCALL(SYS_tls_get)
SUBL $8, AX
MOVL AX, TLS
JMP runtime·mstart(SB)
TEXT runtime·nacl_nanosleep(SB),NOSPLIT,$0
MOVL ts+0(FP), DI
MOVL extra+4(FP), SI
NACL_SYSCALL(SYS_nanosleep)
MOVL AX, ret+8(FP)
RET
TEXT runtime·osyield(SB),NOSPLIT,$0
NACL_SYSCALL(SYS_sched_yield)
RET
TEXT runtime·mmap(SB),NOSPLIT,$8
MOVL addr+0(FP), DI
MOVL n+4(FP), SI
MOVL prot+8(FP), DX
MOVL flags+12(FP), CX
MOVL fd+16(FP), R8
MOVL off+20(FP), AX
MOVQ AX, 0(SP)
MOVL SP, R9
NACL_SYSCALL(SYS_mmap)
CMPL AX, $-4095
JNA ok
NEGL AX
MOVL $0, p+24(FP)
MOVL AX, err+28(FP)
RET
ok:
MOVL AX, p+24(FP)
MOVL $0, err+28(FP)
RET
TEXT runtime·walltime(SB),NOSPLIT,$16
MOVQ runtime·faketime(SB), AX
CMPQ AX, $0
JEQ realtime
MOVQ $0, DX
MOVQ $1000000000, CX
DIVQ CX
MOVQ AX, sec+0(FP)
MOVL DX, nsec+8(FP)
RET
realtime:
MOVL $0, DI // real time clock
LEAL 0(SP), AX
MOVL AX, SI // timespec
NACL_SYSCALL(SYS_clock_gettime)
MOVL 0(SP), AX // low 32 sec
MOVL 4(SP), CX // high 32 sec
MOVL 8(SP), BX // nsec
// sec is in AX, nsec in BX
MOVL AX, sec_lo+0(FP)
MOVL CX, sec_hi+4(FP)
MOVL BX, nsec+8(FP)
RET
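
Under fake time, the DIVQ above is all walltime does: divide the stored nanosecond counter by 1e9, leaving seconds in AX and the nanosecond remainder in DX. The same split in Go, as a minimal sketch:

package main

import "fmt"

// split mirrors walltime's fake-time path: one division turns the
// nanosecond counter into the (sec, nsec) pair the runtime wants.
func split(faketime int64) (sec int64, nsec int32) {
	return faketime / 1e9, int32(faketime % 1e9)
}

func main() {
	fmt.Println(split(1257894000000000000)) // 1257894000 0 (the playground epoch)
}
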
TEXT syscall·now(SB),NOSPLIT,$0
JMP runtime·walltime(SB)
TEXT runtime·nanotime(SB),NOSPLIT,$16
MOVQ runtime·faketime(SB), AX
CMPQ AX, $0
JEQ 3(PC)
MOVQ AX, ret+0(FP)
RET
MOVL $0, DI // real time clock
LEAL 0(SP), AX
MOVL AX, SI // timespec
NACL_SYSCALL(SYS_clock_gettime)
MOVQ 0(SP), AX // sec
MOVL 8(SP), DX // nsec
// sec is in AX, nsec in DX
// return nsec in AX
IMULQ $1000000000, AX
ADDQ DX, AX
MOVQ AX, ret+0(FP)
RET
TEXT runtime·sigtramp(SB),NOSPLIT,$80
// restore TLS register at time of execution,
// in case it's been smashed.
// the TLS register is really BP, but for consistency
// with non-NaCl systems it is referred to here as TLS.
// NOTE: Cannot use SYS_tls_get here (like we do in mstart_nacl),
// because the main thread never calls tls_set.
LEAL ctxt+0(FP), AX
MOVL (16*4+5*8)(AX), AX
MOVL AX, TLS
// check that g exists
get_tls(CX)
MOVL g(CX), DI
CMPL DI, $0
JEQ nog
// save g
MOVL DI, 20(SP)
// g = m->gsignal
MOVL g_m(DI), BX
MOVL m_gsignal(BX), BX
MOVL BX, g(CX)
//JMP debughandler
// copy arguments for sighandler
MOVL $11, 0(SP) // signal
MOVL $0, 4(SP) // siginfo
LEAL ctxt+0(FP), AX
MOVL AX, 8(SP) // context
MOVL DI, 12(SP) // g
CALL runtime·sighandler(SB)
// restore g
get_tls(CX)
MOVL 20(SP), BX
MOVL BX, g(CX)
// Enable exceptions again.
NACL_SYSCALL(SYS_exception_clear_flag)
// Restore registers as best we can. Impossible to do perfectly.
// See comment in sys_nacl_386.s for extended rationale.
LEAL ctxt+0(FP), SI
ADDL $64, SI
MOVQ 0(SI), AX
MOVQ 8(SI), CX
MOVQ 16(SI), DX
MOVQ 24(SI), BX
MOVL 32(SI), SP // MOVL for SP sandboxing
// 40(SI) is saved BP aka TLS, already restored above
// 48(SI) is saved SI, never to be seen again
MOVQ 56(SI), DI
MOVQ 64(SI), R8
MOVQ 72(SI), R9
MOVQ 80(SI), R10
MOVQ 88(SI), R11
MOVQ 96(SI), R12
MOVQ 104(SI), R13
MOVQ 112(SI), R14
// 120(SI) is R15, which is owned by Native Client and must not be modified
MOVQ 128(SI), SI // saved PC
// 136(SI) is saved EFLAGS, never to be seen again
JMP SI
//debughandler:
//// print basic information
//LEAL ctxt+0(FP), DI
//MOVL $runtime·sigtrampf(SB), AX
//MOVL AX, 0(SP)
//MOVQ (16*4+16*8)(DI), BX // rip
//MOVQ BX, 8(SP)
//MOVQ (16*4+0*8)(DI), BX // rax
//MOVQ BX, 16(SP)
//MOVQ (16*4+1*8)(DI), BX // rcx
//MOVQ BX, 24(SP)
//MOVQ (16*4+2*8)(DI), BX // rdx
//MOVQ BX, 32(SP)
//MOVQ (16*4+3*8)(DI), BX // rbx
//MOVQ BX, 40(SP)
//MOVQ (16*4+7*8)(DI), BX // rdi
//MOVQ BX, 48(SP)
//MOVQ (16*4+15*8)(DI), BX // r15
//MOVQ BX, 56(SP)
//MOVQ (16*4+4*8)(DI), BX // rsp
//MOVQ 0(BX), BX
//MOVQ BX, 64(SP)
//CALL runtime·printf(SB)
//
//LEAL ctxt+0(FP), DI
//MOVQ (16*4+16*8)(DI), BX // rip
//MOVL BX, 0(SP)
//MOVQ (16*4+4*8)(DI), BX // rsp
//MOVL BX, 4(SP)
//MOVL $0, 8(SP) // lr
//get_tls(CX)
//MOVL g(CX), BX
//MOVL BX, 12(SP) // gp
//CALL runtime·traceback(SB)
notls:
MOVL 0, AX
RET
nog:
MOVL 0, AX
RET
// cannot do real signal handling yet, because gsignal has not been allocated.
MOVL $1, DI; NACL_SYSCALL(SYS_exit)
// func getRandomData([]byte)
TEXT runtime·getRandomData(SB),NOSPLIT,$0-12
MOVL arg_base+0(FP), DI
MOVL arg_len+4(FP), SI
NACL_SYSCALL(SYS_get_random_bytes)
RET
TEXT runtime·nacl_sysinfo(SB),NOSPLIT,$16
/*
MOVL di+0(FP), DI
LEAL 12(DI), BX
MOVL 8(DI), AX
ADDL 4(DI), AX
ADDL $2, AX
LEAL (BX)(AX*4), BX
MOVL BX, runtime·nacl_irt_query(SB)
auxloop:
MOVL 0(BX), DX
CMPL DX, $0
JNE 2(PC)
RET
CMPL DX, $32
JEQ auxfound
ADDL $8, BX
JMP auxloop
auxfound:
MOVL 4(BX), BX
MOVL BX, runtime·nacl_irt_query(SB)
LEAL runtime·nacl_irt_basic_v0_1_str(SB), DI
LEAL runtime·nacl_irt_basic_v0_1(SB), SI
MOVL runtime·nacl_irt_basic_v0_1_size(SB), DX
MOVL runtime·nacl_irt_query(SB), BX
CALL BX
LEAL runtime·nacl_irt_memory_v0_3_str(SB), DI
LEAL runtime·nacl_irt_memory_v0_3(SB), SI
MOVL runtime·nacl_irt_memory_v0_3_size(SB), DX
MOVL runtime·nacl_irt_query(SB), BX
CALL BX
LEAL runtime·nacl_irt_thread_v0_1_str(SB), DI
LEAL runtime·nacl_irt_thread_v0_1(SB), SI
MOVL runtime·nacl_irt_thread_v0_1_size(SB), DX
MOVL runtime·nacl_irt_query(SB), BX
CALL BX
// TODO: Once we have a NaCl SDK with futex syscall support,
// try switching to futex syscalls and here load the
// nacl-irt-futex-0.1 table.
*/
RET

View File

@ -1,42 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
#include "funcdata.h"
#include "../runtime/syscall_nacl.h"
//
// System call support for amd64, Native Client
//
#define NACL_SYSCALL(code) \
MOVL $(0x10000 + ((code)<<5)), AX; CALL AX
#define NACL_SYSJMP(code) \
MOVL $(0x10000 + ((code)<<5)), AX; JMP AX
TEXT ·Syscall(SB),NOSPLIT,$0-28
CALL runtime·entersyscall(SB)
MOVL trap+0(FP), AX
MOVL a1+4(FP), DI
MOVL a2+8(FP), SI
MOVL a3+12(FP), DX
// more args would use CX, R8, R9
SHLL $5, AX
ADDL $0x10000, AX
CALL AX
CMPL AX, $0
JGE ok
MOVL $-1, r1+16(FP)
MOVL $-1, r2+20(FP)
NEGL AX
MOVL AX, err+24(FP)
CALL runtime·exitsyscall(SB)
RET
ok:
MOVL AX, r1+16(FP)
MOVL DX, r2+20(FP)
MOVL $0, err+24(FP)
CALL runtime·exitsyscall(SB)
RET
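
Both macros in this file encode NaCl's syscall convention: the service runtime exposes each call as a fixed trampoline, 32 bytes apart, starting at 0x10000, so the call target is pure arithmetic on the syscall number. A sketch of that computation:

package main

import "fmt"

// trampoline computes the NACL_SYSCALL/NACL_SYSJMP target: one 32-byte
// syscall stub per number, laid out from address 0x10000.
func trampoline(code uint32) uint32 {
	return 0x10000 + code<<5
}

func main() {
	fmt.Printf("%#x\n", trampoline(30)) // 0x103c0
}
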

View File

@ -265,12 +265,6 @@ linux_s390x)
mksysnum="./mksysnum_linux.pl $unistd_h" mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs" mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;; ;;
nacl_amd64p32)
mkerrors=""
mksyscall="./mksyscall.pl -nacl"
mksysnum=""
mktypes=""
;;
netbsd_386) netbsd_386)
mkerrors="$mkerrors -m32" mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32 -netbsd" mksyscall="./mksyscall.pl -l32 -netbsd"

View File

@ -29,7 +29,6 @@ my $darwin = 0;
my $openbsd = 0; my $openbsd = 0;
my $netbsd = 0; my $netbsd = 0;
my $dragonfly = 0; my $dragonfly = 0;
my $nacl = 0;
my $arm = 0; # 64-bit value should use (even, odd)-pair my $arm = 0; # 64-bit value should use (even, odd)-pair
my $tags = ""; # build tags my $tags = ""; # build tags
@ -60,10 +59,6 @@ if($ARGV[0] eq "-dragonfly") {
$dragonfly = 1; $dragonfly = 1;
shift; shift;
} }
if($ARGV[0] eq "-nacl") {
$nacl = 1;
shift;
}
if($ARGV[0] eq "-arm") { if($ARGV[0] eq "-arm") {
$arm = 1; $arm = 1;
shift; shift;
@ -262,9 +257,6 @@ while(<>) {
$sysname = "SYS_$func"; $sysname = "SYS_$func";
$sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar
$sysname =~ y/a-z/A-Z/; $sysname =~ y/a-z/A-Z/;
if($nacl) {
$sysname =~ y/A-Z/a-z/;
}
if($darwin) { if($darwin) {
$sysname =~ y/A-Z/a-z/; $sysname =~ y/A-Z/a-z/;
$sysname = substr $sysname, 4; $sysname = substr $sysname, 4;

View File

@ -1,23 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syscall
type Timespec struct {
Sec int64
Nsec int32
}
type Timeval struct {
Sec int64
Usec int32
}
func setTimespec(sec, nsec int64) Timespec {
return Timespec{Sec: sec, Nsec: int32(nsec)}
}
func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: sec, Usec: int32(usec)}
}

View File

@ -8,7 +8,11 @@ package syscall
import "runtime" import "runtime"
// TODO: generate with runtime/mknacl.sh, allow override with IRT. // These were originally used by Nacl, then later also used by
// js/wasm. Now that they're only used by js/wasm, these numbers are
// just arbitrary.
//
// TODO: delete? replace with something meaningful?
const ( const (
sys_null = 1 sys_null = 1
sys_nameservice = 2 sys_nameservice = 2

View File

@ -1,11 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
TEXT ·startTimer(SB),NOSPLIT,$0
JMP time·startTimer(SB)
TEXT ·stopTimer(SB),NOSPLIT,$0
JMP time·stopTimer(SB)

View File

@ -1,71 +0,0 @@
// mksyscall.pl -nacl -tags nacl,amd64p32 syscall_nacl.go syscall_nacl_amd64p32.go
// Code generated by the command above; DO NOT EDIT.
// +build nacl,amd64p32
package syscall
import "unsafe"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func naclClose(fd int) (err error) {
_, _, e1 := Syscall(sys_close, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func naclFstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := Syscall(sys_fstat, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func naclRead(fd int, b []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(sys_read, uintptr(fd), uintptr(_p0), uintptr(len(b)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func naclSeek(fd int, off *int64, whence int) (err error) {
_, _, e1 := Syscall(sys_lseek, uintptr(fd), uintptr(unsafe.Pointer(off)), uintptr(whence))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func naclGetRandomBytes(b []byte) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(sys_get_random_bytes, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
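
A recurring pattern in these generated wrappers is the _zero fallback: for an empty slice there is no b[0] to take the address of, so the syscall receives the address of a dummy word instead of a nil pointer. The guard in isolation (bufPtr is an illustrative name, not part of the generated code):

package main

import (
	"fmt"
	"unsafe"
)

var _zero uintptr

// bufPtr returns a pointer the kernel can safely receive even when the
// caller's buffer is empty, matching the _p0 logic in the wrappers above.
func bufPtr(b []byte) unsafe.Pointer {
	if len(b) > 0 {
		return unsafe.Pointer(&b[0])
	}
	return unsafe.Pointer(&_zero)
}

func main() {
	fmt.Println(bufPtr(nil) != nil) // true: never a nil pointer
}
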