all: remove the nacl port (part 2, amd64p32 + toolchain)
This is part two of the nacl removal. Part 1 was CL 199499.

This CL removes amd64p32 support, which might be useful again in the
future if we implement the x32 ABI. It also removes the nacl bits in
the toolchain, along with some remaining nacl bits elsewhere.
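
For context, amd64p32 is the 64-bit AMD64 instruction set combined with
32-bit pointers (and a 32-bit int), which is also the defining property of
the x32 ABI. A minimal hedged sketch of the user-visible difference,
assuming a program built once per GOARCH:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        var p *int
        // Printed 4 on the removed nacl/amd64p32 port; prints 8 on linux/amd64.
        fmt.Println(unsafe.Sizeof(p))
    }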

Updates #30439

Change-Id: I2475d5bb066d1b474e00e40d95b520e7c2e286e1
Reviewed-on: https://go-review.googlesource.com/c/go/+/200077
Reviewed-by: Ian Lance Taylor <iant@golang.org>
bradfitz committed Oct 9, 2019
1 parent 19a7490 commit 07b4abd
Showing 70 changed files with 387 additions and 4,072 deletions.
2 changes: 1 addition & 1 deletion src/cmd/compile/internal/amd64/ggen.go
@@ -94,7 +94,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog
 		if cnt%16 != 0 {
 			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
 		}
-	} else if !gc.Nacl && !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
+	} else if !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
 		if *state&x0 == 0 {
 			p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
 			*state |= x0
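
This hunk (and the analogous ARM one below) only drops the gc.Nacl guard;
the Duff's-device zeroing path itself is unchanged and now applies on every
target except Plan 9 when cnt <= 128*gc.Widthreg (1024 bytes on amd64). As
a hedged, hypothetical example of the kind of frame zerorange handles — a
large local the compiler must zero-initialize:

    package main

    // clearFrame is illustrative only: buf occupies a 512-byte stack slot
    // that must be zeroed, well within the 128*Widthreg threshold, so the
    // compiler can use the Duff's-device path rather than a loop.
    func clearFrame() [64]uint64 {
        var buf [64]uint64
        return buf
    }

    func main() {
        _ = clearFrame()
    }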
2 changes: 1 addition & 1 deletion src/cmd/compile/internal/arm/ggen.go
@@ -23,7 +23,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog
 		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
 			p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
 		}
-	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
+	} else if cnt <= int64(128*gc.Widthptr) {
 		p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
 		p.Reg = arm.REGSP
 		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
2 changes: 0 additions & 2 deletions src/cmd/compile/internal/gc/go.go
@@ -247,8 +247,6 @@ var Ctxt *obj.Link
 
 var writearchive bool
 
-var Nacl bool
-
 var nodfp *Node
 
 var disable_checknil int
1 change: 0 additions & 1 deletion src/cmd/compile/internal/gc/main.go
@@ -187,7 +187,6 @@ func Main(archInit func(*Arch)) {
 	// pseudo-package used for methods with anonymous receivers
 	gopkg = types.NewPkg("go", "")
 
-	Nacl = objabi.GOOS == "nacl"
 	Wasm := objabi.GOARCH == "wasm"
 
 	// Whether the limit for stack-allocated objects is much smaller than normal.
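
With the cached Nacl flag deleted, the compiler's remaining platform checks
compare the objabi.GOOS/GOARCH strings directly, as the surviving Wasm line
does. A hedged sketch of that idiom with stand-in variables (not the real
cmd/internal/objabi package):

    package main

    import "fmt"

    // GOOS and GOARCH stand in for objabi's build-configured values.
    var GOOS, GOARCH = "linux", "amd64"

    func main() {
        wasm := GOARCH == "wasm"   // the check kept in Main
        isPlan9 := GOOS == "plan9" // the check used by zerorange
        fmt.Println(wasm, isPlan9)
    }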
13 changes: 0 additions & 13 deletions src/cmd/compile/internal/ssa/config.go
@@ -38,7 +38,6 @@ type Config struct {
 	useSSE bool // Use SSE for non-float operations
 	useAvg bool // Use optimizations that need Avg* operations
 	useHmul bool // Use optimizations that need Hmul* operations
-	nacl bool // GOOS=nacl
 	use387 bool // GO386=387
 	SoftFloat bool //
 	Race bool // race detector enabled
@@ -339,7 +338,6 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
 	}
 	c.ctxt = ctxt
 	c.optimize = optimize
-	c.nacl = objabi.GOOS == "nacl"
 	c.useSSE = true
 
 	// Don't use Duff's device nor SSE on Plan 9 AMD64, because
@@ -349,17 +347,6 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
 		c.useSSE = false
 	}
 
-	if c.nacl {
-		c.noDuffDevice = true // Don't use Duff's device on NaCl
-
-		// Returns clobber BP on nacl/386, so the write
-		// barrier does.
-		opcodeTable[Op386LoweredWB].reg.clobbers |= 1 << 5 // BP
-
-		// ... and SI on nacl/amd64.
-		opcodeTable[OpAMD64LoweredWB].reg.clobbers |= 1 << 6 // SI
-	}
-
 	if ctxt.Flag_shared {
 		// LoweredWB is secretly a CALL and CALLs on 386 in
 		// shared mode get rewritten by obj6.go to go through
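
The deleted block leaned on the SSA register-mask convention: a clobber set
(like the allocatable set trimmed in regalloc.go below) is a bitmask indexed
by register number, so reserving or clobbering a register toggles one bit. A
standalone sketch of that convention — the type name and register numbers
mirror the removed lines but are not the compiler's actual definitions:

    package main

    import "fmt"

    type regMask uint64

    func main() {
        var clobbers regMask
        clobbers |= 1 << 5 // mark BP (reg 5 on 386) clobbered, as the nacl code did
        clobbers |= 1 << 6 // mark SI (reg 6 on amd64) clobbered

        allocatable := regMask(1<<16 - 1)
        allocatable &^= 1 << 9 // drop R9, as regalloc did on nacl/arm
        fmt.Printf("clobbers=%b allocatable=%b\n", clobbers, allocatable)
    }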
82 changes: 41 additions & 41 deletions src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -596,32 +596,32 @@
 // into tests for carry flags.
 // ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis
 // mutandis, for UGE and SETAE, and CC and SETCC.
-((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> ((ULT|UGE) (BTL x y))
-((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> ((ULT|UGE) (BTQ x y))
-((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c) && !config.nacl
+((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) -> ((ULT|UGE) (BTL x y))
+((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) -> ((ULT|UGE) (BTQ x y))
+((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c)
     -> ((ULT|UGE) (BTLconst [log2uint32(c)] x))
-((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c) && !config.nacl
+((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c)
     -> ((ULT|UGE) (BTQconst [log2(c)] x))
-((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) && !config.nacl
+((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
     -> ((ULT|UGE) (BTQconst [log2(c)] x))
-(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (SET(B|AE) (BTL x y))
-(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (SET(B|AE) (BTQ x y))
-(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) -> (SET(B|AE) (BTL x y))
+(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) -> (SET(B|AE) (BTQ x y))
+(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c)
     -> (SET(B|AE) (BTLconst [log2uint32(c)] x))
-(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c)
     -> (SET(B|AE) (BTQconst [log2(c)] x))
-(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
     -> (SET(B|AE) (BTQconst [log2(c)] x))
 // SET..store variant
-(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
     -> (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
     -> (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(c)
     -> (SET(B|AE)store [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(c)
     -> (SET(B|AE)store [off] {sym} ptr (BTQconst [log2(c)] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c)
     -> (SET(B|AE)store [off] {sym} ptr (BTQconst [log2(c)] x) mem)
 
 // Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules
@@ -641,29 +641,29 @@
 (SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) -> (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem)
 
 // Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
-(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) && !config.nacl -> (BTS(Q|L) x y)
-(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) && !config.nacl -> (BTC(Q|L) x y)
+(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) -> (BTS(Q|L) x y)
+(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) -> (BTC(Q|L) x y)
 
 // Convert ORconst into BTS, if the code gets smaller, with boundary being
 // (ORL $40,AX is 3 bytes, ORL $80,AX is 6 bytes).
-((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
     -> (BT(S|C)Qconst [log2(c)] x)
-((ORL|XORL)const [c] x) && isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORL|XORL)const [c] x) && isUint32PowerOfTwo(c) && uint64(c) >= 128
     -> (BT(S|C)Lconst [log2uint32(c)] x)
-((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
     -> (BT(S|C)Qconst [log2(c)] x)
-((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(c) && uint64(c) >= 128
     -> (BT(S|C)Lconst [log2uint32(c)] x)
 
 // Recognize bit clearing: a &^= 1<<b
-(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) && !config.nacl -> (BTR(Q|L) x y)
-(ANDQconst [c] x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) -> (BTR(Q|L) x y)
+(ANDQconst [c] x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
     -> (BTRQconst [log2(^c)] x)
-(ANDLconst [c] x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(ANDLconst [c] x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128
     -> (BTRLconst [log2uint32(^c)] x)
-(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
     -> (BTRQconst [log2(^c)] x)
-(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128
     -> (BTRLconst [log2uint32(^c)] x)
 
 // Special-case bit patterns on first/last bit.
@@ -677,40 +677,40 @@
 // We thus special-case them, by detecting the shift patterns.
 
 // Special case resetting first/last bit
-(SHL(L|Q)const [1] (SHR(L|Q)const [1] x)) && !config.nacl
+(SHL(L|Q)const [1] (SHR(L|Q)const [1] x))
     -> (BTR(L|Q)const [0] x)
-(SHRLconst [1] (SHLLconst [1] x)) && !config.nacl
+(SHRLconst [1] (SHLLconst [1] x))
     -> (BTRLconst [31] x)
-(SHRQconst [1] (SHLQconst [1] x)) && !config.nacl
+(SHRQconst [1] (SHLQconst [1] x))
     -> (BTRQconst [63] x)
 
 // Special case testing first/last bit (with double-shift generated by generic.rules)
-((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2
     -> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
-((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTQconst [31] x))
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
 
-((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTQconst [0] x))
-((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTLconst [0] x))
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem)
 
 // Special-case manually testing last bit with "a>>63 != 0" (without "&1")
-((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
-((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTLconst [31] x))
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
 
 // Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1)
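
Per the comments in these rules, they recognize single-bit tests and updates
and lower them to the BT instruction family; with !config.nacl gone, the
lowering is gated only by the remaining conditions. A hedged Go-level sketch
of the source shapes involved (names illustrative):

    package main

    import "fmt"

    func bitOps(a uint64, b uint) (test bool, set, toggle, clear uint64) {
        test = a&(1<<b) != 0  // bit test   -> BTQ, then SETB/SETAE
        set = a | 1<<b        // bit set    -> BTSQ
        toggle = a ^ 1<<b     // bit toggle -> BTCQ
        clear = a &^ (1 << b) // bit clear  -> BTRQ
        return
    }

    func main() {
        fmt.Println(bitOps(10, 1))
    }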
28 changes: 14 additions & 14 deletions src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -1246,20 +1246,20 @@
 ((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && objabi.GOARM>=6 -> (REV16 x)
 
 // use indexed loads and stores
-(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVWloadidx ptr idx mem)
-(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVWstoreidx ptr idx val mem)
-(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftLL ptr idx [c] mem)
-(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRL ptr idx [c] mem)
-(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRA ptr idx [c] mem)
-(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftLL ptr idx [c] val mem)
-(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRL ptr idx [c] val mem)
-(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRA ptr idx [c] val mem)
-(MOVBUload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVBUloadidx ptr idx mem)
-(MOVBload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVBloadidx ptr idx mem)
-(MOVBstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVBstoreidx ptr idx val mem)
-(MOVHUload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVHUloadidx ptr idx mem)
-(MOVHload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVHloadidx ptr idx mem)
-(MOVHstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVHstoreidx ptr idx val mem)
+(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVWloadidx ptr idx mem)
+(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVWstoreidx ptr idx val mem)
+(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVBUload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVBUloadidx ptr idx mem)
+(MOVBload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVBloadidx ptr idx mem)
+(MOVBstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVBstoreidx ptr idx val mem)
+(MOVHUload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVHUloadidx ptr idx mem)
+(MOVHload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVHloadidx ptr idx mem)
+(MOVHstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVHstoreidx ptr idx val mem)
 
 // constant folding in indexed loads and stores
 (MOVWloadidx ptr (MOVWconst [c]) mem) -> (MOVWload [c] ptr mem)
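
These ARM rules fold the address computation (an ADD, optionally with a
shifted index) into the load or store itself, and now do so on every ARM
target rather than skipping nacl. A hedged sketch of a source shape that
matches the shifted-index form (names illustrative):

    package main

    import "fmt"

    // loadIdx computes the address p + 4*i, which matches the
    // base+shifted-index pattern behind MOVWloadshiftLL ptr idx [2].
    func loadIdx(p []uint32, i int) uint32 {
        return p[i]
    }

    func main() {
        fmt.Println(loadIdx([]uint32{7, 8, 9}, 1))
    }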
9 changes: 0 additions & 9 deletions src/cmd/compile/internal/ssa/regalloc.go
@@ -625,15 +625,6 @@ func (s *regAllocState) init(f *Func) {
 			s.f.fe.Fatalf(src.NoXPos, "arch %s not implemented", s.f.Config.arch)
 		}
 	}
-	if s.f.Config.nacl {
-		switch s.f.Config.arch {
-		case "arm":
-			s.allocatable &^= 1 << 9 // R9 is "thread pointer" on nacl/arm
-		case "amd64p32":
-			s.allocatable &^= 1 << 5 // BP - reserved for nacl
-			s.allocatable &^= 1 << 15 // R15 - reserved for nacl
-		}
-	}
 	if s.f.Config.use387 {
 		s.allocatable &^= 1 << 15 // X7 disallowed (one 387 register is used as scratch space during SSE->387 generation in ../x86/387.go)
 	}
…