diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 4ed881bc0722b..504f8f0ec3cc5 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -6,6 +6,8 @@ package gc import ( "cmd/compile/internal/types" + "cmd/internal/src" + "fmt" "math/big" "strings" ) @@ -1397,7 +1399,7 @@ func hascallchan(n *Node) bool { // A constSet represents a set of Go constant expressions. type constSet struct { - m map[constSetKey]*Node + m map[constSetKey]src.XPos } type constSetKey struct { @@ -1405,20 +1407,22 @@ type constSetKey struct { val interface{} } -// add adds constant expressions to s. If a constant expression of -// equal value and identical type has already been added, then that -// type expression is returned. Otherwise, add returns nil. +// add adds constant expression n to s. If a constant expression of +// equal value and identical type has already been added, then add +// reports an error about the duplicate value. // -// add also returns nil if n is not a Go constant expression. +// pos provides position information for where expression n occured +// (in case n does not have its own position information). what and +// where are used in the error message. // // n must not be an untyped constant. -func (s *constSet) add(n *Node) *Node { +func (s *constSet) add(pos src.XPos, n *Node, what, where string) { if n.Op == OCONVIFACE && n.Implicit() { n = n.Left } if !n.isGoConst() { - return nil + return } if n.Type.IsUntyped() { Fatalf("%v is untyped", n) @@ -1448,12 +1452,32 @@ func (s *constSet) add(n *Node) *Node { } k := constSetKey{typ, n.Val().Interface()} + if hasUniquePos(n) { + pos = n.Pos + } + if s.m == nil { - s.m = make(map[constSetKey]*Node) + s.m = make(map[constSetKey]src.XPos) } - old, dup := s.m[k] - if !dup { - s.m[k] = n + + if prevPos, isDup := s.m[k]; isDup { + yyerrorl(pos, "duplicate %s %s in %s\n\tprevious %s at %v", + what, nodeAndVal(n), where, + what, linestr(prevPos)) + } else { + s.m[k] = pos } - return old +} + +// nodeAndVal reports both an expression and its constant value, if +// the latter is non-obvious. +// +// TODO(mdempsky): This could probably be a fmt.go flag. +func nodeAndVal(n *Node) string { + show := n.String() + val := n.Val().Interface() + if s := fmt.Sprintf("%#v", val); show != s { + show += " (value " + s + ")" + } + return show } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index f3ec21c7cb0f2..42f47bb8c1990 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -194,30 +194,37 @@ func Fatalf(fmt_ string, args ...interface{}) { errorexit() } -func setlineno(n *Node) src.XPos { - lno := lineno - if n != nil { - switch n.Op { - case ONAME, OPACK: - break - - case OLITERAL, OTYPE: - if n.Sym != nil { - break - } - fallthrough +// hasUniquePos reports whether n has a unique position that can be +// used for reporting error messages. +// +// It's primarily used to distinguish references to named objects, +// whose Pos will point back to their declaration position rather than +// their usage position. 
+func hasUniquePos(n *Node) bool { + switch n.Op { + case ONAME, OPACK: + return false + case OLITERAL, OTYPE: + if n.Sym != nil { + return false + } + } - default: - lineno = n.Pos - if !lineno.IsKnown() { - if Debug['K'] != 0 { - Warn("setlineno: unknown position (line 0)") - } - lineno = lno - } + if !n.Pos.IsKnown() { + if Debug['K'] != 0 { + Warn("setlineno: unknown position (line 0)") } + return false } + return true +} + +func setlineno(n *Node) src.XPos { + lno := lineno + if n != nil && hasUniquePos(n) { + lineno = n.Pos + } return lno } diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 6a418859541e4..1436e29bae52d 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -6,7 +6,6 @@ package gc import ( "cmd/compile/internal/types" - "fmt" "sort" ) @@ -641,23 +640,11 @@ func checkDupExprCases(exprname *Node, clauses []*Node) { continue } - if prev := cs.add(n); prev != nil { - yyerrorl(ncase.Pos, "duplicate case %s in switch\n\tprevious case at %v", - nodeAndVal(n), prev.Line()) - } + cs.add(ncase.Pos, n, "case", "switch") } } } -func nodeAndVal(n *Node) string { - show := n.String() - val := n.Val().Interface() - if s := fmt.Sprintf("%#v", val); show != s { - show += " (value " + s + ")" - } - return show -} - // walk generates an AST that implements sw, // where sw is a type switch. // The AST is generally of the form of a linear diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 4cb28d6100b14..0e680f54ae002 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2911,9 +2911,7 @@ func typecheckcomplit(n *Node) (res *Node) { r = typecheck(r, ctxExpr) r = defaultlit(r, t.Key()) l.Left = assignconv(r, t.Key(), "map key") - if cs.add(l.Left) != nil { - yyerror("duplicate key %v in map literal", l.Left) - } + cs.add(lineno, l.Left, "key", "map literal") r = l.Right pushtype(r, t.Elem()) diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index 454eb498c6c37..347bf2a00f771 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -1733,6 +1733,12 @@ (Eq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 -> (Eq(8|16|32|64) x y) (Neq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 -> (Neq(8|16|32|64) x y) +// Optimize bitsets +(Eq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [y])) (Const(8|16|32|64) [y])) && isPowerOfTwo(y) + -> (Neq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [y])) (Const(8|16|32|64) [0])) +(Neq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [y])) (Const(8|16|32|64) [y])) && isPowerOfTwo(y) + -> (Eq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [y])) (Const(8|16|32|64) [0])) + // Reassociate expressions involving // constants such that constants come first, // exposing obvious constant-folding opportunities. 
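Reviewer note (illustrative, not part of the patch): the two rules added to generic.rules above canonicalize "x&c == c" and "x&c != c", for a power-of-two constant c, into a comparison of the masked value against zero. With the comparison folded to a zero test, the amd64 back end can lower it to a single BT instruction (see the codegen test added in test/codegen/bits.go later in this diff). A minimal sketch of the source-level equivalence the rules rely on, written as a standalone Go program with hypothetical names:

package main

import "fmt"

// hasBit8 is the canonical form the rules rewrite toward: compare the masked
// value against zero instead of against the mask itself.
func hasBit8(x int) bool {
	return x&8 != 0 // equivalent to x&8 == 8 because 8 has a single bit set
}

func main() {
	// The two forms agree for every input: x&8 is either 0 or 8, so it
	// equals 8 exactly when it is nonzero.
	for _, x := range []int{0, 7, 8, 9, 24} {
		fmt.Println(x, x&8 == 8, hasBit8(x))
	}
}

In the patch itself this equivalence is expressed as generated SSA rewrite rules guarded by isPowerOfTwo(y), not as source-level Go; the sketch only shows why the guard is required (for a non-power-of-two mask such as 9, x&9 == 9 and x&9 != 0 are not equivalent, which is why bitSetTest below keeps the ANDQ/CMPQ pair).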
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index db0b1749a89c0..6b0cd050478d6 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -89,7 +89,7 @@ func rewriteValuegeneric(v *Value) bool { case OpDiv8u: return rewriteValuegeneric_OpDiv8u_0(v) case OpEq16: - return rewriteValuegeneric_OpEq16_0(v) || rewriteValuegeneric_OpEq16_10(v) || rewriteValuegeneric_OpEq16_20(v) || rewriteValuegeneric_OpEq16_30(v) || rewriteValuegeneric_OpEq16_40(v) || rewriteValuegeneric_OpEq16_50(v) + return rewriteValuegeneric_OpEq16_0(v) || rewriteValuegeneric_OpEq16_10(v) || rewriteValuegeneric_OpEq16_20(v) || rewriteValuegeneric_OpEq16_30(v) || rewriteValuegeneric_OpEq16_40(v) || rewriteValuegeneric_OpEq16_50(v) || rewriteValuegeneric_OpEq16_60(v) case OpEq32: return rewriteValuegeneric_OpEq32_0(v) || rewriteValuegeneric_OpEq32_10(v) || rewriteValuegeneric_OpEq32_20(v) || rewriteValuegeneric_OpEq32_30(v) || rewriteValuegeneric_OpEq32_40(v) || rewriteValuegeneric_OpEq32_50(v) || rewriteValuegeneric_OpEq32_60(v) || rewriteValuegeneric_OpEq32_70(v) || rewriteValuegeneric_OpEq32_80(v) || rewriteValuegeneric_OpEq32_90(v) case OpEq32F: @@ -275,17 +275,17 @@ func rewriteValuegeneric(v *Value) bool { case OpNeg8: return rewriteValuegeneric_OpNeg8_0(v) case OpNeq16: - return rewriteValuegeneric_OpNeq16_0(v) + return rewriteValuegeneric_OpNeq16_0(v) || rewriteValuegeneric_OpNeq16_10(v) case OpNeq32: - return rewriteValuegeneric_OpNeq32_0(v) + return rewriteValuegeneric_OpNeq32_0(v) || rewriteValuegeneric_OpNeq32_10(v) case OpNeq32F: return rewriteValuegeneric_OpNeq32F_0(v) case OpNeq64: - return rewriteValuegeneric_OpNeq64_0(v) + return rewriteValuegeneric_OpNeq64_0(v) || rewriteValuegeneric_OpNeq64_10(v) case OpNeq64F: return rewriteValuegeneric_OpNeq64F_0(v) case OpNeq8: - return rewriteValuegeneric_OpNeq8_0(v) + return rewriteValuegeneric_OpNeq8_0(v) || rewriteValuegeneric_OpNeq8_10(v) case OpNeqB: return rewriteValuegeneric_OpNeqB_0(v) case OpNeqInter: @@ -12716,6 +12716,188 @@ func rewriteValuegeneric_OpEq16_50(v *Value) bool { v.AddArg(y) return true } + // match: (Eq16 (And16 x (Const16 [y])) (Const16 [y])) + // cond: isPowerOfTwo(y) + // result: (Neq16 (And16 x (Const16 [y])) (Const16 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd16 { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst16 { + break + } + if v_0_1.Type != t { + break + } + y := v_0_1.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst16 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Eq16 (And16 (Const16 [y]) x) (Const16 [y])) + // cond: isPowerOfTwo(y) + // result: (Neq16 (And16 x (Const16 [y])) (Const16 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd16 { + break + } + t := v_0.Type + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + break + } + if v_0_0.Type != t { + break + } + y := v_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst16 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + 
v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Eq16 (Const16 [y]) (And16 x (Const16 [y]))) + // cond: isPowerOfTwo(y) + // result: (Neq16 (And16 x (Const16 [y])) (Const16 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst16 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd16 { + break + } + if v_1.Type != t { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + return false +} +func rewriteValuegeneric_OpEq16_60(v *Value) bool { + b := v.Block + // match: (Eq16 (Const16 [y]) (And16 (Const16 [y]) x)) + // cond: isPowerOfTwo(y) + // result: (Neq16 (And16 x (Const16 [y])) (Const16 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst16 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd16 { + break + } + if v_1.Type != t { + break + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { + break + } + if v_1_0.Type != t { + break + } + if v_1_0.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } return false } func rewriteValuegeneric_OpEq32_0(v *Value) bool { @@ -19708,6 +19890,184 @@ func rewriteValuegeneric_OpEq32_90(v *Value) bool { v.AddArg(y) return true } + // match: (Eq32 (And32 x (Const32 [y])) (Const32 [y])) + // cond: isPowerOfTwo(y) + // result: (Neq32 (And32 x (Const32 [y])) (Const32 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd32 { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst32 { + break + } + if v_0_1.Type != t { + break + } + y := v_0_1.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Eq32 (And32 (Const32 [y]) x) (Const32 [y])) + // cond: isPowerOfTwo(y) + // result: (Neq32 (And32 x (Const32 [y])) (Const32 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd32 { + break + } + t := v_0.Type + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + break + } + if v_0_0.Type != t { + break + } + y := v_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg(x) + v1 := 
b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Eq32 (Const32 [y]) (And32 x (Const32 [y]))) + // cond: isPowerOfTwo(y) + // result: (Neq32 (And32 x (Const32 [y])) (Const32 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst32 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd32 { + break + } + if v_1.Type != t { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Eq32 (Const32 [y]) (And32 (Const32 [y]) x)) + // cond: isPowerOfTwo(y) + // result: (Neq32 (And32 x (Const32 [y])) (Const32 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst32 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd32 { + break + } + if v_1.Type != t { + break + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + if v_1_0.Type != t { + break + } + if v_1_0.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } return false } func rewriteValuegeneric_OpEq32F_0(v *Value) bool { @@ -24058,6 +24418,7 @@ func rewriteValuegeneric_OpEq64_50(v *Value) bool { return false } func rewriteValuegeneric_OpEq64_60(v *Value) bool { + b := v.Block // match: (Eq64 (Const64 [0]) s:(Sub64 x y)) // cond: s.Uses == 1 // result: (Eq64 x y) @@ -24084,6 +24445,184 @@ func rewriteValuegeneric_OpEq64_60(v *Value) bool { v.AddArg(y) return true } + // match: (Eq64 (And64 x (Const64 [y])) (Const64 [y])) + // cond: isPowerOfTwo(y) + // result: (Neq64 (And64 x (Const64 [y])) (Const64 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd64 { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + if v_0_1.Type != t { + break + } + y := v_0_1.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Eq64 (And64 (Const64 [y]) x) (Const64 [y])) + // cond: isPowerOfTwo(y) + // result: (Neq64 (And64 x (Const64 [y])) (Const64 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd64 { + break + } + t := v_0.Type + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + break + } + if v_0_0.Type != t { + break + } + y := v_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + 
v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Eq64 (Const64 [y]) (And64 x (Const64 [y]))) + // cond: isPowerOfTwo(y) + // result: (Neq64 (And64 x (Const64 [y])) (Const64 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst64 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd64 { + break + } + if v_1.Type != t { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Eq64 (Const64 [y]) (And64 (Const64 [y]) x)) + // cond: isPowerOfTwo(y) + // result: (Neq64 (And64 x (Const64 [y])) (Const64 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst64 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd64 { + break + } + if v_1.Type != t { + break + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { + break + } + if v_1_0.Type != t { + break + } + if v_1_0.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } return false } func rewriteValuegeneric_OpEq64F_0(v *Value) bool { @@ -26177,6 +26716,184 @@ func rewriteValuegeneric_OpEq8_30(v *Value) bool { v.AddArg(y) return true } + // match: (Eq8 (And8 x (Const8 [y])) (Const8 [y])) + // cond: isPowerOfTwo(y) + // result: (Neq8 (And8 x (Const8 [y])) (Const8 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd8 { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst8 { + break + } + if v_0_1.Type != t { + break + } + y := v_0_1.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Eq8 (And8 (Const8 [y]) x) (Const8 [y])) + // cond: isPowerOfTwo(y) + // result: (Neq8 (And8 x (Const8 [y])) (Const8 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd8 { + break + } + t := v_0.Type + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + break + } + if v_0_0.Type != t { + break + } + y := v_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst8, t) + 
v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Eq8 (Const8 [y]) (And8 x (Const8 [y]))) + // cond: isPowerOfTwo(y) + // result: (Neq8 (And8 x (Const8 [y])) (Const8 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst8 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd8 { + break + } + if v_1.Type != t { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Eq8 (Const8 [y]) (And8 (Const8 [y]) x)) + // cond: isPowerOfTwo(y) + // result: (Neq8 (And8 x (Const8 [y])) (Const8 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst8 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd8 { + break + } + if v_1.Type != t { + break + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 { + break + } + if v_1_0.Type != t { + break + } + if v_1_0.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } return false } func rewriteValuegeneric_OpEqB_0(v *Value) bool { @@ -35976,6 +36693,188 @@ func rewriteValuegeneric_OpNeq16_0(v *Value) bool { v.AddArg(y) return true } + // match: (Neq16 (And16 x (Const16 [y])) (Const16 [y])) + // cond: isPowerOfTwo(y) + // result: (Eq16 (And16 x (Const16 [y])) (Const16 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd16 { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst16 { + break + } + if v_0_1.Type != t { + break + } + y := v_0_1.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst16 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + return false +} +func rewriteValuegeneric_OpNeq16_10(v *Value) bool { + b := v.Block + // match: (Neq16 (And16 (Const16 [y]) x) (Const16 [y])) + // cond: isPowerOfTwo(y) + // result: (Eq16 (And16 x (Const16 [y])) (Const16 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd16 { + break + } + t := v_0.Type + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + break + } + if v_0_0.Type != t { + break + } + y := v_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst16 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Neq16 (Const16 [y]) (And16 x (Const16 [y]))) + // cond: 
isPowerOfTwo(y) + // result: (Eq16 (And16 x (Const16 [y])) (Const16 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst16 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd16 { + break + } + if v_1.Type != t { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Neq16 (Const16 [y]) (And16 (Const16 [y]) x)) + // cond: isPowerOfTwo(y) + // result: (Eq16 (And16 x (Const16 [y])) (Const16 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst16 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd16 { + break + } + if v_1.Type != t { + break + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { + break + } + if v_1_0.Type != t { + break + } + if v_1_0.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } return false } func rewriteValuegeneric_OpNeq32_0(v *Value) bool { @@ -36208,6 +37107,188 @@ func rewriteValuegeneric_OpNeq32_0(v *Value) bool { v.AddArg(y) return true } + // match: (Neq32 (And32 x (Const32 [y])) (Const32 [y])) + // cond: isPowerOfTwo(y) + // result: (Eq32 (And32 x (Const32 [y])) (Const32 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd32 { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst32 { + break + } + if v_0_1.Type != t { + break + } + y := v_0_1.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + return false +} +func rewriteValuegeneric_OpNeq32_10(v *Value) bool { + b := v.Block + // match: (Neq32 (And32 (Const32 [y]) x) (Const32 [y])) + // cond: isPowerOfTwo(y) + // result: (Eq32 (And32 x (Const32 [y])) (Const32 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd32 { + break + } + t := v_0.Type + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + break + } + if v_0_0.Type != t { + break + } + y := v_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Neq32 (Const32 [y]) (And32 x (Const32 [y]))) + // cond: isPowerOfTwo(y) + // result: (Eq32 (And32 x (Const32 [y])) (Const32 [0])) + for { + _ = 
v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst32 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd32 { + break + } + if v_1.Type != t { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Neq32 (Const32 [y]) (And32 (Const32 [y]) x)) + // cond: isPowerOfTwo(y) + // result: (Eq32 (And32 x (Const32 [y])) (Const32 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst32 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd32 { + break + } + if v_1.Type != t { + break + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + if v_1_0.Type != t { + break + } + if v_1_0.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } return false } func rewriteValuegeneric_OpNeq32F_0(v *Value) bool { @@ -36481,6 +37562,188 @@ func rewriteValuegeneric_OpNeq64_0(v *Value) bool { v.AddArg(y) return true } + // match: (Neq64 (And64 x (Const64 [y])) (Const64 [y])) + // cond: isPowerOfTwo(y) + // result: (Eq64 (And64 x (Const64 [y])) (Const64 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd64 { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + if v_0_1.Type != t { + break + } + y := v_0_1.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + return false +} +func rewriteValuegeneric_OpNeq64_10(v *Value) bool { + b := v.Block + // match: (Neq64 (And64 (Const64 [y]) x) (Const64 [y])) + // cond: isPowerOfTwo(y) + // result: (Eq64 (And64 x (Const64 [y])) (Const64 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd64 { + break + } + t := v_0.Type + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + break + } + if v_0_0.Type != t { + break + } + y := v_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Neq64 (Const64 [y]) (And64 x (Const64 [y]))) + // cond: isPowerOfTwo(y) + // result: (Eq64 (And64 x (Const64 [y])) (Const64 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst64 { + break + } + t := v_0.Type + y := 
v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd64 { + break + } + if v_1.Type != t { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Neq64 (Const64 [y]) (And64 (Const64 [y]) x)) + // cond: isPowerOfTwo(y) + // result: (Eq64 (And64 x (Const64 [y])) (Const64 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst64 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd64 { + break + } + if v_1.Type != t { + break + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { + break + } + if v_1_0.Type != t { + break + } + if v_1_0.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } return false } func rewriteValuegeneric_OpNeq64F_0(v *Value) bool { @@ -36754,6 +38017,188 @@ func rewriteValuegeneric_OpNeq8_0(v *Value) bool { v.AddArg(y) return true } + // match: (Neq8 (And8 x (Const8 [y])) (Const8 [y])) + // cond: isPowerOfTwo(y) + // result: (Eq8 (And8 x (Const8 [y])) (Const8 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd8 { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst8 { + break + } + if v_0_1.Type != t { + break + } + y := v_0_1.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + return false +} +func rewriteValuegeneric_OpNeq8_10(v *Value) bool { + b := v.Block + // match: (Neq8 (And8 (Const8 [y]) x) (Const8 [y])) + // cond: isPowerOfTwo(y) + // result: (Eq8 (And8 x (Const8 [y])) (Const8 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAnd8 { + break + } + t := v_0.Type + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + break + } + if v_0_0.Type != t { + break + } + y := v_0_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + break + } + if v_1.Type != t { + break + } + if v_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Neq8 (Const8 [y]) (And8 x (Const8 [y]))) + // cond: isPowerOfTwo(y) + // result: (Eq8 (And8 x (Const8 [y])) (Const8 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst8 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd8 { + break + } + if v_1.Type != t { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] 
+ v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } + // match: (Neq8 (Const8 [y]) (And8 (Const8 [y]) x)) + // cond: isPowerOfTwo(y) + // result: (Eq8 (And8 x (Const8 [y])) (Const8 [0])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst8 { + break + } + t := v_0.Type + y := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd8 { + break + } + if v_1.Type != t { + break + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 { + break + } + if v_1_0.Type != t { + break + } + if v_1_0.AuxInt != y { + break + } + if !(isPowerOfTwo(y)) { + break + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = y + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = 0 + v.AddArg(v2) + return true + } return false } func rewriteValuegeneric_OpNeqB_0(v *Value) bool { diff --git a/src/encoding/json/decode.go b/src/encoding/json/decode.go index cbd71acfc6c38..df1c085917832 100644 --- a/src/encoding/json/decode.go +++ b/src/encoding/json/decode.go @@ -199,66 +199,6 @@ func (n Number) Int64() (int64, error) { return strconv.ParseInt(string(n), 10, 64) } -// isValidNumber reports whether s is a valid JSON number literal. -func isValidNumber(s string) bool { - // This function implements the JSON numbers grammar. - // See https://tools.ietf.org/html/rfc7159#section-6 - // and https://json.org/number.gif - - if s == "" { - return false - } - - // Optional - - if s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - - // Digits - switch { - default: - return false - - case s[0] == '0': - s = s[1:] - - case '1' <= s[0] && s[0] <= '9': - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // . followed by 1 or more digits. - if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // e or E followed by an optional - or + and - // 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:] - if s[0] == '+' || s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // Make sure we are at the end. - return s == "" -} - // decodeState represents the state while decoding a JSON value. type decodeState struct { data []byte @@ -1027,10 +967,9 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool switch v.Kind() { default: if v.Kind() == reflect.String && v.Type() == numberType { + // s must be a valid number, because it's + // already been tokenized. 
v.SetString(s) - if !isValidNumber(s) { - return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item) - } break } if fromQuoted { diff --git a/src/encoding/json/decode_test.go b/src/encoding/json/decode_test.go index d66be44d4e134..8dcb08cbd2776 100644 --- a/src/encoding/json/decode_test.go +++ b/src/encoding/json/decode_test.go @@ -448,6 +448,7 @@ var unmarshalTests = []unmarshalTest{ {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}}, {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true}, {in: `[2, 3`, err: &SyntaxError{msg: "unexpected end of JSON input", Offset: 5}}, + {in: `{"F3": -}`, ptr: new(V), out: V{F3: Number("-")}, err: &SyntaxError{msg: "invalid character '}' in numeric literal", Offset: 9}}, // raw value errors {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, diff --git a/src/encoding/json/encode.go b/src/encoding/json/encode.go index 67412763d6400..07d3098f1c1ed 100644 --- a/src/encoding/json/encode.go +++ b/src/encoding/json/encode.go @@ -611,6 +611,66 @@ func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) { } } +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and https://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + func interfaceEncoder(e *encodeState, v reflect.Value, opts encOpts) { if v.IsNil() { e.WriteString("null") diff --git a/src/net/http/transport.go b/src/net/http/transport.go index f9d9f4451cdb6..ee279877e02e2 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -953,6 +953,7 @@ func (t *Transport) queueForIdleConn(w *wantConn) (delivered bool) { t.idleConnWait = make(map[connectMethodKey]wantConnQueue) } q := t.idleConnWait[w.key] + q.cleanFront() q.pushBack(w) t.idleConnWait[w.key] = q return false @@ -1137,7 +1138,7 @@ func (q *wantConnQueue) pushBack(w *wantConn) { q.tail = append(q.tail, w) } -// popFront removes and returns the w at the front of the queue. +// popFront removes and returns the wantConn at the front of the queue. func (q *wantConnQueue) popFront() *wantConn { if q.headPos >= len(q.head) { if len(q.tail) == 0 { @@ -1152,6 +1153,30 @@ func (q *wantConnQueue) popFront() *wantConn { return w } +// peekFront returns the wantConn at the front of the queue without removing it. 
+func (q *wantConnQueue) peekFront() *wantConn { + if q.headPos < len(q.head) { + return q.head[q.headPos] + } + if len(q.tail) > 0 { + return q.tail[0] + } + return nil +} + +// cleanFront pops any wantConns that are no longer waiting from the head of the +// queue, reporting whether any were popped. +func (q *wantConnQueue) cleanFront() (cleaned bool) { + for { + w := q.peekFront() + if w == nil || w.waiting() { + return cleaned + } + q.popFront() + cleaned = true + } +} + // getConn dials and creates a new persistConn to the target as // specified in the connectMethod. This includes doing a proxy CONNECT // and/or setting up TLS. If this doesn't return an error, the persistConn @@ -1261,6 +1286,7 @@ func (t *Transport) queueForDial(w *wantConn) { t.connsPerHostWait = make(map[connectMethodKey]wantConnQueue) } q := t.connsPerHostWait[w.key] + q.cleanFront() q.pushBack(w) t.connsPerHostWait[w.key] = q } diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go index 1a6f631ea2093..23afff5d84316 100644 --- a/src/net/http/transport_test.go +++ b/src/net/http/transport_test.go @@ -1658,6 +1658,176 @@ func TestTransportPersistConnLeakShortBody(t *testing.T) { } } +// A countedConn is a net.Conn that decrements an atomic counter when finalized. +type countedConn struct { + net.Conn +} + +// A countingDialer dials connections and counts the number that remain reachable. +type countingDialer struct { + dialer net.Dialer + mu sync.Mutex + total, live int64 +} + +func (d *countingDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + conn, err := d.dialer.DialContext(ctx, network, address) + if err != nil { + return nil, err + } + + counted := new(countedConn) + counted.Conn = conn + + d.mu.Lock() + defer d.mu.Unlock() + d.total++ + d.live++ + + runtime.SetFinalizer(counted, d.decrement) + return counted, nil +} + +func (d *countingDialer) decrement(*countedConn) { + d.mu.Lock() + defer d.mu.Unlock() + d.live-- +} + +func (d *countingDialer) Read() (total, live int64) { + d.mu.Lock() + defer d.mu.Unlock() + return d.total, d.live +} + +func TestTransportPersistConnLeakNeverIdle(t *testing.T) { + defer afterTest(t) + + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + // Close every connection so that it cannot be kept alive. 
+ conn, _, err := w.(Hijacker).Hijack() + if err != nil { + t.Errorf("Hijack failed unexpectedly: %v", err) + return + } + conn.Close() + })) + defer ts.Close() + + var d countingDialer + c := ts.Client() + c.Transport.(*Transport).DialContext = d.DialContext + + body := []byte("Hello") + for i := 0; ; i++ { + total, live := d.Read() + if live < total { + break + } + if i >= 1<<12 { + t.Fatalf("Count of live client net.Conns (%d) not lower than total (%d) after %d Do / GC iterations.", live, total, i) + } + + req, err := NewRequest("POST", ts.URL, bytes.NewReader(body)) + if err != nil { + t.Fatal(err) + } + _, err = c.Do(req) + if err == nil { + t.Fatal("expected broken connection") + } + + runtime.GC() + } +} + +type countedContext struct { + context.Context +} + +type contextCounter struct { + mu sync.Mutex + live int64 +} + +func (cc *contextCounter) Track(ctx context.Context) context.Context { + counted := new(countedContext) + counted.Context = ctx + cc.mu.Lock() + defer cc.mu.Unlock() + cc.live++ + runtime.SetFinalizer(counted, cc.decrement) + return counted +} + +func (cc *contextCounter) decrement(*countedContext) { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.live-- +} + +func (cc *contextCounter) Read() (live int64) { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.live +} + +func TestTransportPersistConnContextLeakMaxConnsPerHost(t *testing.T) { + defer afterTest(t) + + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + runtime.Gosched() + w.WriteHeader(StatusOK) + })) + defer ts.Close() + + c := ts.Client() + c.Transport.(*Transport).MaxConnsPerHost = 1 + + ctx := context.Background() + body := []byte("Hello") + doPosts := func(cc *contextCounter) { + var wg sync.WaitGroup + for n := 64; n > 0; n-- { + wg.Add(1) + go func() { + defer wg.Done() + + ctx := cc.Track(ctx) + req, err := NewRequest("POST", ts.URL, bytes.NewReader(body)) + if err != nil { + t.Error(err) + } + + _, err = c.Do(req.WithContext(ctx)) + if err != nil { + t.Errorf("Do failed with error: %v", err) + } + }() + } + wg.Wait() + } + + var initialCC contextCounter + doPosts(&initialCC) + + // flushCC exists only to put pressure on the GC to finalize the initialCC + // contexts: the flushCC allocations should eventually displace the initialCC + // allocations. + var flushCC contextCounter + for i := 0; ; i++ { + live := initialCC.Read() + if live == 0 { + break + } + if i >= 100 { + t.Fatalf("%d Contexts still not finalized after %d GC cycles.", live, i) + } + doPosts(&flushCC) + runtime.GC() + } +} + // This used to crash; https://golang.org/issue/3266 func TestTransportIdleConnCrash(t *testing.T) { defer afterTest(t) diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 93d329d15e7a9..df98783385576 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -413,7 +413,7 @@ func releaseSudog(s *sudog) { // use the result as an address at which to start executing code. //go:nosplit func funcPC(f interface{}) uintptr { - return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize)) + return *(*uintptr)(efaceOf(&f).data) } // called from assembly @@ -3253,14 +3253,14 @@ func newproc(siz int32, fn *funcval) { gp := getg() pc := getcallerpc() systemstack(func() { - newproc1(fn, (*uint8)(argp), siz, gp, pc) + newproc1(fn, argp, siz, gp, pc) }) } // Create a new g running fn with narg bytes of arguments starting // at argp. callerpc is the address of the go statement that created // this. The new g is put on the queue of g's waiting to run. 
-func newproc1(fn *funcval, argp *uint8, narg int32, callergp *g, callerpc uintptr) { +func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerpc uintptr) { _g_ := getg() if fn == nil { @@ -3305,7 +3305,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, callergp *g, callerpc uintpt spArg += sys.MinFrameSize } if narg > 0 { - memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg)) + memmove(unsafe.Pointer(spArg), argp, uintptr(narg)) // This is a stack-to-stack copy. If write barriers // are enabled and the source stack is grey (the // destination is always black), then perform a diff --git a/test/codegen/bits.go b/test/codegen/bits.go index 65d57c8f9f078..18f9daf7cf394 100644 --- a/test/codegen/bits.go +++ b/test/codegen/bits.go @@ -314,3 +314,15 @@ func op_orn(x, y uint32) uint32 { // arm64:`ORN\t`,-`ORR` return x | ^y } + +// check bitsets +func bitSetPowerOf2Test(x int) bool { + // amd64:"BTL\t[$]3" + return x&8 == 8 +} + +func bitSetTest(x int) bool { + // amd64:"ANDQ\t[$]9, AX" + // amd64:"CMPQ\tAX, [$]9" + return x&9 == 9 +} diff --git a/test/fixedbugs/issue33460.go b/test/fixedbugs/issue33460.go new file mode 100644 index 0000000000000..1061d3e746c26 --- /dev/null +++ b/test/fixedbugs/issue33460.go @@ -0,0 +1,37 @@ +// errorcheck + +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +const ( + zero = iota + one + two + three +) + +const iii int = 0x3 + +func f(v int) { + switch v { + case zero, one: + case two, one: // ERROR "previous case at LINE-1" + + case three: + case 3: // ERROR "previous case at LINE-1" + case iii: // ERROR "previous case at LINE-2" + } +} + +const b = "b" + +var _ = map[string]int{ + "a": 0, + b: 1, + "a": 2, // ERROR "previous key at LINE-2" + "b": 3, // ERROR "previous key at LINE-2" + "b": 4, // ERROR "previous key at LINE-3" +}
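Reviewer note (illustrative, not part of the patch): the errorcheck test above exercises the reworked constSet.add in const.go, which now keys a map by (type identity, constant value) and stores only the src.XPos of the first occurrence, so a later duplicate can be reported against the earlier position. A minimal sketch of that bookkeeping, assuming simplified stand-in types (constKey and plain string positions) rather than the compiler's Node and src.XPos:

package main

import "fmt"

// constKey stands in for the compiler's constSetKey: a type identity plus a
// constant value. The type is reduced to a name string for illustration.
type constKey struct {
	typ string
	val interface{}
}

// constSet records the position of the first occurrence of each key; add
// reports a duplicate against that stored position, mirroring constSet.add.
type constSet struct {
	m map[constKey]string
}

func (s *constSet) add(pos string, k constKey, what, where string) {
	if s.m == nil {
		s.m = make(map[constKey]string)
	}
	if prev, dup := s.m[k]; dup {
		fmt.Printf("duplicate %s %v in %s\n\tprevious %s at %s\n", what, k.val, where, what, prev)
		return
	}
	s.m[k] = pos
}

func main() {
	var s constSet
	s.add("x.go:3", constKey{"int", 1}, "case", "switch")
	s.add("x.go:4", constKey{"int", 2}, "case", "switch")
	s.add("x.go:5", constKey{"int", 1}, "case", "switch") // reports previous case at x.go:3
}

Storing only a position, rather than the whole expression node, is all the error message needs, which matches the change from map[constSetKey]*Node to map[constSetKey]src.XPos in the patch.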