Commit 515f04bd authored by George Hotz

minigeth builds
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
import "math/big"
// Common big integers often used
var (
Big1 = big.NewInt(1)
Big2 = big.NewInt(2)
Big3 = big.NewInt(3)
Big0 = big.NewInt(0)
Big32 = big.NewInt(32)
Big256 = big.NewInt(256)
Big257 = big.NewInt(257)
)
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package common contains various helper functions.
package common
import (
"encoding/hex"
)
// FromHex returns the bytes represented by the hexadecimal string s.
// s may be prefixed with "0x".
func FromHex(s string) []byte {
if has0xPrefix(s) {
s = s[2:]
}
if len(s)%2 == 1 {
s = "0" + s
}
return Hex2Bytes(s)
}
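// A minimal usage sketch: FromHex tolerates a missing prefix and odd-length
// input, padding with a leading zero before decoding.
//
//	FromHex("0x1")  // odd length, decoded as "01" => []byte{0x01}
//	FromHex("c0de") // prefix is optional          => []byte{0xc0, 0xde}
//	FromHex("0x")   // empty payload               => []byte{}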
// CopyBytes returns an exact copy of the provided bytes.
func CopyBytes(b []byte) (copiedBytes []byte) {
if b == nil {
return nil
}
copiedBytes = make([]byte, len(b))
copy(copiedBytes, b)
return
}
// has0xPrefix validates str begins with '0x' or '0X'.
func has0xPrefix(str string) bool {
return len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X')
}
// isHexCharacter reports whether c is a valid hexadecimal digit.
func isHexCharacter(c byte) bool {
return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')
}
// isHex reports whether str is a valid, even-length hexadecimal string.
func isHex(str string) bool {
if len(str)%2 != 0 {
return false
}
for _, c := range []byte(str) {
if !isHexCharacter(c) {
return false
}
}
return true
}
// Bytes2Hex returns the hexadecimal encoding of d.
func Bytes2Hex(d []byte) string {
return hex.EncodeToString(d)
}
// Hex2Bytes returns the bytes represented by the hexadecimal string str.
func Hex2Bytes(str string) []byte {
h, _ := hex.DecodeString(str)
return h
}
// Hex2BytesFixed returns bytes of a specified fixed length flen.
func Hex2BytesFixed(str string, flen int) []byte {
h, _ := hex.DecodeString(str)
if len(h) == flen {
return h
}
if len(h) > flen {
return h[len(h)-flen:]
}
hh := make([]byte, flen)
copy(hh[flen-len(h):flen], h)
return hh
}
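// Illustrative behaviour of the fixed-length decoder above: short input is
// left-padded with zeroes, long input keeps only the rightmost flen bytes.
//
//	Hex2BytesFixed("0102", 4)     // => []byte{0x00, 0x00, 0x01, 0x02}
//	Hex2BytesFixed("01020304", 2) // => []byte{0x03, 0x04}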
// RightPadBytes zero-pads slice to the right up to length l.
func RightPadBytes(slice []byte, l int) []byte {
if l <= len(slice) {
return slice
}
padded := make([]byte, l)
copy(padded, slice)
return padded
}
// LeftPadBytes zero-pads slice to the left up to length l.
func LeftPadBytes(slice []byte, l int) []byte {
if l <= len(slice) {
return slice
}
padded := make([]byte, l)
copy(padded[l-len(slice):], slice)
return padded
}
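// Padding sketch: both helpers allocate a fresh slice only when padding is
// needed; otherwise the input slice is returned as-is.
//
//	RightPadBytes([]byte{1, 2}, 4) // => []byte{1, 2, 0, 0}
//	LeftPadBytes([]byte{1, 2}, 4)  // => []byte{0, 0, 1, 2}
//	LeftPadBytes([]byte{1, 2}, 1)  // => []byte{1, 2} (unchanged, not copied)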
// TrimLeftZeroes returns a subslice of s without leading zeroes
func TrimLeftZeroes(s []byte) []byte {
idx := 0
for ; idx < len(s); idx++ {
if s[idx] != 0 {
break
}
}
return s[idx:]
}
// TrimRightZeroes returns a subslice of s without trailing zeroes
func TrimRightZeroes(s []byte) []byte {
idx := len(s)
for ; idx > 0; idx-- {
if s[idx-1] != 0 {
break
}
}
return s[:idx]
}
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
/*
Package hexutil implements hex encoding with 0x prefix.
This encoding is used by the Ethereum RPC API to transport binary data in JSON payloads.
Encoding Rules
All hex data must have prefix "0x".
For byte slices, the hex data must be of even length. An empty byte slice
encodes as "0x".
Integers are encoded using the least amount of digits (no leading zero digits). Their
encoding may be of uneven length. The number zero encodes as "0x0".
*/
package hexutil
import (
"encoding/hex"
"fmt"
"math/big"
"strconv"
)
const uintBits = 32 << (uint64(^uint(0)) >> 63)
// Errors
var (
ErrEmptyString = &decError{"empty hex string"}
ErrSyntax = &decError{"invalid hex string"}
ErrMissingPrefix = &decError{"hex string without 0x prefix"}
ErrOddLength = &decError{"hex string of odd length"}
ErrEmptyNumber = &decError{"hex string \"0x\""}
ErrLeadingZero = &decError{"hex number with leading zero digits"}
ErrUint64Range = &decError{"hex number > 64 bits"}
ErrUintRange = &decError{fmt.Sprintf("hex number > %d bits", uintBits)}
ErrBig256Range = &decError{"hex number > 256 bits"}
)
type decError struct{ msg string }
func (err decError) Error() string { return err.msg }
// Decode decodes a hex string with 0x prefix.
func Decode(input string) ([]byte, error) {
if len(input) == 0 {
return nil, ErrEmptyString
}
if !has0xPrefix(input) {
return nil, ErrMissingPrefix
}
b, err := hex.DecodeString(input[2:])
if err != nil {
err = mapError(err)
}
return b, err
}
// MustDecode decodes a hex string with 0x prefix. It panics for invalid input.
func MustDecode(input string) []byte {
dec, err := Decode(input)
if err != nil {
panic(err)
}
return dec
}
// Encode encodes b as a hex string with 0x prefix.
func Encode(b []byte) string {
enc := make([]byte, len(b)*2+2)
copy(enc, "0x")
hex.Encode(enc[2:], b)
return string(enc)
}
// DecodeUint64 decodes a hex string with 0x prefix as a quantity.
func DecodeUint64(input string) (uint64, error) {
raw, err := checkNumber(input)
if err != nil {
return 0, err
}
dec, err := strconv.ParseUint(raw, 16, 64)
if err != nil {
err = mapError(err)
}
return dec, err
}
// MustDecodeUint64 decodes a hex string with 0x prefix as a quantity.
// It panics for invalid input.
func MustDecodeUint64(input string) uint64 {
dec, err := DecodeUint64(input)
if err != nil {
panic(err)
}
return dec
}
// EncodeUint64 encodes i as a hex string with 0x prefix.
func EncodeUint64(i uint64) string {
enc := make([]byte, 2, 10)
copy(enc, "0x")
return string(strconv.AppendUint(enc, i, 16))
}
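// A short sketch of the two encodings described in the package comment: byte
// slices keep leading zeroes and may be empty, while quantities never carry
// leading zero digits.
//
//	Encode([]byte{})       // => "0x"
//	Encode([]byte{0x01})   // => "0x01"
//	EncodeUint64(0)        // => "0x0"
//	EncodeUint64(1024)     // => "0x400"
//	DecodeUint64("0x400")  // => 1024, nil
//	DecodeUint64("0x0400") // => 0, ErrLeadingZero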
var bigWordNibbles int
func init() {
// This is a weird way to compute the number of nibbles required for big.Word.
// The usual way would be to use constant arithmetic but go vet can't handle that.
b, _ := new(big.Int).SetString("FFFFFFFFFF", 16)
switch len(b.Bits()) {
case 1:
bigWordNibbles = 16
case 2:
bigWordNibbles = 8
default:
panic("weird big.Word size")
}
}
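// In other words: 0xFFFFFFFFFF is a 40-bit value, so it occupies a single
// big.Word on 64-bit platforms (16 hex nibbles per word) and two words on
// 32-bit platforms (8 nibbles per word).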
// DecodeBig decodes a hex string with 0x prefix as a quantity.
// Numbers larger than 256 bits are not accepted.
func DecodeBig(input string) (*big.Int, error) {
raw, err := checkNumber(input)
if err != nil {
return nil, err
}
if len(raw) > 64 {
return nil, ErrBig256Range
}
words := make([]big.Word, len(raw)/bigWordNibbles+1)
end := len(raw)
for i := range words {
start := end - bigWordNibbles
if start < 0 {
start = 0
}
for ri := start; ri < end; ri++ {
nib := decodeNibble(raw[ri])
if nib == badNibble {
return nil, ErrSyntax
}
words[i] *= 16
words[i] += big.Word(nib)
}
end = start
}
dec := new(big.Int).SetBits(words)
return dec, nil
}
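// Worked example of the word-packing loop above, on a 64-bit platform
// (bigWordNibbles == 16): for "0x1234" the raw digits "1234" fit into a
// single big.Word, accumulated as ((1*16+2)*16+3)*16+4 = 0x1234, so
//
//	DecodeBig("0x1234") // => 4660, nil
//	DecodeBig("0x01")   // => nil, ErrLeadingZero (quantity syntax)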
// MustDecodeBig decodes a hex string with 0x prefix as a quantity.
// It panics for invalid input.
func MustDecodeBig(input string) *big.Int {
dec, err := DecodeBig(input)
if err != nil {
panic(err)
}
return dec
}
// EncodeBig encodes bigint as a hex string with 0x prefix.
// The sign of the integer is ignored.
func EncodeBig(bigint *big.Int) string {
nbits := bigint.BitLen()
if nbits == 0 {
return "0x0"
}
return fmt.Sprintf("%#x", bigint)
}
func has0xPrefix(input string) bool {
return len(input) >= 2 && input[0] == '0' && (input[1] == 'x' || input[1] == 'X')
}
func checkNumber(input string) (raw string, err error) {
if len(input) == 0 {
return "", ErrEmptyString
}
if !has0xPrefix(input) {
return "", ErrMissingPrefix
}
input = input[2:]
if len(input) == 0 {
return "", ErrEmptyNumber
}
if len(input) > 1 && input[0] == '0' {
return "", ErrLeadingZero
}
return input, nil
}
const badNibble = ^uint64(0)
func decodeNibble(in byte) uint64 {
switch {
case in >= '0' && in <= '9':
return uint64(in - '0')
case in >= 'A' && in <= 'F':
return uint64(in - 'A' + 10)
case in >= 'a' && in <= 'f':
return uint64(in - 'a' + 10)
default:
return badNibble
}
}
func mapError(err error) error {
if err, ok := err.(*strconv.NumError); ok {
switch err.Err {
case strconv.ErrRange:
return ErrUint64Range
case strconv.ErrSyntax:
return ErrSyntax
}
}
if _, ok := err.(hex.InvalidByteError); ok {
return ErrSyntax
}
if err == hex.ErrLength {
return ErrOddLength
}
return err
}
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package hexutil
import (
"encoding/hex"
"encoding/json"
"fmt"
"math/big"
"reflect"
"strconv"
)
var (
bytesT = reflect.TypeOf(Bytes(nil))
bigT = reflect.TypeOf((*Big)(nil))
uintT = reflect.TypeOf(Uint(0))
uint64T = reflect.TypeOf(Uint64(0))
)
// Bytes marshals/unmarshals as a JSON string with 0x prefix.
// The empty slice marshals as "0x".
type Bytes []byte
// MarshalText implements encoding.TextMarshaler
func (b Bytes) MarshalText() ([]byte, error) {
result := make([]byte, len(b)*2+2)
copy(result, `0x`)
hex.Encode(result[2:], b)
return result, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (b *Bytes) UnmarshalJSON(input []byte) error {
if !isString(input) {
return errNonString(bytesT)
}
return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), bytesT)
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (b *Bytes) UnmarshalText(input []byte) error {
raw, err := checkText(input, true)
if err != nil {
return err
}
dec := make([]byte, len(raw)/2)
if _, err = hex.Decode(dec, raw); err != nil {
err = mapError(err)
} else {
*b = dec
}
return err
}
// String returns the hex encoding of b.
func (b Bytes) String() string {
return Encode(b)
}
// ImplementsGraphQLType returns true if Bytes implements the specified GraphQL type.
func (b Bytes) ImplementsGraphQLType(name string) bool { return name == "Bytes" }
// UnmarshalGraphQL unmarshals the provided GraphQL query data.
func (b *Bytes) UnmarshalGraphQL(input interface{}) error {
var err error
switch input := input.(type) {
case string:
data, err := Decode(input)
if err != nil {
return err
}
*b = data
default:
err = fmt.Errorf("unexpected type %T for Bytes", input)
}
return err
}
// UnmarshalFixedJSON decodes the input as a string with 0x prefix. The length of out
// determines the required input length. This function is commonly used to implement the
// UnmarshalJSON method for fixed-size types.
func UnmarshalFixedJSON(typ reflect.Type, input, out []byte) error {
if !isString(input) {
return errNonString(typ)
}
return wrapTypeError(UnmarshalFixedText(typ.String(), input[1:len(input)-1], out), typ)
}
// UnmarshalFixedText decodes the input as a string with 0x prefix. The length of out
// determines the required input length. This function is commonly used to implement the
// UnmarshalText method for fixed-size types.
func UnmarshalFixedText(typname string, input, out []byte) error {
raw, err := checkText(input, true)
if err != nil {
return err
}
if len(raw)/2 != len(out) {
return fmt.Errorf("hex string has length %d, want %d for %s", len(raw), len(out)*2, typname)
}
// Pre-verify syntax before modifying out.
for _, b := range raw {
if decodeNibble(b) == badNibble {
return ErrSyntax
}
}
hex.Decode(out, raw)
return nil
}
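// A minimal sketch of how a fixed-size type would hook into the helper above
// (the ID type here is hypothetical, not part of this package):
//
//	type ID [4]byte
//
//	func (id *ID) UnmarshalText(input []byte) error {
//		return UnmarshalFixedText("ID", input, id[:])
//	}
//
// With that, "0xdeadbeef" fills all four bytes, while "0xdead" fails with a
// "hex string has length 4, want 8 for ID" error.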
// UnmarshalFixedUnprefixedText decodes the input as a string with optional 0x prefix. The
// length of out determines the required input length. This function is commonly used to
// implement the UnmarshalText method for fixed-size types.
func UnmarshalFixedUnprefixedText(typname string, input, out []byte) error {
raw, err := checkText(input, false)
if err != nil {
return err
}
if len(raw)/2 != len(out) {
return fmt.Errorf("hex string has length %d, want %d for %s", len(raw), len(out)*2, typname)
}
// Pre-verify syntax before modifying out.
for _, b := range raw {
if decodeNibble(b) == badNibble {
return ErrSyntax
}
}
hex.Decode(out, raw)
return nil
}
// Big marshals/unmarshals as a JSON string with 0x prefix.
// The zero value marshals as "0x0".
//
// Negative integers are not supported at this time. Attempting to marshal them will
// return an error. Values larger than 256 bits are rejected by Unmarshal but will be
// marshaled without error.
type Big big.Int
// MarshalText implements encoding.TextMarshaler
func (b Big) MarshalText() ([]byte, error) {
return []byte(EncodeBig((*big.Int)(&b))), nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (b *Big) UnmarshalJSON(input []byte) error {
if !isString(input) {
return errNonString(bigT)
}
return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), bigT)
}
// UnmarshalText implements encoding.TextUnmarshaler
func (b *Big) UnmarshalText(input []byte) error {
raw, err := checkNumberText(input)
if err != nil {
return err
}
if len(raw) > 64 {
return ErrBig256Range
}
words := make([]big.Word, len(raw)/bigWordNibbles+1)
end := len(raw)
for i := range words {
start := end - bigWordNibbles
if start < 0 {
start = 0
}
for ri := start; ri < end; ri++ {
nib := decodeNibble(raw[ri])
if nib == badNibble {
return ErrSyntax
}
words[i] *= 16
words[i] += big.Word(nib)
}
end = start
}
var dec big.Int
dec.SetBits(words)
*b = (Big)(dec)
return nil
}
// ToInt converts b to a big.Int.
func (b *Big) ToInt() *big.Int {
return (*big.Int)(b)
}
// String returns the hex encoding of b.
func (b *Big) String() string {
return EncodeBig(b.ToInt())
}
// ImplementsGraphQLType returns true if Big implements the provided GraphQL type.
func (b Big) ImplementsGraphQLType(name string) bool { return name == "BigInt" }
// UnmarshalGraphQL unmarshals the provided GraphQL query data.
func (b *Big) UnmarshalGraphQL(input interface{}) error {
var err error
switch input := input.(type) {
case string:
return b.UnmarshalText([]byte(input))
case int32:
var num big.Int
num.SetInt64(int64(input))
*b = Big(num)
default:
err = fmt.Errorf("unexpected type %T for BigInt", input)
}
return err
}
// Uint64 marshals/unmarshals as a JSON string with 0x prefix.
// The zero value marshals as "0x0".
type Uint64 uint64
// MarshalText implements encoding.TextMarshaler.
func (b Uint64) MarshalText() ([]byte, error) {
buf := make([]byte, 2, 10)
copy(buf, `0x`)
buf = strconv.AppendUint(buf, uint64(b), 16)
return buf, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (b *Uint64) UnmarshalJSON(input []byte) error {
if !isString(input) {
return errNonString(uint64T)
}
return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), uint64T)
}
// UnmarshalText implements encoding.TextUnmarshaler
func (b *Uint64) UnmarshalText(input []byte) error {
raw, err := checkNumberText(input)
if err != nil {
return err
}
if len(raw) > 16 {
return ErrUint64Range
}
var dec uint64
for _, byte := range raw {
nib := decodeNibble(byte)
if nib == badNibble {
return ErrSyntax
}
dec *= 16
dec += nib
}
*b = Uint64(dec)
return nil
}
// String returns the hex encoding of b.
func (b Uint64) String() string {
return EncodeUint64(uint64(b))
}
// ImplementsGraphQLType returns true if Uint64 implements the provided GraphQL type.
func (b Uint64) ImplementsGraphQLType(name string) bool { return name == "Long" }
// UnmarshalGraphQL unmarshals the provided GraphQL query data.
func (b *Uint64) UnmarshalGraphQL(input interface{}) error {
var err error
switch input := input.(type) {
case string:
return b.UnmarshalText([]byte(input))
case int32:
*b = Uint64(input)
default:
err = fmt.Errorf("unexpected type %T for Long", input)
}
return err
}
// Uint marshals/unmarshals as a JSON string with 0x prefix.
// The zero value marshals as "0x0".
type Uint uint
// MarshalText implements encoding.TextMarshaler.
func (b Uint) MarshalText() ([]byte, error) {
return Uint64(b).MarshalText()
}
// UnmarshalJSON implements json.Unmarshaler.
func (b *Uint) UnmarshalJSON(input []byte) error {
if !isString(input) {
return errNonString(uintT)
}
return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), uintT)
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (b *Uint) UnmarshalText(input []byte) error {
var u64 Uint64
err := u64.UnmarshalText(input)
if u64 > Uint64(^uint(0)) || err == ErrUint64Range {
return ErrUintRange
} else if err != nil {
return err
}
*b = Uint(u64)
return nil
}
// String returns the hex encoding of b.
func (b Uint) String() string {
return EncodeUint64(uint64(b))
}
func isString(input []byte) bool {
return len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"'
}
func bytesHave0xPrefix(input []byte) bool {
return len(input) >= 2 && input[0] == '0' && (input[1] == 'x' || input[1] == 'X')
}
func checkText(input []byte, wantPrefix bool) ([]byte, error) {
if len(input) == 0 {
return nil, nil // empty strings are allowed
}
if bytesHave0xPrefix(input) {
input = input[2:]
} else if wantPrefix {
return nil, ErrMissingPrefix
}
if len(input)%2 != 0 {
return nil, ErrOddLength
}
return input, nil
}
func checkNumberText(input []byte) (raw []byte, err error) {
if len(input) == 0 {
return nil, nil // empty strings are allowed
}
if !bytesHave0xPrefix(input) {
return nil, ErrMissingPrefix
}
input = input[2:]
if len(input) == 0 {
return nil, ErrEmptyNumber
}
if len(input) > 1 && input[0] == '0' {
return nil, ErrLeadingZero
}
return input, nil
}
func wrapTypeError(err error, typ reflect.Type) error {
if _, ok := err.(*decError); ok {
return &json.UnmarshalTypeError{Value: err.Error(), Type: typ}
}
return err
}
func errNonString(typ reflect.Type) error {
return &json.UnmarshalTypeError{Value: "non-string", Type: typ}
}
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package math provides integer math utilities.
package math
import (
"fmt"
"math/big"
)
// Various big integer limit values.
var (
tt255 = BigPow(2, 255)
tt256 = BigPow(2, 256)
tt256m1 = new(big.Int).Sub(tt256, big.NewInt(1))
tt63 = BigPow(2, 63)
MaxBig256 = new(big.Int).Set(tt256m1)
MaxBig63 = new(big.Int).Sub(tt63, big.NewInt(1))
)
const (
// number of bits in a big.Word
wordBits = 32 << (uint64(^big.Word(0)) >> 63)
// number of bytes in a big.Word
wordBytes = wordBits / 8
)
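// The shift trick above (also used for uintBits in the hexutil package) works
// because ^big.Word(0) is all ones: converted to uint64 and shifted right by
// 63 it yields 1 on a 64-bit platform and 0 on a 32-bit one, so wordBits is
// 64 or 32 and wordBytes is 8 or 4 respectively.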
// HexOrDecimal256 marshals big.Int as hex or decimal.
type HexOrDecimal256 big.Int
// NewHexOrDecimal256 creates a new HexOrDecimal256
func NewHexOrDecimal256(x int64) *HexOrDecimal256 {
b := big.NewInt(x)
h := HexOrDecimal256(*b)
return &h
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (i *HexOrDecimal256) UnmarshalText(input []byte) error {
bigint, ok := ParseBig256(string(input))
if !ok {
return fmt.Errorf("invalid hex or decimal integer %q", input)
}
*i = HexOrDecimal256(*bigint)
return nil
}
// MarshalText implements encoding.TextMarshaler.
func (i *HexOrDecimal256) MarshalText() ([]byte, error) {
if i == nil {
return []byte("0x0"), nil
}
return []byte(fmt.Sprintf("%#x", (*big.Int)(i))), nil
}
// Decimal256 marshals big.Int as a decimal string. When unmarshalling, it
// accepts either "0x"-prefixed (hex-encoded) or non-prefixed (decimal) input.
type Decimal256 big.Int
// NewDecimal256 creates a new Decimal256
func NewDecimal256(x int64) *Decimal256 {
b := big.NewInt(x)
d := Decimal256(*b)
return &d
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (i *Decimal256) UnmarshalText(input []byte) error {
bigint, ok := ParseBig256(string(input))
if !ok {
return fmt.Errorf("invalid hex or decimal integer %q", input)
}
*i = Decimal256(*bigint)
return nil
}
// MarshalText implements encoding.TextMarshaler.
func (i *Decimal256) MarshalText() ([]byte, error) {
return []byte(i.String()), nil
}
// String implements Stringer.
func (i *Decimal256) String() string {
if i == nil {
return "0"
}
return fmt.Sprintf("%#d", (*big.Int)(i))
}
// ParseBig256 parses s as a 256 bit integer in decimal or hexadecimal syntax.
// Leading zeros are accepted. The empty string parses as zero.
func ParseBig256(s string) (*big.Int, bool) {
if s == "" {
return new(big.Int), true
}
var bigint *big.Int
var ok bool
if len(s) >= 2 && (s[:2] == "0x" || s[:2] == "0X") {
bigint, ok = new(big.Int).SetString(s[2:], 16)
} else {
bigint, ok = new(big.Int).SetString(s, 10)
}
if ok && bigint.BitLen() > 256 {
bigint, ok = nil, false
}
return bigint, ok
}
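// A few illustrative inputs, per the rules in the comment above:
//
//	ParseBig256("")       // => 0, true (empty string parses as zero)
//	ParseBig256("10")     // => 10, true (decimal)
//	ParseBig256("0x10")   // => 16, true (hex)
//	ParseBig256("0x0010") // => 16, true (leading zeros accepted here)
//	// Any value with more than 256 bits => nil, false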
// MustParseBig256 parses s as a 256 bit big integer and panics if the string is invalid.
func MustParseBig256(s string) *big.Int {
v, ok := ParseBig256(s)
if !ok {
panic("invalid 256 bit integer: " + s)
}
return v
}
// BigPow returns a ** b as a big integer.
func BigPow(a, b int64) *big.Int {
r := big.NewInt(a)
return r.Exp(r, big.NewInt(b), nil)
}
// BigMax returns the larger of x or y.
func BigMax(x, y *big.Int) *big.Int {
if x.Cmp(y) < 0 {
return y
}
return x
}
// BigMin returns the smaller of x or y.
func BigMin(x, y *big.Int) *big.Int {
if x.Cmp(y) > 0 {
return y
}
return x
}
// FirstBitSet returns the index of the first 1 bit in v, counting from LSB.
func FirstBitSet(v *big.Int) int {
for i := 0; i < v.BitLen(); i++ {
if v.Bit(i) > 0 {
return i
}
}
return v.BitLen()
}
// PaddedBigBytes encodes a big integer as a big-endian byte slice. The length
// of the slice is at least n bytes.
func PaddedBigBytes(bigint *big.Int, n int) []byte {
if bigint.BitLen()/8 >= n {
return bigint.Bytes()
}
ret := make([]byte, n)
ReadBits(bigint, ret)
return ret
}
// bigEndianByteAt returns the byte at position n of bigint's big-endian
// representation, counting from the least significant end, so n == 0 returns
// the least significant byte.
func bigEndianByteAt(bigint *big.Int, n int) byte {
words := bigint.Bits()
// Check word-bucket the byte will reside in
i := n / wordBytes
if i >= len(words) {
return byte(0)
}
word := words[i]
// Offset of the byte
shift := 8 * uint(n%wordBytes)
return byte(word >> shift)
}
// Byte returns the byte at position n, counting from the most significant
// byte of bigint padded to padlength bytes, so n == 0 returns the MSB.
// Example: bigint '5', padlength 32, n=31 => 5
func Byte(bigint *big.Int, padlength, n int) byte {
if n >= padlength {
return byte(0)
}
return bigEndianByteAt(bigint, padlength-1-n)
}
// ReadBits encodes the absolute value of bigint as big-endian bytes. Callers must ensure
// that buf has enough space. If buf is too short the result will be incomplete.
func ReadBits(bigint *big.Int, buf []byte) {
i := len(buf)
for _, d := range bigint.Bits() {
for j := 0; j < wordBytes && i > 0; j++ {
i--
buf[i] = byte(d)
d >>= 8
}
}
}
// U256 encodes x as a 256 bit two's complement number. This operation is destructive.
func U256(x *big.Int) *big.Int {
return x.And(x, tt256m1)
}
// U256Bytes converts a big.Int into a 256-bit EVM number.
// This operation is destructive.
func U256Bytes(n *big.Int) []byte {
return PaddedBigBytes(U256(n), 32)
}
// S256 interprets x as a two's complement number.
// x must not exceed 256 bits (the result is undefined if it does) and is not modified.
//
// S256(0) = 0
// S256(1) = 1
// S256(2**255) = -2**255
// S256(2**256-1) = -1
func S256(x *big.Int) *big.Int {
if x.Cmp(tt255) < 0 {
return x
}
return new(big.Int).Sub(x, tt256)
}
// Exp implements exponentiation by squaring.
// Exp returns a newly-allocated big integer and does not change the exponent.
// Note that base is modified in place. The result is truncated to 256 bits.
//
// Courtesy @karalabe and @chfast
func Exp(base, exponent *big.Int) *big.Int {
result := big.NewInt(1)
for _, word := range exponent.Bits() {
for i := 0; i < wordBits; i++ {
if word&1 == 1 {
U256(result.Mul(result, base))
}
U256(base.Mul(base, base))
word >>= 1
}
}
return result
}
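// Worked example of the square-and-multiply loop: for Exp(2, 10) the exponent
// bits 1010 are scanned LSB-first; at the two set bits the running base values
// are 4 and 256, so the result is 4 * 256 = 1024. Both intermediate products
// are reduced modulo 2**256 via U256.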
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
import (
"fmt"
)
// StorageSize is a wrapper around a float value that supports user friendly
// formatting.
type StorageSize float64
// String implements the stringer interface.
func (s StorageSize) String() string {
if s > 1099511627776 {
return fmt.Sprintf("%.2f TiB", s/1099511627776)
} else if s > 1073741824 {
return fmt.Sprintf("%.2f GiB", s/1073741824)
} else if s > 1048576 {
return fmt.Sprintf("%.2f MiB", s/1048576)
} else if s > 1024 {
return fmt.Sprintf("%.2f KiB", s/1024)
} else {
return fmt.Sprintf("%.2f B", s)
}
}
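// Formatting sketch: the thresholds are strict, so a value of exactly 1024
// still prints in bytes.
//
//	StorageSize(2048).String()    // => "2.00 KiB"
//	StorageSize(1572864).String() // => "1.50 MiB"
//	StorageSize(1024).String()    // => "1024.00 B"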
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging.
func (s StorageSize) TerminalString() string {
if s > 1099511627776 {
return fmt.Sprintf("%.2fTiB", s/1099511627776)
} else if s > 1073741824 {
return fmt.Sprintf("%.2fGiB", s/1073741824)
} else if s > 1048576 {
return fmt.Sprintf("%.2fMiB", s/1048576)
} else if s > 1024 {
return fmt.Sprintf("%.2fKiB", s/1024)
} else {
return fmt.Sprintf("%.2fB", s)
}
}
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
import (
"bytes"
"database/sql/driver"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math/big"
"math/rand"
"reflect"
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"golang.org/x/crypto/sha3"
)
// Lengths of hashes and addresses in bytes.
const (
// HashLength is the expected length of the hash
HashLength = 32
// AddressLength is the expected length of the address
AddressLength = 20
)
var (
hashT = reflect.TypeOf(Hash{})
addressT = reflect.TypeOf(Address{})
)
// Hash represents the 32 byte Keccak256 hash of arbitrary data.
type Hash [HashLength]byte
// BytesToHash sets b to hash.
// If b is larger than len(h), b will be cropped from the left.
func BytesToHash(b []byte) Hash {
var h Hash
h.SetBytes(b)
return h
}
// BigToHash sets byte representation of b to hash.
// If b is larger than len(h), b will be cropped from the left.
func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }
// HexToHash sets byte representation of s to hash.
// If b is larger than len(h), b will be cropped from the left.
func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) }
// Bytes gets the byte representation of the underlying hash.
func (h Hash) Bytes() []byte { return h[:] }
// Big converts a hash to a big integer.
func (h Hash) Big() *big.Int { return new(big.Int).SetBytes(h[:]) }
// Hex converts a hash to a hex string.
func (h Hash) Hex() string { return hexutil.Encode(h[:]) }
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging.
func (h Hash) TerminalString() string {
return fmt.Sprintf("%x..%x", h[:3], h[29:])
}
// String implements the stringer interface and is used also by the logger when
// doing full logging into a file.
func (h Hash) String() string {
return h.Hex()
}
// Format implements fmt.Formatter.
// Hash supports the %v, %s, %q, %x, %X and %d format verbs.
func (h Hash) Format(s fmt.State, c rune) {
hexb := make([]byte, 2+len(h)*2)
copy(hexb, "0x")
hex.Encode(hexb[2:], h[:])
switch c {
case 'x', 'X':
if !s.Flag('#') {
hexb = hexb[2:]
}
if c == 'X' {
hexb = bytes.ToUpper(hexb)
}
fallthrough
case 'v', 's':
s.Write(hexb)
case 'q':
q := []byte{'"'}
s.Write(q)
s.Write(hexb)
s.Write(q)
case 'd':
fmt.Fprint(s, ([len(h)]byte)(h))
default:
fmt.Fprintf(s, "%%!%c(hash=%x)", c, h)
}
}
// UnmarshalText parses a hash in hex syntax.
func (h *Hash) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("Hash", input, h[:])
}
// UnmarshalJSON parses a hash in hex syntax.
func (h *Hash) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalFixedJSON(hashT, input, h[:])
}
// MarshalText returns the hex representation of h.
func (h Hash) MarshalText() ([]byte, error) {
return hexutil.Bytes(h[:]).MarshalText()
}
// SetBytes sets the hash to the value of b.
// If b is larger than len(h), b will be cropped from the left.
func (h *Hash) SetBytes(b []byte) {
if len(b) > len(h) {
b = b[len(b)-HashLength:]
}
copy(h[HashLength-len(b):], b)
}
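// Cropping/padding sketch: oversized input keeps its rightmost 32 bytes,
// short input is left-padded with zeroes.
//
//	BytesToHash(make([]byte, 33)) // keeps the last 32 bytes
//	BytesToHash([]byte{0x01})     // => 31 zero bytes followed by 0x01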
// Generate implements testing/quick.Generator.
func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value {
m := rand.Intn(len(h))
for i := len(h) - 1; i > m; i-- {
h[i] = byte(rand.Uint32())
}
return reflect.ValueOf(h)
}
// Scan implements Scanner for database/sql.
func (h *Hash) Scan(src interface{}) error {
srcB, ok := src.([]byte)
if !ok {
return fmt.Errorf("can't scan %T into Hash", src)
}
if len(srcB) != HashLength {
return fmt.Errorf("can't scan []byte of len %d into Hash, want %d", len(srcB), HashLength)
}
copy(h[:], srcB)
return nil
}
// Value implements valuer for database/sql.
func (h Hash) Value() (driver.Value, error) {
return h[:], nil
}
// ImplementsGraphQLType returns true if Hash implements the specified GraphQL type.
func (Hash) ImplementsGraphQLType(name string) bool { return name == "Bytes32" }
// UnmarshalGraphQL unmarshals the provided GraphQL query data.
func (h *Hash) UnmarshalGraphQL(input interface{}) error {
var err error
switch input := input.(type) {
case string:
err = h.UnmarshalText([]byte(input))
default:
err = fmt.Errorf("unexpected type %T for Hash", input)
}
return err
}
// UnprefixedHash allows marshaling a Hash without 0x prefix.
type UnprefixedHash Hash
// UnmarshalText decodes the hash from hex. The 0x prefix is optional.
func (h *UnprefixedHash) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedUnprefixedText("UnprefixedHash", input, h[:])
}
// MarshalText encodes the hash as hex.
func (h UnprefixedHash) MarshalText() ([]byte, error) {
return []byte(hex.EncodeToString(h[:])), nil
}
/////////// Address
// Address represents the 20 byte address of an Ethereum account.
type Address [AddressLength]byte
// BytesToAddress returns Address with value b.
// If b is larger than len(h), b will be cropped from the left.
func BytesToAddress(b []byte) Address {
var a Address
a.SetBytes(b)
return a
}
// BigToAddress returns Address with byte values of b.
// If b is larger than len(h), b will be cropped from the left.
func BigToAddress(b *big.Int) Address { return BytesToAddress(b.Bytes()) }
// HexToAddress returns Address with byte values of s.
// If s is larger than len(h), s will be cropped from the left.
func HexToAddress(s string) Address { return BytesToAddress(FromHex(s)) }
// IsHexAddress verifies whether a string can represent a valid hex-encoded
// Ethereum address or not.
func IsHexAddress(s string) bool {
if has0xPrefix(s) {
s = s[2:]
}
return len(s) == 2*AddressLength && isHex(s)
}
// Bytes gets the string representation of the underlying address.
func (a Address) Bytes() []byte { return a[:] }
// Hash converts an address to a hash by left-padding it with zeros.
func (a Address) Hash() Hash { return BytesToHash(a[:]) }
// Hex returns an EIP55-compliant hex string representation of the address.
func (a Address) Hex() string {
return string(a.checksumHex())
}
// String implements fmt.Stringer.
func (a Address) String() string {
return a.Hex()
}
func (a *Address) checksumHex() []byte {
buf := a.hex()
// compute checksum
sha := sha3.NewLegacyKeccak256()
sha.Write(buf[2:])
hash := sha.Sum(nil)
for i := 2; i < len(buf); i++ {
hashByte := hash[(i-2)/2]
if i%2 == 0 {
hashByte = hashByte >> 4
} else {
hashByte &= 0xf
}
if buf[i] > '9' && hashByte > 7 {
buf[i] -= 32
}
}
return buf[:]
}
func (a Address) hex() []byte {
var buf [len(a)*2 + 2]byte
copy(buf[:2], "0x")
hex.Encode(buf[2:], a[:])
return buf[:]
}
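// The checksumming above follows EIP-55: each letter of the lowercase hex
// address is uppercased when the corresponding nibble of
// keccak256(lowercase hex address) is 8 or higher. For example, using the
// first test vector listed in EIP-55:
//
//	HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed").Hex()
//	// => "0x5aAeb6053F3E94C9b9A09f33669435E7Ef1BeAed"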
// Format implements fmt.Formatter.
// Address supports the %v, %s, %q, %x, %X and %d format verbs.
func (a Address) Format(s fmt.State, c rune) {
switch c {
case 'v', 's':
s.Write(a.checksumHex())
case 'q':
q := []byte{'"'}
s.Write(q)
s.Write(a.checksumHex())
s.Write(q)
case 'x', 'X':
// %x disables the checksum.
hex := a.hex()
if !s.Flag('#') {
hex = hex[2:]
}
if c == 'X' {
hex = bytes.ToUpper(hex)
}
s.Write(hex)
case 'd':
fmt.Fprint(s, ([len(a)]byte)(a))
default:
fmt.Fprintf(s, "%%!%c(address=%x)", c, a)
}
}
// SetBytes sets the address to the value of b.
// If b is larger than len(a), b will be cropped from the left.
func (a *Address) SetBytes(b []byte) {
if len(b) > len(a) {
b = b[len(b)-AddressLength:]
}
copy(a[AddressLength-len(b):], b)
}
// MarshalText returns the hex representation of a.
func (a Address) MarshalText() ([]byte, error) {
return hexutil.Bytes(a[:]).MarshalText()
}
// UnmarshalText parses an address in hex syntax.
func (a *Address) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("Address", input, a[:])
}
// UnmarshalJSON parses an address in hex syntax.
func (a *Address) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalFixedJSON(addressT, input, a[:])
}
// Scan implements Scanner for database/sql.
func (a *Address) Scan(src interface{}) error {
srcB, ok := src.([]byte)
if !ok {
return fmt.Errorf("can't scan %T into Address", src)
}
if len(srcB) != AddressLength {
return fmt.Errorf("can't scan []byte of len %d into Address, want %d", len(srcB), AddressLength)
}
copy(a[:], srcB)
return nil
}
// Value implements valuer for database/sql.
func (a Address) Value() (driver.Value, error) {
return a[:], nil
}
// ImplementsGraphQLType returns true if Address implements the specified GraphQL type.
func (a Address) ImplementsGraphQLType(name string) bool { return name == "Address" }
// UnmarshalGraphQL unmarshals the provided GraphQL query data.
func (a *Address) UnmarshalGraphQL(input interface{}) error {
var err error
switch input := input.(type) {
case string:
err = a.UnmarshalText([]byte(input))
default:
err = fmt.Errorf("unexpected type %T for Address", input)
}
return err
}
// UnprefixedAddress allows marshaling an Address without 0x prefix.
type UnprefixedAddress Address
// UnmarshalText decodes the address from hex. The 0x prefix is optional.
func (a *UnprefixedAddress) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedUnprefixedText("UnprefixedAddress", input, a[:])
}
// MarshalText encodes the address as hex.
func (a UnprefixedAddress) MarshalText() ([]byte, error) {
return []byte(hex.EncodeToString(a[:])), nil
}
// MixedcaseAddress retains the original string, which may or may not be
// correctly checksummed
type MixedcaseAddress struct {
addr Address
original string
}
// NewMixedcaseAddress constructor (mainly for testing)
func NewMixedcaseAddress(addr Address) MixedcaseAddress {
return MixedcaseAddress{addr: addr, original: addr.Hex()}
}
// NewMixedcaseAddressFromString is mainly meant for unit-testing
func NewMixedcaseAddressFromString(hexaddr string) (*MixedcaseAddress, error) {
if !IsHexAddress(hexaddr) {
return nil, errors.New("invalid address")
}
a := FromHex(hexaddr)
return &MixedcaseAddress{addr: BytesToAddress(a), original: hexaddr}, nil
}
// UnmarshalJSON parses MixedcaseAddress
func (ma *MixedcaseAddress) UnmarshalJSON(input []byte) error {
if err := hexutil.UnmarshalFixedJSON(addressT, input, ma.addr[:]); err != nil {
return err
}
return json.Unmarshal(input, &ma.original)
}
// MarshalJSON marshals the original value
func (ma *MixedcaseAddress) MarshalJSON() ([]byte, error) {
if strings.HasPrefix(ma.original, "0x") || strings.HasPrefix(ma.original, "0X") {
return json.Marshal(fmt.Sprintf("0x%s", ma.original[2:]))
}
return json.Marshal(fmt.Sprintf("0x%s", ma.original))
}
// Address returns the address
func (ma *MixedcaseAddress) Address() Address {
return ma.addr
}
// String implements fmt.Stringer
func (ma *MixedcaseAddress) String() string {
if ma.ValidChecksum() {
return fmt.Sprintf("%s [chksum ok]", ma.original)
}
return fmt.Sprintf("%s [chksum INVALID]", ma.original)
}
// ValidChecksum returns true if the address has valid checksum
func (ma *MixedcaseAddress) ValidChecksum() bool {
return ma.original == ma.addr.Hex()
}
// Original returns the mixed-case input string
func (ma *MixedcaseAddress) Original() string {
return ma.original
}
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package consensus implements different Ethereum consensus engines.
package consensus
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
//"github.com/ethereum/go-ethereum/rpc"
)
// ChainHeaderReader defines a small collection of methods needed to access the local
// blockchain during header verification.
type ChainHeaderReader interface {
// Config retrieves the blockchain's chain configuration.
Config() *params.ChainConfig
// CurrentHeader retrieves the current header from the local chain.
CurrentHeader() *types.Header
// GetHeader retrieves a block header from the database by hash and number.
GetHeader(hash common.Hash, number uint64) *types.Header
// GetHeaderByNumber retrieves a block header from the database by number.
GetHeaderByNumber(number uint64) *types.Header
// GetHeaderByHash retrieves a block header from the database by its hash.
GetHeaderByHash(hash common.Hash) *types.Header
}
// ChainReader defines a small collection of methods needed to access the local
// blockchain during header and/or uncle verification.
type ChainReader interface {
ChainHeaderReader
// GetBlock retrieves a block from the database by hash and number.
GetBlock(hash common.Hash, number uint64) *types.Block
}
// Engine is an algorithm agnostic consensus engine.
type Engine interface {
// Author retrieves the Ethereum address of the account that minted the given
// block, which may be different from the header's coinbase if a consensus
// engine is based on signatures.
Author(header *types.Header) (common.Address, error)
// VerifyHeader checks whether a header conforms to the consensus rules of a
// given engine. Verifying the seal may be done optionally here, or explicitly
// via the VerifySeal method.
VerifyHeader(chain ChainHeaderReader, header *types.Header, seal bool) error
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications (the order is that of
// the input slice).
VerifyHeaders(chain ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)
// VerifyUncles verifies that the given block's uncles conform to the consensus
// rules of a given engine.
VerifyUncles(chain ChainReader, block *types.Block) error
// Prepare initializes the consensus fields of a block header according to the
// rules of a particular engine. The changes are executed inline.
Prepare(chain ChainHeaderReader, header *types.Header) error
// Finalize runs any post-transaction state modifications (e.g. block rewards)
// but does not assemble the block.
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
Finalize(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
uncles []*types.Header)
// FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
// rewards) and assembles the final block.
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error)
// Seal generates a new sealing request for the given input block and pushes
// the result into the given channel.
//
// Note, the method returns immediately and will send the result async. More
// than one result may also be returned depending on the consensus algorithm.
Seal(chain ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error
// SealHash returns the hash of a block prior to it being sealed.
SealHash(header *types.Header) common.Hash
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have.
CalcDifficulty(chain ChainHeaderReader, time uint64, parent *types.Header) *big.Int
// APIs returns the RPC APIs this consensus engine provides.
//APIs(chain ChainHeaderReader) []rpc.API
// Close terminates any background threads maintained by the consensus engine.
Close() error
}
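// A minimal caller-side sketch for the batch verifier above (chain, headers
// and seals are assumed to be in scope):
//
//	abort, results := engine.VerifyHeaders(chain, headers, seals)
//	defer close(abort)
//	for range headers {
//		if err := <-results; err != nil {
//			return err // results arrive in input order
//		}
//	}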
// PoW is a consensus engine based on proof-of-work.
type PoW interface {
Engine
// Hashrate returns the current mining hashrate of a PoW consensus engine.
Hashrate() float64
}
package core
type BlockChain struct {
// TODO: write stub BlockChain
}
package state
type StateDB struct {
// TODO: write stub StateDB
}
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/params"
/*"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"*/)
// StateProcessor is a basic Processor, which takes care of transitioning
// state from one point to another.
//
// StateProcessor implements Processor.
type StateProcessor struct {
config *params.ChainConfig // Chain configuration options
bc *BlockChain // Canonical block chain
engine consensus.Engine // Consensus engine used for block rewards
}
// NewStateProcessor initialises a new StateProcessor.
func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine) *StateProcessor {
return &StateProcessor{
config: config,
bc: bc,
engine: engine,
}
}
/*
// Process processes the state changes according to the Ethereum rules by running
// the transaction messages using the statedb and applying any rewards to both
// the processor (coinbase) and any included uncles.
//
// Process returns the receipts and logs accumulated during the process and
// returns the amount of gas that was used in the process. If any of the
// transactions failed to execute due to insufficient gas it will return an error.
func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) {
var (
receipts types.Receipts
usedGas = new(uint64)
header = block.Header()
blockHash = block.Hash()
blockNumber = block.Number()
allLogs []*types.Log
gp = new(GasPool).AddGas(block.GasLimit())
)
// Mutate the block and state according to any hard-fork specs
if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
misc.ApplyDAOHardFork(statedb)
}
blockContext := NewEVMBlockContext(header, p.bc, nil)
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg)
// Iterate over and process the individual transactions
for i, tx := range block.Transactions() {
msg, err := tx.AsMessage(types.MakeSigner(p.config, header.Number), header.BaseFee)
if err != nil {
return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
}
statedb.Prepare(tx.Hash(), i)
receipt, err := applyTransaction(msg, p.config, p.bc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv)
if err != nil {
return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
}
receipts = append(receipts, receipt)
allLogs = append(allLogs, receipt.Logs...)
}
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles())
return receipts, allLogs, *usedGas, nil
}
func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) {
// Create a new context to be used in the EVM environment.
txContext := NewEVMTxContext(msg)
evm.Reset(txContext, statedb)
// Apply the transaction to the current state (included in the env).
result, err := ApplyMessage(evm, msg, gp)
if err != nil {
return nil, err
}
// Update the state with pending changes.
var root []byte
if config.IsByzantium(blockNumber) {
statedb.Finalise(true)
} else {
root = statedb.IntermediateRoot(config.IsEIP158(blockNumber)).Bytes()
}
*usedGas += result.UsedGas
// Create a new receipt for the transaction, storing the intermediate root and gas used
// by the tx.
receipt := &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: *usedGas}
if result.Failed() {
receipt.Status = types.ReceiptStatusFailed
} else {
receipt.Status = types.ReceiptStatusSuccessful
}
receipt.TxHash = tx.Hash()
receipt.GasUsed = result.UsedGas
// If the transaction created a contract, store the creation address in the receipt.
if msg.To() == nil {
receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce())
}
// Set the receipt logs and create the bloom filter.
receipt.Logs = statedb.GetLogs(tx.Hash(), blockHash)
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
receipt.BlockHash = blockHash
receipt.BlockNumber = blockNumber
receipt.TransactionIndex = uint(statedb.TxIndex())
return receipt, err
}
// ApplyTransaction attempts to apply a transaction to the given state database
// and uses the input parameters for its environment. It returns the receipt
// for the transaction, gas used and an error if the transaction failed,
// indicating the block was invalid.
func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, error) {
msg, err := tx.AsMessage(types.MakeSigner(config, header.Number), header.BaseFee)
if err != nil {
return nil, err
}
// Create a new context to be used in the EVM environment
blockContext := NewEVMBlockContext(header, bc, author)
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg)
return applyTransaction(msg, config, bc, author, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv)
}
*/
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
)
//go:generate gencodec -type AccessTuple -out gen_access_tuple.go
// AccessList is an EIP-2930 access list.
type AccessList []AccessTuple
// AccessTuple is the element type of an access list.
type AccessTuple struct {
Address common.Address `json:"address" gencodec:"required"`
StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"`
}
// StorageKeys returns the total number of storage keys in the access list.
func (al AccessList) StorageKeys() int {
sum := 0
for _, tuple := range al {
sum += len(tuple.StorageKeys)
}
return sum
}
// AccessListTx is the data of EIP-2930 access list transactions.
type AccessListTx struct {
ChainID *big.Int // destination chain ID
Nonce uint64 // nonce of sender account
GasPrice *big.Int // wei per gas
Gas uint64 // gas limit
To *common.Address `rlp:"nil"` // nil means contract creation
Value *big.Int // wei amount
Data []byte // contract invocation input data
AccessList AccessList // EIP-2930 access list
V, R, S *big.Int // signature values
}
// copy creates a deep copy of the transaction data and initializes all fields.
func (tx *AccessListTx) copy() TxData {
cpy := &AccessListTx{
Nonce: tx.Nonce,
To: tx.To, // TODO: copy pointed-to address
Data: common.CopyBytes(tx.Data),
Gas: tx.Gas,
// These are copied below.
AccessList: make(AccessList, len(tx.AccessList)),
Value: new(big.Int),
ChainID: new(big.Int),
GasPrice: new(big.Int),
V: new(big.Int),
R: new(big.Int),
S: new(big.Int),
}
copy(cpy.AccessList, tx.AccessList)
if tx.Value != nil {
cpy.Value.Set(tx.Value)
}
if tx.ChainID != nil {
cpy.ChainID.Set(tx.ChainID)
}
if tx.GasPrice != nil {
cpy.GasPrice.Set(tx.GasPrice)
}
if tx.V != nil {
cpy.V.Set(tx.V)
}
if tx.R != nil {
cpy.R.Set(tx.R)
}
if tx.S != nil {
cpy.S.Set(tx.S)
}
return cpy
}
// accessors for innerTx.
func (tx *AccessListTx) txType() byte { return AccessListTxType }
func (tx *AccessListTx) chainID() *big.Int { return tx.ChainID }
func (tx *AccessListTx) protected() bool { return true }
func (tx *AccessListTx) accessList() AccessList { return tx.AccessList }
func (tx *AccessListTx) data() []byte { return tx.Data }
func (tx *AccessListTx) gas() uint64 { return tx.Gas }
func (tx *AccessListTx) gasPrice() *big.Int { return tx.GasPrice }
func (tx *AccessListTx) gasTipCap() *big.Int { return tx.GasPrice }
func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice }
func (tx *AccessListTx) value() *big.Int { return tx.Value }
func (tx *AccessListTx) nonce() uint64 { return tx.Nonce }
func (tx *AccessListTx) to() *common.Address { return tx.To }
func (tx *AccessListTx) rawSignatureValues() (v, r, s *big.Int) {
return tx.V, tx.R, tx.S
}
func (tx *AccessListTx) setSignatureValues(chainID, v, r, s *big.Int) {
tx.ChainID, tx.V, tx.R, tx.S = chainID, v, r, s
}
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package types contains data types related to Ethereum consensus.
package types
import (
"encoding/binary"
"fmt"
"io"
"math/big"
"reflect"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
)
var (
EmptyRootHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
EmptyUncleHash = rlpHash([]*Header(nil))
)
// A BlockNonce is a 64-bit hash which proves (combined with the
// mix-hash) that a sufficient amount of computation has been carried
// out on a block.
type BlockNonce [8]byte
// EncodeNonce converts the given integer to a block nonce.
func EncodeNonce(i uint64) BlockNonce {
var n BlockNonce
binary.BigEndian.PutUint64(n[:], i)
return n
}
// Uint64 returns the integer value of a block nonce.
func (n BlockNonce) Uint64() uint64 {
return binary.BigEndian.Uint64(n[:])
}
// MarshalText encodes n as a hex string with 0x prefix.
func (n BlockNonce) MarshalText() ([]byte, error) {
return hexutil.Bytes(n[:]).MarshalText()
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (n *BlockNonce) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("BlockNonce", input, n[:])
}
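// A short usage sketch (not part of the original file) for the nonce helpers
// above, assuming the standard 0x-prefixed hex text encoding:
//
//	n := EncodeNonce(42)
//	_ = n.Uint64()            // 42
//	txt, _ := n.MarshalText() // "0x000000000000002a"
//	_ = txt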
//go:generate gencodec -type Header -field-override headerMarshaling -out gen_header_json.go
// Header represents a block header in the Ethereum blockchain.
type Header struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase common.Address `json:"miner" gencodec:"required"`
Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *big.Int `json:"difficulty" gencodec:"required"`
Number *big.Int `json:"number" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Time uint64 `json:"timestamp" gencodec:"required"`
Extra []byte `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
// BaseFee was added by EIP-1559 and is ignored in legacy headers.
BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
}
// field type overrides for gencodec
type headerMarshaling struct {
Difficulty *hexutil.Big
Number *hexutil.Big
GasLimit hexutil.Uint64
GasUsed hexutil.Uint64
Time hexutil.Uint64
Extra hexutil.Bytes
BaseFee *hexutil.Big
Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON
}
// Hash returns the block hash of the header, which is simply the keccak256 hash of its
// RLP encoding.
func (h *Header) Hash() common.Hash {
return rlpHash(h)
}
var headerSize = common.StorageSize(reflect.TypeOf(Header{}).Size())
// Size returns the approximate memory used by all internal contents. It is used
// to approximate and limit the memory consumption of various caches.
func (h *Header) Size() common.StorageSize {
return headerSize + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen())/8)
}
// SanityCheck checks a few basic things -- these limits are far beyond what any
// 'sane' production values should reach, and mainly serve to prevent the
// unbounded fields from being stuffed with junk data that adds processing
// overhead.
func (h *Header) SanityCheck() error {
if h.Number != nil && !h.Number.IsUint64() {
return fmt.Errorf("too large block number: bitlen %d", h.Number.BitLen())
}
if h.Difficulty != nil {
if diffLen := h.Difficulty.BitLen(); diffLen > 80 {
return fmt.Errorf("too large block difficulty: bitlen %d", diffLen)
}
}
if eLen := len(h.Extra); eLen > 100*1024 {
return fmt.Errorf("too large block extradata: size %d", eLen)
}
if h.BaseFee != nil {
if bfLen := h.BaseFee.BitLen(); bfLen > 256 {
return fmt.Errorf("too large base fee: bitlen %d", bfLen)
}
}
return nil
}
// EmptyBody returns true if there is no additional 'body' to complete the header,
// that is: no transactions and no uncles.
func (h *Header) EmptyBody() bool {
return h.TxHash == EmptyRootHash && h.UncleHash == EmptyUncleHash
}
// EmptyReceipts returns true if there are no receipts for this header/block.
func (h *Header) EmptyReceipts() bool {
return h.ReceiptHash == EmptyRootHash
}
// Body is a simple (mutable, non-safe) data container for storing and moving
// a block's data contents (transactions and uncles) together.
type Body struct {
Transactions []*Transaction
Uncles []*Header
}
// Block represents an entire block in the Ethereum blockchain.
type Block struct {
header *Header
uncles []*Header
transactions Transactions
// caches
hash atomic.Value
size atomic.Value
// Td is used by package core to store the total difficulty
// of the chain up to and including the block.
td *big.Int
// These fields are used by package eth to track
// inter-peer block relay.
ReceivedAt time.Time
ReceivedFrom interface{}
}
// "external" block encoding. used for eth protocol, etc.
type extblock struct {
Header *Header
Txs []*Transaction
Uncles []*Header
}
// NewBlock creates a new block. The input data is copied,
// changes to the header and to the field values will not affect the
// block.
//
// The values of TxHash, UncleHash, ReceiptHash and Bloom in header
// are ignored and set to values derived from the given txs, uncles
// and receipts.
func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher TrieHasher) *Block {
b := &Block{header: CopyHeader(header), td: new(big.Int)}
// TODO: panic if len(txs) != len(receipts)
if len(txs) == 0 {
b.header.TxHash = EmptyRootHash
} else {
b.header.TxHash = DeriveSha(Transactions(txs), hasher)
b.transactions = make(Transactions, len(txs))
copy(b.transactions, txs)
}
if len(receipts) == 0 {
b.header.ReceiptHash = EmptyRootHash
} else {
b.header.ReceiptHash = DeriveSha(Receipts(receipts), hasher)
b.header.Bloom = CreateBloom(receipts)
}
if len(uncles) == 0 {
b.header.UncleHash = EmptyUncleHash
} else {
b.header.UncleHash = CalcUncleHash(uncles)
b.uncles = make([]*Header, len(uncles))
for i := range uncles {
b.uncles[i] = CopyHeader(uncles[i])
}
}
return b
}
// NewBlockWithHeader creates a block with the given header data. The
// header data is copied; changes to the header and to the field values
// will not affect the block.
func NewBlockWithHeader(header *Header) *Block {
return &Block{header: CopyHeader(header)}
}
// CopyHeader creates a deep copy of a block header to prevent side effects from
// modifying a header variable.
func CopyHeader(h *Header) *Header {
cpy := *h
if cpy.Difficulty = new(big.Int); h.Difficulty != nil {
cpy.Difficulty.Set(h.Difficulty)
}
if cpy.Number = new(big.Int); h.Number != nil {
cpy.Number.Set(h.Number)
}
if h.BaseFee != nil {
cpy.BaseFee = new(big.Int).Set(h.BaseFee)
}
if len(h.Extra) > 0 {
cpy.Extra = make([]byte, len(h.Extra))
copy(cpy.Extra, h.Extra)
}
return &cpy
}
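// A minimal sketch of the deep-copy guarantee above: mutating the copy leaves
// the original header untouched.
//
//	h := &Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
//	c := CopyHeader(h)
//	c.Number.SetUint64(2) // h.Number still reads 1, because Number was deep-copied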
// DecodeRLP decodes a block from the Ethereum RLP block format.
func (b *Block) DecodeRLP(s *rlp.Stream) error {
var eb extblock
_, size, _ := s.Kind()
if err := s.Decode(&eb); err != nil {
return err
}
b.header, b.uncles, b.transactions = eb.Header, eb.Uncles, eb.Txs
b.size.Store(common.StorageSize(rlp.ListSize(size)))
return nil
}
// EncodeRLP serializes b into the Ethereum RLP block format.
func (b *Block) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, extblock{
Header: b.header,
Txs: b.transactions,
Uncles: b.uncles,
})
}
// TODO: copies
func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions }
func (b *Block) Transaction(hash common.Hash) *Transaction {
for _, transaction := range b.transactions {
if transaction.Hash() == hash {
return transaction
}
}
return nil
}
func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number) }
func (b *Block) GasLimit() uint64 { return b.header.GasLimit }
func (b *Block) GasUsed() uint64 { return b.header.GasUsed }
func (b *Block) Difficulty() *big.Int { return new(big.Int).Set(b.header.Difficulty) }
func (b *Block) Time() uint64 { return b.header.Time }
func (b *Block) NumberU64() uint64 { return b.header.Number.Uint64() }
func (b *Block) MixDigest() common.Hash { return b.header.MixDigest }
func (b *Block) Nonce() uint64 { return binary.BigEndian.Uint64(b.header.Nonce[:]) }
func (b *Block) Bloom() Bloom { return b.header.Bloom }
func (b *Block) Coinbase() common.Address { return b.header.Coinbase }
func (b *Block) Root() common.Hash { return b.header.Root }
func (b *Block) ParentHash() common.Hash { return b.header.ParentHash }
func (b *Block) TxHash() common.Hash { return b.header.TxHash }
func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash }
func (b *Block) UncleHash() common.Hash { return b.header.UncleHash }
func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) }
func (b *Block) BaseFee() *big.Int {
if b.header.BaseFee == nil {
return nil
}
return new(big.Int).Set(b.header.BaseFee)
}
func (b *Block) Header() *Header { return CopyHeader(b.header) }
// Body returns the non-header content of the block.
func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles} }
// Size returns the true RLP encoded storage size of the block, either by encoding
// and returning it, or returning a previously cached value.
func (b *Block) Size() common.StorageSize {
if size := b.size.Load(); size != nil {
return size.(common.StorageSize)
}
c := writeCounter(0)
rlp.Encode(&c, b)
b.size.Store(common.StorageSize(c))
return common.StorageSize(c)
}
// SanityCheck can be used to prevent the unbounded fields from being
// stuffed with junk data that adds processing overhead.
func (b *Block) SanityCheck() error {
return b.header.SanityCheck()
}
type writeCounter common.StorageSize
func (c *writeCounter) Write(b []byte) (int, error) {
*c += writeCounter(len(b))
return len(b), nil
}
func CalcUncleHash(uncles []*Header) common.Hash {
if len(uncles) == 0 {
return EmptyUncleHash
}
return rlpHash(uncles)
}
// WithSeal returns a new block with the data from b but the header replaced with
// the sealed one.
func (b *Block) WithSeal(header *Header) *Block {
cpy := *header
return &Block{
header: &cpy,
transactions: b.transactions,
uncles: b.uncles,
}
}
// WithBody returns a new block with the given transaction and uncle contents.
func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block {
block := &Block{
header: CopyHeader(b.header),
transactions: make([]*Transaction, len(transactions)),
uncles: make([]*Header, len(uncles)),
}
copy(block.transactions, transactions)
for i := range uncles {
block.uncles[i] = CopyHeader(uncles[i])
}
return block
}
// Hash returns the keccak256 hash of b's header.
// The hash is computed on the first call and cached thereafter.
func (b *Block) Hash() common.Hash {
if hash := b.hash.Load(); hash != nil {
return hash.(common.Hash)
}
v := b.header.Hash()
b.hash.Store(v)
return v
}
type Blocks []*Block
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"encoding/binary"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
)
type bytesBacked interface {
Bytes() []byte
}
const (
// BloomByteLength represents the number of bytes used in a header log bloom.
BloomByteLength = 256
// BloomBitLength represents the number of bits used in a header log bloom.
BloomBitLength = 8 * BloomByteLength
)
// Bloom represents a 2048 bit bloom filter.
type Bloom [BloomByteLength]byte
// BytesToBloom converts a byte slice to a bloom filter.
// It panics if b is not of suitable size.
func BytesToBloom(b []byte) Bloom {
var bloom Bloom
bloom.SetBytes(b)
return bloom
}
// SetBytes sets the content of b to the given bytes.
// It panics if d is not of suitable size.
func (b *Bloom) SetBytes(d []byte) {
if len(b) < len(d) {
panic(fmt.Sprintf("bloom bytes too big %d %d", len(b), len(d)))
}
copy(b[BloomByteLength-len(d):], d)
}
// Add adds d to the filter. Future calls of Test(d) will return true.
func (b *Bloom) Add(d []byte) {
b.add(d, make([]byte, 6))
}
// add is the internal version of Add, which takes a scratch buffer for reuse (needs to be at least 6 bytes).
func (b *Bloom) add(d []byte, buf []byte) {
i1, v1, i2, v2, i3, v3 := bloomValues(d, buf)
b[i1] |= v1
b[i2] |= v2
b[i3] |= v3
}
// Big converts b to a big integer.
// Note: converting a bloom filter to a big.Int and then calling Bytes
// does not return the same bytes, since big.Int trims leading zeroes.
func (b Bloom) Big() *big.Int {
return new(big.Int).SetBytes(b[:])
}
// Bytes returns the backing byte slice of the bloom
func (b Bloom) Bytes() []byte {
return b[:]
}
// Test checks if the given topic is present in the bloom filter
func (b Bloom) Test(topic []byte) bool {
i1, v1, i2, v2, i3, v3 := bloomValues(topic, make([]byte, 6))
return v1 == v1&b[i1] &&
v2 == v2&b[i2] &&
v3 == v3&b[i3]
}
// MarshalText encodes b as a hex string with 0x prefix.
func (b Bloom) MarshalText() ([]byte, error) {
return hexutil.Bytes(b[:]).MarshalText()
}
// UnmarshalText decodes b from a hex string with 0x prefix.
func (b *Bloom) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("Bloom", input, b[:])
}
// CreateBloom creates a bloom filter out of the given Receipts (+Logs).
func CreateBloom(receipts Receipts) Bloom {
buf := make([]byte, 6)
var bin Bloom
for _, receipt := range receipts {
for _, log := range receipt.Logs {
bin.add(log.Address.Bytes(), buf)
for _, b := range log.Topics {
bin.add(b[:], buf)
}
}
}
return bin
}
// LogsBloom returns the bloom bytes for the given logs
func LogsBloom(logs []*Log) []byte {
buf := make([]byte, 6)
var bin Bloom
for _, log := range logs {
bin.add(log.Address.Bytes(), buf)
for _, b := range log.Topics {
bin.add(b[:], buf)
}
}
return bin[:]
}
// Bloom9 returns the bloom filter for the given data
func Bloom9(data []byte) []byte {
var b Bloom
b.SetBytes(data)
return b.Bytes()
}
// bloomValues returns the bytes (index-value pairs) to set for the given data
func bloomValues(data []byte, hashbuf []byte) (uint, byte, uint, byte, uint, byte) {
sha := hasherPool.Get().(crypto.KeccakState)
sha.Reset()
sha.Write(data)
sha.Read(hashbuf)
hasherPool.Put(sha)
// The actual bits to flip
v1 := byte(1 << (hashbuf[1] & 0x7))
v2 := byte(1 << (hashbuf[3] & 0x7))
v3 := byte(1 << (hashbuf[5] & 0x7))
// The indices for the bytes to OR in
i1 := BloomByteLength - uint((binary.BigEndian.Uint16(hashbuf)&0x7ff)>>3) - 1
i2 := BloomByteLength - uint((binary.BigEndian.Uint16(hashbuf[2:])&0x7ff)>>3) - 1
i3 := BloomByteLength - uint((binary.BigEndian.Uint16(hashbuf[4:])&0x7ff)>>3) - 1
return i1, v1, i2, v2, i3, v3
}
// BloomLookup is a convenience method to check presence in the bloom filter.
func BloomLookup(bin Bloom, topic bytesBacked) bool {
return bin.Test(topic.Bytes())
}
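// A minimal round-trip sketch of the bloom helpers above:
//
//	var bloom Bloom
//	topic := []byte("transfer")
//	bloom.Add(topic)
//	_ = bloom.Test(topic)           // true: the three bits set by Add are present
//	_ = bloom.Test([]byte("other")) // usually false, though bloom filters allow false positives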
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
)
type DynamicFeeTx struct {
ChainID *big.Int
Nonce uint64
GasTipCap *big.Int
GasFeeCap *big.Int
Gas uint64
To *common.Address `rlp:"nil"` // nil means contract creation
Value *big.Int
Data []byte
AccessList AccessList
// Signature values
V *big.Int `json:"v" gencodec:"required"`
R *big.Int `json:"r" gencodec:"required"`
S *big.Int `json:"s" gencodec:"required"`
}
// copy creates a deep copy of the transaction data and initializes all fields.
func (tx *DynamicFeeTx) copy() TxData {
cpy := &DynamicFeeTx{
Nonce: tx.Nonce,
To: tx.To, // TODO: copy pointed-to address
Data: common.CopyBytes(tx.Data),
Gas: tx.Gas,
// These are copied below.
AccessList: make(AccessList, len(tx.AccessList)),
Value: new(big.Int),
ChainID: new(big.Int),
GasTipCap: new(big.Int),
GasFeeCap: new(big.Int),
V: new(big.Int),
R: new(big.Int),
S: new(big.Int),
}
copy(cpy.AccessList, tx.AccessList)
if tx.Value != nil {
cpy.Value.Set(tx.Value)
}
if tx.ChainID != nil {
cpy.ChainID.Set(tx.ChainID)
}
if tx.GasTipCap != nil {
cpy.GasTipCap.Set(tx.GasTipCap)
}
if tx.GasFeeCap != nil {
cpy.GasFeeCap.Set(tx.GasFeeCap)
}
if tx.V != nil {
cpy.V.Set(tx.V)
}
if tx.R != nil {
cpy.R.Set(tx.R)
}
if tx.S != nil {
cpy.S.Set(tx.S)
}
return cpy
}
// accessors for innerTx.
func (tx *DynamicFeeTx) txType() byte { return DynamicFeeTxType }
func (tx *DynamicFeeTx) chainID() *big.Int { return tx.ChainID }
func (tx *DynamicFeeTx) protected() bool { return true }
func (tx *DynamicFeeTx) accessList() AccessList { return tx.AccessList }
func (tx *DynamicFeeTx) data() []byte { return tx.Data }
func (tx *DynamicFeeTx) gas() uint64 { return tx.Gas }
func (tx *DynamicFeeTx) gasFeeCap() *big.Int { return tx.GasFeeCap }
func (tx *DynamicFeeTx) gasTipCap() *big.Int { return tx.GasTipCap }
func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap }
func (tx *DynamicFeeTx) value() *big.Int { return tx.Value }
func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce }
func (tx *DynamicFeeTx) to() *common.Address { return tx.To }
func (tx *DynamicFeeTx) rawSignatureValues() (v, r, s *big.Int) {
return tx.V, tx.R, tx.S
}
func (tx *DynamicFeeTx) setSignatureValues(chainID, v, r, s *big.Int) {
tx.ChainID, tx.V, tx.R, tx.S = chainID, v, r, s
}
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"bytes"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
// hasherPool holds LegacyKeccak256 hashers for rlpHash.
var hasherPool = sync.Pool{
New: func() interface{} { return sha3.NewLegacyKeccak256() },
}
// encodeBufferPool holds temporary encoder buffers for DeriveSha and tx encoding.
var encodeBufferPool = sync.Pool{
New: func() interface{} { return new(bytes.Buffer) },
}
// rlpHash encodes x and hashes the encoded bytes.
func rlpHash(x interface{}) (h common.Hash) {
sha := hasherPool.Get().(crypto.KeccakState)
defer hasherPool.Put(sha)
sha.Reset()
rlp.Encode(sha, x)
sha.Read(h[:])
return h
}
// prefixedRlpHash writes the prefix into the hasher before rlp-encoding x.
// It's used for typed transactions.
func prefixedRlpHash(prefix byte, x interface{}) (h common.Hash) {
sha := hasherPool.Get().(crypto.KeccakState)
defer hasherPool.Put(sha)
sha.Reset()
sha.Write([]byte{prefix})
rlp.Encode(sha, x)
sha.Read(h[:])
return h
}
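// A sketch of how the two hashing helpers are used later in this package:
// legacy transactions are hashed over their plain RLP encoding, while typed
// (EIP-2718) payloads are hashed over the one-byte type prefix followed by the
// RLP of the inner payload. Here innerTx and txType stand for an arbitrary
// payload and its type byte.
//
//	legacyHash := rlpHash(innerTx)                // keccak256(rlp(innerTx))
//	typedHash := prefixedRlpHash(txType, innerTx) // keccak256(txType || rlp(innerTx))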
// TrieHasher is the tool used to calculate the hash of derivable list.
// This is internal, do not use.
type TrieHasher interface {
Reset()
Update([]byte, []byte)
Hash() common.Hash
}
// DerivableList is the input to DeriveSha.
// It is implemented by the 'Transactions' and 'Receipts' types.
// This is internal, do not use these methods.
type DerivableList interface {
Len() int
EncodeIndex(int, *bytes.Buffer)
}
func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte {
buf.Reset()
list.EncodeIndex(i, buf)
// It's really unfortunate that we need to perform this copy.
// StackTrie holds onto the values until Hash is called, so the values
// written to it must not alias.
return common.CopyBytes(buf.Bytes())
}
// DeriveSha creates the tree hashes of transactions and receipts in a block header.
func DeriveSha(list DerivableList, hasher TrieHasher) common.Hash {
hasher.Reset()
valueBuf := encodeBufferPool.Get().(*bytes.Buffer)
defer encodeBufferPool.Put(valueBuf)
// StackTrie requires values to be inserted in increasing key order, which is not
// the order in which `list` provides them. This insertion sequence ensures that
// the keys (the RLP-encoded indices) arrive in the correct order.
var indexBuf []byte
for i := 1; i < list.Len() && i <= 0x7f; i++ {
indexBuf = rlp.AppendUint64(indexBuf[:0], uint64(i))
value := encodeForDerive(list, i, valueBuf)
hasher.Update(indexBuf, value)
}
if list.Len() > 0 {
indexBuf = rlp.AppendUint64(indexBuf[:0], 0)
value := encodeForDerive(list, 0, valueBuf)
hasher.Update(indexBuf, value)
}
for i := 0x80; i < list.Len(); i++ {
indexBuf = rlp.AppendUint64(indexBuf[:0], uint64(i))
value := encodeForDerive(list, i, valueBuf)
hasher.Update(indexBuf, value)
}
return hasher.Hash()
}
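// A worked example of the insertion order above for a 200-element list: the keys
// are the RLP encodings of the indices, and the three loops emit them in
// increasing key order.
//
//	rlp(1)   = 0x01        \
//	...                     | first loop (indices 1..0x7f)
//	rlp(127) = 0x7f        /
//	rlp(0)   = 0x80          second loop (index 0)
//	rlp(128) = 0x81 0x80   \
//	...                     | third loop (indices 0x80..199)
//	rlp(199) = 0x81 0xc7   /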
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
)
// LegacyTx is the transaction data of regular Ethereum transactions.
type LegacyTx struct {
Nonce uint64 // nonce of sender account
GasPrice *big.Int // wei per gas
Gas uint64 // gas limit
To *common.Address `rlp:"nil"` // nil means contract creation
Value *big.Int // wei amount
Data []byte // contract invocation input data
V, R, S *big.Int // signature values
}
// NewTransaction creates an unsigned legacy transaction.
// Deprecated: use NewTx instead.
func NewTransaction(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction {
return NewTx(&LegacyTx{
Nonce: nonce,
To: &to,
Value: amount,
Gas: gasLimit,
GasPrice: gasPrice,
Data: data,
})
}
// NewContractCreation creates an unsigned legacy transaction.
// Deprecated: use NewTx instead.
func NewContractCreation(nonce uint64, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction {
return NewTx(&LegacyTx{
Nonce: nonce,
Value: amount,
Gas: gasLimit,
GasPrice: gasPrice,
Data: data,
})
}
// copy creates a deep copy of the transaction data and initializes all fields.
func (tx *LegacyTx) copy() TxData {
cpy := &LegacyTx{
Nonce: tx.Nonce,
To: tx.To, // TODO: copy pointed-to address
Data: common.CopyBytes(tx.Data),
Gas: tx.Gas,
// These are initialized below.
Value: new(big.Int),
GasPrice: new(big.Int),
V: new(big.Int),
R: new(big.Int),
S: new(big.Int),
}
if tx.Value != nil {
cpy.Value.Set(tx.Value)
}
if tx.GasPrice != nil {
cpy.GasPrice.Set(tx.GasPrice)
}
if tx.V != nil {
cpy.V.Set(tx.V)
}
if tx.R != nil {
cpy.R.Set(tx.R)
}
if tx.S != nil {
cpy.S.Set(tx.S)
}
return cpy
}
// accessors for innerTx.
func (tx *LegacyTx) txType() byte { return LegacyTxType }
func (tx *LegacyTx) chainID() *big.Int { return deriveChainId(tx.V) }
func (tx *LegacyTx) accessList() AccessList { return nil }
func (tx *LegacyTx) data() []byte { return tx.Data }
func (tx *LegacyTx) gas() uint64 { return tx.Gas }
func (tx *LegacyTx) gasPrice() *big.Int { return tx.GasPrice }
func (tx *LegacyTx) gasTipCap() *big.Int { return tx.GasPrice }
func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice }
func (tx *LegacyTx) value() *big.Int { return tx.Value }
func (tx *LegacyTx) nonce() uint64 { return tx.Nonce }
func (tx *LegacyTx) to() *common.Address { return tx.To }
func (tx *LegacyTx) rawSignatureValues() (v, r, s *big.Int) {
return tx.V, tx.R, tx.S
}
func (tx *LegacyTx) setSignatureValues(chainID, v, r, s *big.Int) {
tx.V, tx.R, tx.S = v, r, s
}
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"io"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
)
//go:generate gencodec -type Log -field-override logMarshaling -out gen_log_json.go
// Log represents a contract log event. These events are generated by the LOG opcode and
// stored/indexed by the node.
type Log struct {
// Consensus fields:
// address of the contract that generated the event
Address common.Address `json:"address" gencodec:"required"`
// list of topics provided by the contract.
Topics []common.Hash `json:"topics" gencodec:"required"`
// supplied by the contract, usually ABI-encoded
Data []byte `json:"data" gencodec:"required"`
// Derived fields. These fields are filled in by the node
// but not secured by consensus.
// block in which the transaction was included
BlockNumber uint64 `json:"blockNumber"`
// hash of the transaction
TxHash common.Hash `json:"transactionHash" gencodec:"required"`
// index of the transaction in the block
TxIndex uint `json:"transactionIndex"`
// hash of the block in which the transaction was included
BlockHash common.Hash `json:"blockHash"`
// index of the log in the block
Index uint `json:"logIndex"`
// The Removed field is true if this log was reverted due to a chain reorganisation.
// You must pay attention to this field if you receive logs through a filter query.
Removed bool `json:"removed"`
}
type logMarshaling struct {
Data hexutil.Bytes
BlockNumber hexutil.Uint64
TxIndex hexutil.Uint
Index hexutil.Uint
}
type rlpLog struct {
Address common.Address
Topics []common.Hash
Data []byte
}
// rlpStorageLog is the storage encoding of a log.
type rlpStorageLog rlpLog
// legacyRlpStorageLog is the previous storage encoding of a log including some redundant fields.
type legacyRlpStorageLog struct {
Address common.Address
Topics []common.Hash
Data []byte
BlockNumber uint64
TxHash common.Hash
TxIndex uint
BlockHash common.Hash
Index uint
}
// EncodeRLP implements rlp.Encoder.
func (l *Log) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data})
}
// DecodeRLP implements rlp.Decoder.
func (l *Log) DecodeRLP(s *rlp.Stream) error {
var dec rlpLog
err := s.Decode(&dec)
if err == nil {
l.Address, l.Topics, l.Data = dec.Address, dec.Topics, dec.Data
}
return err
}
// LogForStorage is a wrapper around a Log that flattens and parses the entire content of
// a log including non-consensus fields.
type LogForStorage Log
// EncodeRLP implements rlp.Encoder.
func (l *LogForStorage) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, rlpStorageLog{
Address: l.Address,
Topics: l.Topics,
Data: l.Data,
})
}
// DecodeRLP implements rlp.Decoder.
//
// Note that some redundant fields (e.g. block number, tx hash) will be assembled later.
func (l *LogForStorage) DecodeRLP(s *rlp.Stream) error {
blob, err := s.Raw()
if err != nil {
return err
}
var dec rlpStorageLog
err = rlp.DecodeBytes(blob, &dec)
if err == nil {
*l = LogForStorage{
Address: dec.Address,
Topics: dec.Topics,
Data: dec.Data,
}
} else {
// Try to decode log with previous definition.
var dec legacyRlpStorageLog
err = rlp.DecodeBytes(blob, &dec)
if err == nil {
*l = LogForStorage{
Address: dec.Address,
Topics: dec.Topics,
Data: dec.Data,
}
}
}
return err
}
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"bytes"
"errors"
"fmt"
"io"
"math/big"
"unsafe"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
//go:generate gencodec -type Receipt -field-override receiptMarshaling -out gen_receipt_json.go
var (
receiptStatusFailedRLP = []byte{}
receiptStatusSuccessfulRLP = []byte{0x01}
)
// This error is returned when a typed receipt is decoded, but the string is empty.
var errEmptyTypedReceipt = errors.New("empty typed receipt bytes")
const (
// ReceiptStatusFailed is the status code of a transaction if execution failed.
ReceiptStatusFailed = uint64(0)
// ReceiptStatusSuccessful is the status code of a transaction if execution succeeded.
ReceiptStatusSuccessful = uint64(1)
)
// Receipt represents the results of a transaction.
type Receipt struct {
// Consensus fields: These fields are defined by the Yellow Paper
Type uint8 `json:"type,omitempty"`
PostState []byte `json:"root"`
Status uint64 `json:"status"`
CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required"`
Bloom Bloom `json:"logsBloom" gencodec:"required"`
Logs []*Log `json:"logs" gencodec:"required"`
// Implementation fields: These fields are added by geth when processing a transaction.
// They are stored in the chain database.
TxHash common.Hash `json:"transactionHash" gencodec:"required"`
ContractAddress common.Address `json:"contractAddress"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
// Inclusion information: These fields provide information about the inclusion of the
// transaction corresponding to this receipt.
BlockHash common.Hash `json:"blockHash,omitempty"`
BlockNumber *big.Int `json:"blockNumber,omitempty"`
TransactionIndex uint `json:"transactionIndex"`
}
type receiptMarshaling struct {
Type hexutil.Uint64
PostState hexutil.Bytes
Status hexutil.Uint64
CumulativeGasUsed hexutil.Uint64
GasUsed hexutil.Uint64
BlockNumber *hexutil.Big
TransactionIndex hexutil.Uint
}
// receiptRLP is the consensus encoding of a receipt.
type receiptRLP struct {
PostStateOrStatus []byte
CumulativeGasUsed uint64
Bloom Bloom
Logs []*Log
}
// storedReceiptRLP is the storage encoding of a receipt.
type storedReceiptRLP struct {
PostStateOrStatus []byte
CumulativeGasUsed uint64
Logs []*LogForStorage
}
// v4StoredReceiptRLP is the storage encoding of a receipt used in database version 4.
type v4StoredReceiptRLP struct {
PostStateOrStatus []byte
CumulativeGasUsed uint64
TxHash common.Hash
ContractAddress common.Address
Logs []*LogForStorage
GasUsed uint64
}
// v3StoredReceiptRLP is the original storage encoding of a receipt including some unnecessary fields.
type v3StoredReceiptRLP struct {
PostStateOrStatus []byte
CumulativeGasUsed uint64
Bloom Bloom
TxHash common.Hash
ContractAddress common.Address
Logs []*LogForStorage
GasUsed uint64
}
// NewReceipt creates a barebone transaction receipt, copying the init fields.
// Deprecated: create receipts using a struct literal instead.
func NewReceipt(root []byte, failed bool, cumulativeGasUsed uint64) *Receipt {
r := &Receipt{
Type: LegacyTxType,
PostState: common.CopyBytes(root),
CumulativeGasUsed: cumulativeGasUsed,
}
if failed {
r.Status = ReceiptStatusFailed
} else {
r.Status = ReceiptStatusSuccessful
}
return r
}
// EncodeRLP implements rlp.Encoder, and flattens the consensus fields of a receipt
// into an RLP stream. If no post state is present, byzantium fork is assumed.
func (r *Receipt) EncodeRLP(w io.Writer) error {
data := &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs}
if r.Type == LegacyTxType {
return rlp.Encode(w, data)
}
buf := encodeBufferPool.Get().(*bytes.Buffer)
defer encodeBufferPool.Put(buf)
buf.Reset()
buf.WriteByte(r.Type)
if err := rlp.Encode(buf, data); err != nil {
return err
}
return rlp.Encode(w, buf.Bytes())
}
// DecodeRLP implements rlp.Decoder, and loads the consensus fields of a receipt
// from an RLP stream.
func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
kind, _, err := s.Kind()
switch {
case err != nil:
return err
case kind == rlp.List:
// It's a legacy receipt.
var dec receiptRLP
if err := s.Decode(&dec); err != nil {
return err
}
r.Type = LegacyTxType
return r.setFromRLP(dec)
case kind == rlp.String:
// It's an EIP-2718 typed tx receipt.
b, err := s.Bytes()
if err != nil {
return err
}
if len(b) == 0 {
return errEmptyTypedReceipt
}
r.Type = b[0]
if r.Type == AccessListTxType || r.Type == DynamicFeeTxType {
var dec receiptRLP
if err := rlp.DecodeBytes(b[1:], &dec); err != nil {
return err
}
return r.setFromRLP(dec)
}
return ErrTxTypeNotSupported
default:
return rlp.ErrExpectedList
}
}
func (r *Receipt) setFromRLP(data receiptRLP) error {
r.CumulativeGasUsed, r.Bloom, r.Logs = data.CumulativeGasUsed, data.Bloom, data.Logs
return r.setStatus(data.PostStateOrStatus)
}
func (r *Receipt) setStatus(postStateOrStatus []byte) error {
switch {
case bytes.Equal(postStateOrStatus, receiptStatusSuccessfulRLP):
r.Status = ReceiptStatusSuccessful
case bytes.Equal(postStateOrStatus, receiptStatusFailedRLP):
r.Status = ReceiptStatusFailed
case len(postStateOrStatus) == len(common.Hash{}):
r.PostState = postStateOrStatus
default:
return fmt.Errorf("invalid receipt status %x", postStateOrStatus)
}
return nil
}
func (r *Receipt) statusEncoding() []byte {
if len(r.PostState) == 0 {
if r.Status == ReceiptStatusFailed {
return receiptStatusFailedRLP
}
return receiptStatusSuccessfulRLP
}
return r.PostState
}
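// The three possible encodings produced above, for reference:
//
//	pre-Byzantium receipt:          the 32-byte post-state root
//	post-Byzantium, successful tx:  []byte{0x01}
//	post-Byzantium, failed tx:      []byte{} (the empty RLP string)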
// Size returns the approximate memory used by all internal contents. It is used
// to approximate and limit the memory consumption of various caches.
func (r *Receipt) Size() common.StorageSize {
size := common.StorageSize(unsafe.Sizeof(*r)) + common.StorageSize(len(r.PostState))
size += common.StorageSize(len(r.Logs)) * common.StorageSize(unsafe.Sizeof(Log{}))
for _, log := range r.Logs {
size += common.StorageSize(len(log.Topics)*common.HashLength + len(log.Data))
}
return size
}
// ReceiptForStorage is a wrapper around a Receipt that flattens and parses the
// entire content of a receipt, as opposed to only the consensus fields originally.
type ReceiptForStorage Receipt
// EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt
// into an RLP stream.
func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error {
enc := &storedReceiptRLP{
PostStateOrStatus: (*Receipt)(r).statusEncoding(),
CumulativeGasUsed: r.CumulativeGasUsed,
Logs: make([]*LogForStorage, len(r.Logs)),
}
for i, log := range r.Logs {
enc.Logs[i] = (*LogForStorage)(log)
}
return rlp.Encode(w, enc)
}
// DecodeRLP implements rlp.Decoder, and loads both consensus and implementation
// fields of a receipt from an RLP stream.
func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error {
// Retrieve the entire receipt blob as we need to try multiple decoders
blob, err := s.Raw()
if err != nil {
return err
}
// Try decoding from the newest format for future proofness, then the older one
// for old nodes that just upgraded. V4 was an intermediate unreleased format so
// we do need to decode it, but it's not common (try last).
if err := decodeStoredReceiptRLP(r, blob); err == nil {
return nil
}
if err := decodeV3StoredReceiptRLP(r, blob); err == nil {
return nil
}
return decodeV4StoredReceiptRLP(r, blob)
}
func decodeStoredReceiptRLP(r *ReceiptForStorage, blob []byte) error {
var stored storedReceiptRLP
if err := rlp.DecodeBytes(blob, &stored); err != nil {
return err
}
if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil {
return err
}
r.CumulativeGasUsed = stored.CumulativeGasUsed
r.Logs = make([]*Log, len(stored.Logs))
for i, log := range stored.Logs {
r.Logs[i] = (*Log)(log)
}
r.Bloom = CreateBloom(Receipts{(*Receipt)(r)})
return nil
}
func decodeV4StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error {
var stored v4StoredReceiptRLP
if err := rlp.DecodeBytes(blob, &stored); err != nil {
return err
}
if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil {
return err
}
r.CumulativeGasUsed = stored.CumulativeGasUsed
r.TxHash = stored.TxHash
r.ContractAddress = stored.ContractAddress
r.GasUsed = stored.GasUsed
r.Logs = make([]*Log, len(stored.Logs))
for i, log := range stored.Logs {
r.Logs[i] = (*Log)(log)
}
r.Bloom = CreateBloom(Receipts{(*Receipt)(r)})
return nil
}
func decodeV3StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error {
var stored v3StoredReceiptRLP
if err := rlp.DecodeBytes(blob, &stored); err != nil {
return err
}
if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil {
return err
}
r.CumulativeGasUsed = stored.CumulativeGasUsed
r.Bloom = stored.Bloom
r.TxHash = stored.TxHash
r.ContractAddress = stored.ContractAddress
r.GasUsed = stored.GasUsed
r.Logs = make([]*Log, len(stored.Logs))
for i, log := range stored.Logs {
r.Logs[i] = (*Log)(log)
}
return nil
}
// Receipts implements DerivableList for receipts.
type Receipts []*Receipt
// Len returns the number of receipts in this list.
func (rs Receipts) Len() int { return len(rs) }
// EncodeIndex encodes the i'th receipt to w.
func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) {
r := rs[i]
data := &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs}
switch r.Type {
case LegacyTxType:
rlp.Encode(w, data)
case AccessListTxType:
w.WriteByte(AccessListTxType)
rlp.Encode(w, data)
case DynamicFeeTxType:
w.WriteByte(DynamicFeeTxType)
rlp.Encode(w, data)
default:
// For unsupported types, write nothing. Since this is for
// DeriveSha, the error will be caught matching the derived hash
// to the block.
}
}
// DeriveFields fills the receipts with their computed fields based on consensus
// data and contextual infos like containing block and transactions.
func (r Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, txs Transactions) error {
signer := MakeSigner(config, new(big.Int).SetUint64(number))
logIndex := uint(0)
if len(txs) != len(r) {
return errors.New("transaction and receipt count mismatch")
}
for i := 0; i < len(r); i++ {
// The transaction type and hash can be retrieved from the transaction itself
r[i].Type = txs[i].Type()
r[i].TxHash = txs[i].Hash()
// block location fields
r[i].BlockHash = hash
r[i].BlockNumber = new(big.Int).SetUint64(number)
r[i].TransactionIndex = uint(i)
// The contract address can be derived from the transaction itself
if txs[i].To() == nil {
// Deriving the signer is expensive, only do if it's actually needed
from, _ := Sender(signer, txs[i])
r[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce())
}
// The used gas can be calculated based on previous r
if i == 0 {
r[i].GasUsed = r[i].CumulativeGasUsed
} else {
r[i].GasUsed = r[i].CumulativeGasUsed - r[i-1].CumulativeGasUsed
}
// The derived log fields can simply be set from the block and transaction
for j := 0; j < len(r[i].Logs); j++ {
r[i].Logs[j].BlockNumber = number
r[i].Logs[j].BlockHash = hash
r[i].Logs[j].TxHash = r[i].TxHash
r[i].Logs[j].TxIndex = uint(i)
r[i].Logs[j].Index = logIndex
logIndex++
}
}
return nil
}
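// A worked example of the gas derivation above: if three receipts report
// cumulative gas of 21000, 60000 and 81000, their individual GasUsed values
// come out as 21000, 39000 (60000-21000) and 21000 (81000-60000).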
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"bytes"
"container/heap"
"errors"
"io"
"math/big"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
)
var (
ErrInvalidSig = errors.New("invalid transaction v, r, s values")
ErrUnexpectedProtection = errors.New("transaction type does not support EIP-155 protected signatures")
ErrInvalidTxType = errors.New("transaction type not valid in this context")
ErrTxTypeNotSupported = errors.New("transaction type not supported")
ErrGasFeeCapTooLow = errors.New("fee cap less than base fee")
errEmptyTypedTx = errors.New("empty typed transaction bytes")
)
// Transaction types.
const (
LegacyTxType = iota
AccessListTxType
DynamicFeeTxType
)
// Transaction is an Ethereum transaction.
type Transaction struct {
inner TxData // Consensus contents of a transaction
time time.Time // Time first seen locally (spam avoidance)
// caches
hash atomic.Value
size atomic.Value
from atomic.Value
}
// NewTx creates a new transaction.
func NewTx(inner TxData) *Transaction {
tx := new(Transaction)
tx.setDecoded(inner.copy(), 0)
return tx
}
// TxData is the underlying data of a transaction.
//
// This is implemented by DynamicFeeTx, LegacyTx and AccessListTx.
type TxData interface {
txType() byte // returns the type ID
copy() TxData // creates a deep copy and initializes all fields
chainID() *big.Int
accessList() AccessList
data() []byte
gas() uint64
gasPrice() *big.Int
gasTipCap() *big.Int
gasFeeCap() *big.Int
value() *big.Int
nonce() uint64
to() *common.Address
rawSignatureValues() (v, r, s *big.Int)
setSignatureValues(chainID, v, r, s *big.Int)
}
// EncodeRLP implements rlp.Encoder
func (tx *Transaction) EncodeRLP(w io.Writer) error {
if tx.Type() == LegacyTxType {
return rlp.Encode(w, tx.inner)
}
// It's an EIP-2718 typed TX envelope.
buf := encodeBufferPool.Get().(*bytes.Buffer)
defer encodeBufferPool.Put(buf)
buf.Reset()
if err := tx.encodeTyped(buf); err != nil {
return err
}
return rlp.Encode(w, buf.Bytes())
}
// encodeTyped writes the canonical encoding of a typed transaction to w.
func (tx *Transaction) encodeTyped(w *bytes.Buffer) error {
w.WriteByte(tx.Type())
return rlp.Encode(w, tx.inner)
}
// MarshalBinary returns the canonical encoding of the transaction.
// For legacy transactions, it returns the RLP encoding. For EIP-2718 typed
// transactions, it returns the type and payload.
func (tx *Transaction) MarshalBinary() ([]byte, error) {
if tx.Type() == LegacyTxType {
return rlp.EncodeToBytes(tx.inner)
}
var buf bytes.Buffer
err := tx.encodeTyped(&buf)
return buf.Bytes(), err
}
// DecodeRLP implements rlp.Decoder
func (tx *Transaction) DecodeRLP(s *rlp.Stream) error {
kind, size, err := s.Kind()
switch {
case err != nil:
return err
case kind == rlp.List:
// It's a legacy transaction.
var inner LegacyTx
err := s.Decode(&inner)
if err == nil {
tx.setDecoded(&inner, int(rlp.ListSize(size)))
}
return err
case kind == rlp.String:
// It's an EIP-2718 typed TX envelope.
var b []byte
if b, err = s.Bytes(); err != nil {
return err
}
inner, err := tx.decodeTyped(b)
if err == nil {
tx.setDecoded(inner, len(b))
}
return err
default:
return rlp.ErrExpectedList
}
}
// UnmarshalBinary decodes the canonical encoding of transactions.
// It supports legacy RLP transactions and EIP2718 typed transactions.
func (tx *Transaction) UnmarshalBinary(b []byte) error {
if len(b) > 0 && b[0] > 0x7f {
// It's a legacy transaction.
var data LegacyTx
err := rlp.DecodeBytes(b, &data)
if err != nil {
return err
}
tx.setDecoded(&data, len(b))
return nil
}
// It's an EIP2718 typed transaction envelope.
inner, err := tx.decodeTyped(b)
if err != nil {
return err
}
tx.setDecoded(inner, len(b))
return nil
}
// decodeTyped decodes a typed transaction from the canonical format.
func (tx *Transaction) decodeTyped(b []byte) (TxData, error) {
if len(b) == 0 {
return nil, errEmptyTypedTx
}
switch b[0] {
case AccessListTxType:
var inner AccessListTx
err := rlp.DecodeBytes(b[1:], &inner)
return &inner, err
case DynamicFeeTxType:
var inner DynamicFeeTx
err := rlp.DecodeBytes(b[1:], &inner)
return &inner, err
default:
return nil, ErrTxTypeNotSupported
}
}
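// A minimal round-trip sketch of the envelope handling above: MarshalBinary and
// UnmarshalBinary cover both flavours, with the first byte deciding whether the
// payload is a legacy RLP list (first byte > 0x7f) or a typed envelope. Here tx
// is any previously constructed *Transaction.
//
//	blob, _ := tx.MarshalBinary()
//	var decoded Transaction
//	err := decoded.UnmarshalBinary(blob)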
// setDecoded sets the inner transaction and size after decoding.
func (tx *Transaction) setDecoded(inner TxData, size int) {
tx.inner = inner
tx.time = time.Now()
if size > 0 {
tx.size.Store(common.StorageSize(size))
}
}
func sanityCheckSignature(v *big.Int, r *big.Int, s *big.Int, maybeProtected bool) error {
if isProtectedV(v) && !maybeProtected {
return ErrUnexpectedProtection
}
var plainV byte
if isProtectedV(v) {
chainID := deriveChainId(v).Uint64()
plainV = byte(v.Uint64() - 35 - 2*chainID)
} else if maybeProtected {
// Only EIP-155 signatures can be optionally protected. Since
// we determined this v value is not protected, it must be a
// raw 27 or 28.
plainV = byte(v.Uint64() - 27)
} else {
// If the signature is not optionally protected, we assume it
// must already be equal to the recovery id.
plainV = byte(v.Uint64())
}
if !crypto.ValidateSignatureValues(plainV, r, s, false) {
return ErrInvalidSig
}
return nil
}
func isProtectedV(V *big.Int) bool {
if V.BitLen() <= 8 {
v := V.Uint64()
return v != 27 && v != 28 && v != 1 && v != 0
}
// anything not 27 or 28 is considered protected
return true
}
// Protected says whether the transaction is replay-protected.
func (tx *Transaction) Protected() bool {
switch tx := tx.inner.(type) {
case *LegacyTx:
return tx.V != nil && isProtectedV(tx.V)
default:
return true
}
}
// Type returns the transaction type.
func (tx *Transaction) Type() uint8 {
return tx.inner.txType()
}
// ChainId returns the EIP155 chain ID of the transaction. The return value will always be
// non-nil. For legacy transactions which are not replay-protected, the return value is
// zero.
func (tx *Transaction) ChainId() *big.Int {
return tx.inner.chainID()
}
// Data returns the input data of the transaction.
func (tx *Transaction) Data() []byte { return tx.inner.data() }
// AccessList returns the access list of the transaction.
func (tx *Transaction) AccessList() AccessList { return tx.inner.accessList() }
// Gas returns the gas limit of the transaction.
func (tx *Transaction) Gas() uint64 { return tx.inner.gas() }
// GasPrice returns the gas price of the transaction.
func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.inner.gasPrice()) }
// GasTipCap returns the gasTipCap per gas of the transaction.
func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.gasTipCap()) }
// GasFeeCap returns the fee cap per gas of the transaction.
func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) }
// Value returns the ether amount of the transaction.
func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) }
// Nonce returns the sender account nonce of the transaction.
func (tx *Transaction) Nonce() uint64 { return tx.inner.nonce() }
// To returns the recipient address of the transaction.
// For contract-creation transactions, To returns nil.
func (tx *Transaction) To() *common.Address {
// Copy the pointed-to address.
ito := tx.inner.to()
if ito == nil {
return nil
}
cpy := *ito
return &cpy
}
// Cost returns gas * gasPrice + value.
func (tx *Transaction) Cost() *big.Int {
total := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas()))
total.Add(total, tx.Value())
return total
}
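// A worked example of the formula above: a transaction with a 21000 gas limit,
// a gas price of 100 gwei (1e11 wei) and a value of 1 ether (1e18 wei) costs
// 21000*1e11 + 1e18 = 1.0021e18 wei.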
// RawSignatureValues returns the V, R, S signature values of the transaction.
// The return values should not be modified by the caller.
func (tx *Transaction) RawSignatureValues() (v, r, s *big.Int) {
return tx.inner.rawSignatureValues()
}
// GasFeeCapCmp compares the fee cap of two transactions.
func (tx *Transaction) GasFeeCapCmp(other *Transaction) int {
return tx.inner.gasFeeCap().Cmp(other.inner.gasFeeCap())
}
// GasFeeCapIntCmp compares the fee cap of the transaction against the given fee cap.
func (tx *Transaction) GasFeeCapIntCmp(other *big.Int) int {
return tx.inner.gasFeeCap().Cmp(other)
}
// GasTipCapCmp compares the gasTipCap of two transactions.
func (tx *Transaction) GasTipCapCmp(other *Transaction) int {
return tx.inner.gasTipCap().Cmp(other.inner.gasTipCap())
}
// GasTipCapIntCmp compares the gasTipCap of the transaction against the given gasTipCap.
func (tx *Transaction) GasTipCapIntCmp(other *big.Int) int {
return tx.inner.gasTipCap().Cmp(other)
}
// EffectiveGasTip returns the effective miner gasTipCap for the given base fee.
// Note: if the effective gasTipCap is negative, this method returns both the
// actual negative value and ErrGasFeeCapTooLow.
func (tx *Transaction) EffectiveGasTip(baseFee *big.Int) (*big.Int, error) {
if baseFee == nil {
return tx.GasTipCap(), nil
}
var err error
gasFeeCap := tx.GasFeeCap()
if gasFeeCap.Cmp(baseFee) == -1 {
err = ErrGasFeeCapTooLow
}
return math.BigMin(tx.GasTipCap(), gasFeeCap.Sub(gasFeeCap, baseFee)), err
}
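// A worked example of the effective tip: with gasFeeCap = 100 gwei, gasTipCap =
// 2 gwei and baseFee = 90 gwei the miner receives min(2, 100-90) = 2 gwei per gas;
// with baseFee = 99 gwei it receives min(2, 1) = 1 gwei per gas, and with
// baseFee = 101 gwei the result is negative and ErrGasFeeCapTooLow is returned.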
// EffectiveGasTipValue is identical to EffectiveGasTip, but does not return an
// error in case the effective gasTipCap is negative
func (tx *Transaction) EffectiveGasTipValue(baseFee *big.Int) *big.Int {
effectiveTip, _ := tx.EffectiveGasTip(baseFee)
return effectiveTip
}
// EffectiveGasTipCmp compares the effective gasTipCap of two transactions assuming the given base fee.
func (tx *Transaction) EffectiveGasTipCmp(other *Transaction, baseFee *big.Int) int {
if baseFee == nil {
return tx.GasTipCapCmp(other)
}
return tx.EffectiveGasTipValue(baseFee).Cmp(other.EffectiveGasTipValue(baseFee))
}
// EffectiveGasTipIntCmp compares the effective gasTipCap of a transaction to the given gasTipCap.
func (tx *Transaction) EffectiveGasTipIntCmp(other *big.Int, baseFee *big.Int) int {
if baseFee == nil {
return tx.GasTipCapIntCmp(other)
}
return tx.EffectiveGasTipValue(baseFee).Cmp(other)
}
// Hash returns the transaction hash.
func (tx *Transaction) Hash() common.Hash {
if hash := tx.hash.Load(); hash != nil {
return hash.(common.Hash)
}
var h common.Hash
if tx.Type() == LegacyTxType {
h = rlpHash(tx.inner)
} else {
h = prefixedRlpHash(tx.Type(), tx.inner)
}
tx.hash.Store(h)
return h
}
// Size returns the true RLP encoded storage size of the transaction, either by
// encoding and returning it, or returning a previously cached value.
func (tx *Transaction) Size() common.StorageSize {
if size := tx.size.Load(); size != nil {
return size.(common.StorageSize)
}
c := writeCounter(0)
rlp.Encode(&c, &tx.inner)
tx.size.Store(common.StorageSize(c))
return common.StorageSize(c)
}
// WithSignature returns a new transaction with the given signature.
// This signature needs to be in the [R || S || V] format where V is 0 or 1.
func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, error) {
r, s, v, err := signer.SignatureValues(tx, sig)
if err != nil {
return nil, err
}
cpy := tx.inner.copy()
cpy.setSignatureValues(signer.ChainID(), v, r, s)
return &Transaction{inner: cpy, time: tx.time}, nil
}
// Transactions implements DerivableList for transactions.
type Transactions []*Transaction
// Len returns the length of s.
func (s Transactions) Len() int { return len(s) }
// EncodeIndex encodes the i'th transaction to w. Note that this does not check for errors
// because we assume that *Transaction will only ever contain valid txs that were either
// constructed by decoding or via public API in this package.
func (s Transactions) EncodeIndex(i int, w *bytes.Buffer) {
tx := s[i]
if tx.Type() == LegacyTxType {
rlp.Encode(w, tx.inner)
} else {
tx.encodeTyped(w)
}
}
// TxDifference returns a new set which is the difference between a and b.
func TxDifference(a, b Transactions) Transactions {
keep := make(Transactions, 0, len(a))
remove := make(map[common.Hash]struct{})
for _, tx := range b {
remove[tx.Hash()] = struct{}{}
}
for _, tx := range a {
if _, ok := remove[tx.Hash()]; !ok {
keep = append(keep, tx)
}
}
return keep
}
// TxByNonce implements the sort interface to allow sorting a list of transactions
// by their nonces. This is usually only useful for sorting transactions from a
// single account, otherwise a nonce comparison doesn't make much sense.
type TxByNonce Transactions
func (s TxByNonce) Len() int { return len(s) }
func (s TxByNonce) Less(i, j int) bool { return s[i].Nonce() < s[j].Nonce() }
func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// TxWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap
type TxWithMinerFee struct {
tx *Transaction
minerFee *big.Int
}
// NewTxWithMinerFee creates a wrapped transaction, calculating the effective
// miner gasTipCap if a base fee is provided.
// Returns error in case of a negative effective miner gasTipCap.
func NewTxWithMinerFee(tx *Transaction, baseFee *big.Int) (*TxWithMinerFee, error) {
minerFee, err := tx.EffectiveGasTip(baseFee)
if err != nil {
return nil, err
}
return &TxWithMinerFee{
tx: tx,
minerFee: minerFee,
}, nil
}
// TxByPriceAndTime implements both the sort and the heap interface, making it useful
// for all at once sorting as well as individually adding and removing elements.
type TxByPriceAndTime []*TxWithMinerFee
func (s TxByPriceAndTime) Len() int { return len(s) }
func (s TxByPriceAndTime) Less(i, j int) bool {
// If the prices are equal, use the time the transaction was first seen for
// deterministic sorting
cmp := s[i].minerFee.Cmp(s[j].minerFee)
if cmp == 0 {
return s[i].tx.time.Before(s[j].tx.time)
}
return cmp > 0
}
func (s TxByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s *TxByPriceAndTime) Push(x interface{}) {
*s = append(*s, x.(*TxWithMinerFee))
}
func (s *TxByPriceAndTime) Pop() interface{} {
old := *s
n := len(old)
x := old[n-1]
*s = old[0 : n-1]
return x
}
// TransactionsByPriceAndNonce represents a set of transactions that can return
// transactions in a profit-maximizing sorted order, while supporting removing
// entire batches of transactions for non-executable accounts.
type TransactionsByPriceAndNonce struct {
txs map[common.Address]Transactions // Per account nonce-sorted list of transactions
heads TxByPriceAndTime // Next transaction for each unique account (price heap)
signer Signer // Signer for the set of transactions
baseFee *big.Int // Current base fee
}
// NewTransactionsByPriceAndNonce creates a transaction set that can retrieve
// price sorted transactions in a nonce-honouring way.
//
// Note, the input map is reowned so the caller should not interact with it any
// more after providing it to the constructor.
func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *big.Int) *TransactionsByPriceAndNonce {
// Initialize a price and received time based heap with the head transactions
heads := make(TxByPriceAndTime, 0, len(txs))
for from, accTxs := range txs {
acc, _ := Sender(signer, accTxs[0])
wrapped, err := NewTxWithMinerFee(accTxs[0], baseFee)
// Remove transaction if sender doesn't match from, or if wrapping fails.
if acc != from || err != nil {
delete(txs, from)
continue
}
heads = append(heads, wrapped)
txs[from] = accTxs[1:]
}
heap.Init(&heads)
// Assemble and return the transaction set
return &TransactionsByPriceAndNonce{
txs: txs,
heads: heads,
signer: signer,
baseFee: baseFee,
}
}
// Peek returns the next transaction by price.
func (t *TransactionsByPriceAndNonce) Peek() *Transaction {
if len(t.heads) == 0 {
return nil
}
return t.heads[0].tx
}
// Shift replaces the current best head with the next one from the same account.
func (t *TransactionsByPriceAndNonce) Shift() {
acc, _ := Sender(t.signer, t.heads[0].tx)
if txs, ok := t.txs[acc]; ok && len(txs) > 0 {
if wrapped, err := NewTxWithMinerFee(txs[0], t.baseFee); err == nil {
t.heads[0], t.txs[acc] = wrapped, txs[1:]
heap.Fix(&t.heads, 0)
return
}
}
heap.Pop(&t.heads)
}
// Pop removes the best transaction, *not* replacing it with the next one from
// the same account. This should be used when a transaction cannot be executed
// and hence all subsequent ones should be discarded from the same account.
func (t *TransactionsByPriceAndNonce) Pop() {
heap.Pop(&t.heads)
}
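// drainByPriceAndNonceExample is an illustrative sketch, not part of the
// original source, of the Peek/Shift/Pop pattern used by block producers:
// peek at the best-paying head transaction, then either include it and Shift
// to the same sender's next nonce, or Pop to discard that sender entirely
// (e.g. when the transaction cannot be executed).
func drainByPriceAndNonceExample(set *TransactionsByPriceAndNonce, include func(*Transaction) bool) []*Transaction {
	var picked []*Transaction
	for {
		tx := set.Peek()
		if tx == nil {
			break // set exhausted
		}
		if include(tx) {
			picked = append(picked, tx)
			set.Shift() // advance to this account's next transaction
		} else {
			set.Pop() // drop this account's remaining transactions
		}
	}
	return picked
}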
// Message is a fully derived transaction and implements core.Message
//
// NOTE: In a future PR this will be removed.
type Message struct {
to *common.Address
from common.Address
nonce uint64
amount *big.Int
gasLimit uint64
gasPrice *big.Int
gasFeeCap *big.Int
gasTipCap *big.Int
data []byte
accessList AccessList
isFake bool
}
func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *big.Int, gasLimit uint64, gasPrice, gasFeeCap, gasTipCap *big.Int, data []byte, accessList AccessList, isFake bool) Message {
return Message{
from: from,
to: to,
nonce: nonce,
amount: amount,
gasLimit: gasLimit,
gasPrice: gasPrice,
gasFeeCap: gasFeeCap,
gasTipCap: gasTipCap,
data: data,
accessList: accessList,
isFake: isFake,
}
}
// AsMessage returns the transaction as a core.Message.
func (tx *Transaction) AsMessage(s Signer, baseFee *big.Int) (Message, error) {
msg := Message{
nonce: tx.Nonce(),
gasLimit: tx.Gas(),
gasPrice: new(big.Int).Set(tx.GasPrice()),
gasFeeCap: new(big.Int).Set(tx.GasFeeCap()),
gasTipCap: new(big.Int).Set(tx.GasTipCap()),
to: tx.To(),
amount: tx.Value(),
data: tx.Data(),
accessList: tx.AccessList(),
isFake: false,
}
// If baseFee provided, set gasPrice to effectiveGasPrice.
if baseFee != nil {
msg.gasPrice = math.BigMin(msg.gasPrice.Add(msg.gasTipCap, baseFee), msg.gasFeeCap)
}
var err error
msg.from, err = Sender(s, tx)
return msg, err
}
func (m Message) From() common.Address { return m.from }
func (m Message) To() *common.Address { return m.to }
func (m Message) GasPrice() *big.Int { return m.gasPrice }
func (m Message) GasFeeCap() *big.Int { return m.gasFeeCap }
func (m Message) GasTipCap() *big.Int { return m.gasTipCap }
func (m Message) Value() *big.Int { return m.amount }
func (m Message) Gas() uint64 { return m.gasLimit }
func (m Message) Nonce() uint64 { return m.nonce }
func (m Message) Data() []byte { return m.data }
func (m Message) AccessList() AccessList { return m.accessList }
func (m Message) IsFake() bool { return m.isFake }
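// newTransferMessageExample is an illustrative sketch, not part of the
// original source: it builds a plain value-transfer Message with the
// constructor above (21000 gas, 1 wei, no calldata). It assumes AccessList is
// a slice type declared elsewhere in this package, so nil means "no access
// list", and uses the same value for gasPrice, gasFeeCap and gasTipCap for
// simplicity.
func newTransferMessageExample(from common.Address, to *common.Address, nonce uint64) Message {
	price := big.NewInt(1)
	return NewMessage(from, to, nonce, big.NewInt(1), 21000, price, price, price, nil, nil, false)
}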
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"crypto/ecdsa"
"errors"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
)
var ErrInvalidChainId = errors.New("invalid chain id for signer")
// sigCache is used to cache the derived sender and contains
// the signer used to derive it.
type sigCache struct {
signer Signer
from common.Address
}
// MakeSigner returns a Signer based on the given chain config and block number.
func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer {
var signer Signer
switch {
case config.IsLondon(blockNumber):
signer = NewLondonSigner(config.ChainID)
case config.IsBerlin(blockNumber):
signer = NewEIP2930Signer(config.ChainID)
case config.IsEIP155(blockNumber):
signer = NewEIP155Signer(config.ChainID)
case config.IsHomestead(blockNumber):
signer = HomesteadSigner{}
default:
signer = FrontierSigner{}
}
return signer
}
// LatestSigner returns the 'most permissive' Signer available for the given chain
// configuration. Specifically, this enables support of EIP-155 replay protection and
// EIP-2930 access list transactions when their respective forks are scheduled to occur at
// any block number in the chain config.
//
// Use this in transaction-handling code where the current block number is unknown. If you
// have the current block number available, use MakeSigner instead.
func LatestSigner(config *params.ChainConfig) Signer {
if config.ChainID != nil {
if config.LondonBlock != nil {
return NewLondonSigner(config.ChainID)
}
if config.BerlinBlock != nil {
return NewEIP2930Signer(config.ChainID)
}
if config.EIP155Block != nil {
return NewEIP155Signer(config.ChainID)
}
}
return HomesteadSigner{}
}
// LatestSignerForChainID returns the 'most permissive' Signer available. Specifically,
// this enables support for EIP-155 replay protection and all implemented EIP-2718
// transaction types if chainID is non-nil.
//
// Use this in transaction-handling code where the current block number and fork
// configuration are unknown. If you have a ChainConfig, use LatestSigner instead.
// If you have a ChainConfig and know the current block number, use MakeSigner instead.
func LatestSignerForChainID(chainID *big.Int) Signer {
if chainID == nil {
return HomesteadSigner{}
}
return NewLondonSigner(chainID)
}
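// signerSelectionExample is an illustrative sketch, not part of the original
// source, of when to use each constructor above: MakeSigner when the block
// number is known, LatestSigner when only a ChainConfig is available, and
// LatestSignerForChainID when nothing but the chain id is known.
func signerSelectionExample(config *params.ChainConfig, blockNumber *big.Int) (Signer, Signer, Signer) {
	perBlock := MakeSigner(config, blockNumber)
	latest := LatestSigner(config)
	byID := LatestSignerForChainID(config.ChainID)
	return perBlock, latest, byID
}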
// SignTx signs the transaction using the given signer and private key.
func SignTx(tx *Transaction, s Signer, prv *ecdsa.PrivateKey) (*Transaction, error) {
h := s.Hash(tx)
sig, err := crypto.Sign(h[:], prv)
if err != nil {
return nil, err
}
return tx.WithSignature(s, sig)
}
// SignNewTx creates a transaction and signs it.
func SignNewTx(prv *ecdsa.PrivateKey, s Signer, txdata TxData) (*Transaction, error) {
tx := NewTx(txdata)
h := s.Hash(tx)
sig, err := crypto.Sign(h[:], prv)
if err != nil {
return nil, err
}
return tx.WithSignature(s, sig)
}
// MustSignNewTx creates a transaction and signs it.
// This panics if the transaction cannot be signed.
func MustSignNewTx(prv *ecdsa.PrivateKey, s Signer, txdata TxData) *Transaction {
tx, err := SignNewTx(prv, s, txdata)
if err != nil {
panic(err)
}
return tx
}
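// signLegacyTxExample is an illustrative sketch, not part of the original
// source: it generates a throwaway key and signs a minimal legacy value
// transfer with an EIP-155 signer for the given chain id. It assumes the
// LegacyTx type (with Nonce, GasPrice, Gas, To and Value fields) declared
// elsewhere in this package.
func signLegacyTxExample(chainID *big.Int, to common.Address) (*Transaction, error) {
	key, err := crypto.GenerateKey()
	if err != nil {
		return nil, err
	}
	signer := NewEIP155Signer(chainID)
	return SignNewTx(key, signer, &LegacyTx{
		Nonce:    0,
		GasPrice: big.NewInt(1),
		Gas:      21000,
		To:       &to,
		Value:    big.NewInt(1),
	})
}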
// Sender returns the address derived from the signature (V, R, S) using the
// secp256k1 elliptic curve, or an error if the derivation fails or the
// signature is incorrect.
//
// Sender may cache the address, allowing it to be used regardless of
// signing method. The cache is invalidated if the cached signer does
// not match the signer used in the current call.
func Sender(signer Signer, tx *Transaction) (common.Address, error) {
if sc := tx.from.Load(); sc != nil {
sigCache := sc.(sigCache)
// If the signer used to derive the sender in a previous
// call is the same as the current one, reuse the cached
// address; otherwise ignore the stale cache entry.
if sigCache.signer.Equal(signer) {
return sigCache.from, nil
}
}
addr, err := signer.Sender(tx)
if err != nil {
return common.Address{}, err
}
tx.from.Store(sigCache{signer: signer, from: addr})
return addr, nil
}
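// senderRoundTripExample is an illustrative sketch, not part of the original
// source: it checks that Sender recovers the address belonging to the key
// that signed the transaction, using crypto.PubkeyToAddress from the already
// imported crypto package. Repeated calls with the same signer are served
// from the cache stored on the transaction.
func senderRoundTripExample(signer Signer, tx *Transaction, key *ecdsa.PrivateKey) (bool, error) {
	from, err := Sender(signer, tx)
	if err != nil {
		return false, err
	}
	return from == crypto.PubkeyToAddress(key.PublicKey), nil
}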
// Signer encapsulates transaction signature handling. The name of this type is slightly
// misleading because Signers don't actually sign, they're just for validating and
// processing signatures.
//
// Note that this interface is not a stable API and may change at any time to accommodate
// new protocol rules.
type Signer interface {
// Sender returns the sender address of the transaction.
Sender(tx *Transaction) (common.Address, error)
// SignatureValues returns the raw R, S, V values corresponding to the
// given signature.
SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error)
ChainID() *big.Int
// Hash returns 'signature hash', i.e. the transaction hash that is signed by the
// private key. This hash does not uniquely identify the transaction.
Hash(tx *Transaction) common.Hash
// Equal returns true if the given signer is the same as the receiver.
Equal(Signer) bool
}
type londonSigner struct{ eip2930Signer }
// NewLondonSigner returns a signer that accepts
// - EIP-1559 dynamic fee transactions
// - EIP-2930 access list transactions,
// - EIP-155 replay protected transactions, and
// - legacy Homestead transactions.
func NewLondonSigner(chainId *big.Int) Signer {
return londonSigner{eip2930Signer{NewEIP155Signer(chainId)}}
}
func (s londonSigner) Sender(tx *Transaction) (common.Address, error) {
if tx.Type() != DynamicFeeTxType {
return s.eip2930Signer.Sender(tx)
}
V, R, S := tx.RawSignatureValues()
// DynamicFee txs are defined to use 0 and 1 as their recovery
// id, add 27 to become equivalent to unprotected Homestead signatures.
V = new(big.Int).Add(V, big.NewInt(27))
if tx.ChainId().Cmp(s.chainId) != 0 {
return common.Address{}, ErrInvalidChainId
}
return recoverPlain(s.Hash(tx), R, S, V, true)
}
func (s londonSigner) Equal(s2 Signer) bool {
x, ok := s2.(londonSigner)
return ok && x.chainId.Cmp(s.chainId) == 0
}
func (s londonSigner) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) {
txdata, ok := tx.inner.(*DynamicFeeTx)
if !ok {
return s.eip2930Signer.SignatureValues(tx, sig)
}
// Check that chain ID of tx matches the signer. We also accept ID zero here,
// because it indicates that the chain ID was not specified in the tx.
if txdata.ChainID.Sign() != 0 && txdata.ChainID.Cmp(s.chainId) != 0 {
return nil, nil, nil, ErrInvalidChainId
}
R, S, _ = decodeSignature(sig)
V = big.NewInt(int64(sig[64]))
return R, S, V, nil
}
// Hash returns the hash to be signed by the sender.
// It does not uniquely identify the transaction.
func (s londonSigner) Hash(tx *Transaction) common.Hash {
if tx.Type() != DynamicFeeTxType {
return s.eip2930Signer.Hash(tx)
}
return prefixedRlpHash(
tx.Type(),
[]interface{}{
s.chainId,
tx.Nonce(),
tx.GasTipCap(),
tx.GasFeeCap(),
tx.Gas(),
tx.To(),
tx.Value(),
tx.Data(),
tx.AccessList(),
})
}
type eip2930Signer struct{ EIP155Signer }
// NewEIP2930Signer returns a signer that accepts EIP-2930 access list transactions,
// EIP-155 replay protected transactions, and legacy Homestead transactions.
func NewEIP2930Signer(chainId *big.Int) Signer {
return eip2930Signer{NewEIP155Signer(chainId)}
}
func (s eip2930Signer) ChainID() *big.Int {
return s.chainId
}
func (s eip2930Signer) Equal(s2 Signer) bool {
x, ok := s2.(eip2930Signer)
return ok && x.chainId.Cmp(s.chainId) == 0
}
func (s eip2930Signer) Sender(tx *Transaction) (common.Address, error) {
V, R, S := tx.RawSignatureValues()
switch tx.Type() {
case LegacyTxType:
if !tx.Protected() {
return HomesteadSigner{}.Sender(tx)
}
V = new(big.Int).Sub(V, s.chainIdMul)
V.Sub(V, big8)
case AccessListTxType:
// AL txs are defined to use 0 and 1 as their recovery
// id, add 27 to become equivalent to unprotected Homestead signatures.
V = new(big.Int).Add(V, big.NewInt(27))
default:
return common.Address{}, ErrTxTypeNotSupported
}
if tx.ChainId().Cmp(s.chainId) != 0 {
return common.Address{}, ErrInvalidChainId
}
return recoverPlain(s.Hash(tx), R, S, V, true)
}
func (s eip2930Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) {
switch txdata := tx.inner.(type) {
case *LegacyTx:
return s.EIP155Signer.SignatureValues(tx, sig)
case *AccessListTx:
// Check that chain ID of tx matches the signer. We also accept ID zero here,
// because it indicates that the chain ID was not specified in the tx.
if txdata.ChainID.Sign() != 0 && txdata.ChainID.Cmp(s.chainId) != 0 {
return nil, nil, nil, ErrInvalidChainId
}
R, S, _ = decodeSignature(sig)
V = big.NewInt(int64(sig[64]))
default:
return nil, nil, nil, ErrTxTypeNotSupported
}
return R, S, V, nil
}
// Hash returns the hash to be signed by the sender.
// It does not uniquely identify the transaction.
func (s eip2930Signer) Hash(tx *Transaction) common.Hash {
switch tx.Type() {
case LegacyTxType:
return rlpHash([]interface{}{
tx.Nonce(),
tx.GasPrice(),
tx.Gas(),
tx.To(),
tx.Value(),
tx.Data(),
s.chainId, uint(0), uint(0),
})
case AccessListTxType:
return prefixedRlpHash(
tx.Type(),
[]interface{}{
s.chainId,
tx.Nonce(),
tx.GasPrice(),
tx.Gas(),
tx.To(),
tx.Value(),
tx.Data(),
tx.AccessList(),
})
default:
// This _should_ not happen, but in case someone sends in a bad
// json struct via RPC, it's probably more prudent to return an
// empty hash instead of killing the node with a panic
//panic("Unsupported transaction type: %d", tx.typ)
return common.Hash{}
}
}
// EIP155Signer implements Signer using the EIP-155 rules. This accepts transactions which
// are replay-protected as well as unprotected homestead transactions.
type EIP155Signer struct {
chainId, chainIdMul *big.Int
}
func NewEIP155Signer(chainId *big.Int) EIP155Signer {
if chainId == nil {
chainId = new(big.Int)
}
return EIP155Signer{
chainId: chainId,
chainIdMul: new(big.Int).Mul(chainId, big.NewInt(2)),
}
}
func (s EIP155Signer) ChainID() *big.Int {
return s.chainId
}
func (s EIP155Signer) Equal(s2 Signer) bool {
eip155, ok := s2.(EIP155Signer)
return ok && eip155.chainId.Cmp(s.chainId) == 0
}
var big8 = big.NewInt(8)
func (s EIP155Signer) Sender(tx *Transaction) (common.Address, error) {
if tx.Type() != LegacyTxType {
return common.Address{}, ErrTxTypeNotSupported
}
if !tx.Protected() {
return HomesteadSigner{}.Sender(tx)
}
if tx.ChainId().Cmp(s.chainId) != 0 {
return common.Address{}, ErrInvalidChainId
}
V, R, S := tx.RawSignatureValues()
V = new(big.Int).Sub(V, s.chainIdMul)
V.Sub(V, big8)
return recoverPlain(s.Hash(tx), R, S, V, true)
}
// SignatureValues returns signature values. This signature
// needs to be in the [R || S || V] format where V is 0 or 1.
func (s EIP155Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) {
if tx.Type() != LegacyTxType {
return nil, nil, nil, ErrTxTypeNotSupported
}
R, S, V = decodeSignature(sig)
if s.chainId.Sign() != 0 {
V = big.NewInt(int64(sig[64] + 35))
V.Add(V, s.chainIdMul)
}
return R, S, V, nil
}
// Hash returns the hash to be signed by the sender.
// It does not uniquely identify the transaction.
func (s EIP155Signer) Hash(tx *Transaction) common.Hash {
return rlpHash([]interface{}{
tx.Nonce(),
tx.GasPrice(),
tx.Gas(),
tx.To(),
tx.Value(),
tx.Data(),
s.chainId, uint(0), uint(0),
})
}
// HomesteadSigner implements Signer using the Homestead rules.
type HomesteadSigner struct{ FrontierSigner }
func (s HomesteadSigner) ChainID() *big.Int {
return nil
}
func (s HomesteadSigner) Equal(s2 Signer) bool {
_, ok := s2.(HomesteadSigner)
return ok
}
// SignatureValues returns signature values. This signature
// needs to be in the [R || S || V] format where V is 0 or 1.
func (hs HomesteadSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) {
return hs.FrontierSigner.SignatureValues(tx, sig)
}
func (hs HomesteadSigner) Sender(tx *Transaction) (common.Address, error) {
if tx.Type() != LegacyTxType {
return common.Address{}, ErrTxTypeNotSupported
}
v, r, s := tx.RawSignatureValues()
return recoverPlain(hs.Hash(tx), r, s, v, true)
}
// FrontierSigner implements Signer using the original Frontier rules,
// without replay protection.
type FrontierSigner struct{}
func (s FrontierSigner) ChainID() *big.Int {
return nil
}
func (s FrontierSigner) Equal(s2 Signer) bool {
_, ok := s2.(FrontierSigner)
return ok
}
func (fs FrontierSigner) Sender(tx *Transaction) (common.Address, error) {
if tx.Type() != LegacyTxType {
return common.Address{}, ErrTxTypeNotSupported
}
v, r, s := tx.RawSignatureValues()
return recoverPlain(fs.Hash(tx), r, s, v, false)
}
// SignatureValues returns signature values. This signature
// needs to be in the [R || S || V] format where V is 0 or 1.
func (fs FrontierSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) {
if tx.Type() != LegacyTxType {
return nil, nil, nil, ErrTxTypeNotSupported
}
r, s, v = decodeSignature(sig)
return r, s, v, nil
}
// Hash returns the hash to be signed by the sender.
// It does not uniquely identify the transaction.
func (fs FrontierSigner) Hash(tx *Transaction) common.Hash {
return rlpHash([]interface{}{
tx.Nonce(),
tx.GasPrice(),
tx.Gas(),
tx.To(),
tx.Value(),
tx.Data(),
})
}
func decodeSignature(sig []byte) (r, s, v *big.Int) {
if len(sig) != crypto.SignatureLength {
panic(fmt.Sprintf("wrong size for signature: got %d, want %d", len(sig), crypto.SignatureLength))
}
r = new(big.Int).SetBytes(sig[:32])
s = new(big.Int).SetBytes(sig[32:64])
v = new(big.Int).SetBytes([]byte{sig[64] + 27})
return r, s, v
}
func recoverPlain(sighash common.Hash, R, S, Vb *big.Int, homestead bool) (common.Address, error) {
if Vb.BitLen() > 8 {
return common.Address{}, ErrInvalidSig
}
V := byte(Vb.Uint64() - 27)
if !crypto.ValidateSignatureValues(V, R, S, homestead) {
return common.Address{}, ErrInvalidSig
}
// encode the signature in uncompressed format
r, s := R.Bytes(), S.Bytes()
sig := make([]byte, crypto.SignatureLength)
copy(sig[32-len(r):32], r)
copy(sig[64-len(s):64], s)
sig[64] = V
// recover the public key from the signature
pub, err := crypto.Ecrecover(sighash[:], sig)
if err != nil {
return common.Address{}, err
}
if len(pub) == 0 || pub[0] != 4 {
return common.Address{}, errors.New("invalid public key")
}
var addr common.Address
copy(addr[:], crypto.Keccak256(pub[1:])[12:])
return addr, nil
}
// deriveChainId derives the chain id from the given v parameter
func deriveChainId(v *big.Int) *big.Int {
if v.BitLen() <= 64 {
v := v.Uint64()
if v == 27 || v == 28 {
return new(big.Int)
}
return new(big.Int).SetUint64((v - 35) / 2)
}
v = new(big.Int).Sub(v, big.NewInt(35))
return v.Div(v, big.NewInt(2))
}
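// deriveChainIdExample is an illustrative sketch, not part of the original
// source, of the EIP-155 relationship v = chainId*2 + 35 (or 36): a
// replay-protected mainnet transaction carries v = 37 or 38, which derives
// back to chain id 1, while the pre-EIP-155 values 27 and 28 derive to 0.
func deriveChainIdExample() (*big.Int, *big.Int) {
	mainnet := deriveChainId(big.NewInt(37)) // (37 - 35) / 2 = 1
	legacy := deriveChainId(big.NewInt(27))  // unprotected, chain id 0
	return mainnet, legacy
}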
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package crypto
import (
"bufio"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"math/big"
"os"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
// SignatureLength indicates the byte length required to carry a signature with recovery id.
const SignatureLength = 64 + 1 // 64 bytes ECDSA signature + 1 byte recovery id
// RecoveryIDOffset points to the byte offset within the signature that contains the recovery id.
const RecoveryIDOffset = 64
// DigestLength sets the signature digest exact length
const DigestLength = 32
var (
secp256k1N, _ = new(big.Int).SetString("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)
secp256k1halfN = new(big.Int).Div(secp256k1N, big.NewInt(2))
)
var errInvalidPubkey = errors.New("invalid secp256k1 public key")
// KeccakState wraps sha3.state. In addition to the usual hash methods, it also supports
// Read to get a variable amount of data from the hash state. Read is faster than Sum
// because it doesn't copy the internal state, but also modifies the internal state.
type KeccakState interface {
hash.Hash
Read([]byte) (int, error)
}
// NewKeccakState creates a new KeccakState
func NewKeccakState() KeccakState {
return sha3.NewLegacyKeccak256().(KeccakState)
}
// HashData hashes the provided data using the KeccakState and returns a 32 byte hash
func HashData(kh KeccakState, data []byte) (h common.Hash) {
kh.Reset()
kh.Write(data)
kh.Read(h[:])
return h
}
// Keccak256 calculates and returns the Keccak256 hash of the input data.
func Keccak256(data ...[]byte) []byte {
b := make([]byte, 32)
d := NewKeccakState()
for _, b := range data {
d.Write(b)
}
d.Read(b)
return b
}
// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
// converting it to an internal Hash data structure.
func Keccak256Hash(data ...[]byte) (h common.Hash) {
d := NewKeccakState()
for _, b := range data {
d.Write(b)
}
d.Read(h[:])
return h
}
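// keccak256Example is an illustrative sketch, not part of the original
// source: hashing the empty input yields the well-known constant
// 0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470, the
// value Ethereum uses as the code hash of accounts without code.
func keccak256Example() common.Hash {
	return Keccak256Hash()
}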
// Keccak512 calculates and returns the Keccak512 hash of the input data.
func Keccak512(data ...[]byte) []byte {
d := sha3.NewLegacyKeccak512()
for _, b := range data {
d.Write(b)
}
return d.Sum(nil)
}
// CreateAddress creates an ethereum address given the creator address bytes and the nonce.
func CreateAddress(b common.Address, nonce uint64) common.Address {
data, _ := rlp.EncodeToBytes([]interface{}{b, nonce})
return common.BytesToAddress(Keccak256(data)[12:])
}
// CreateAddress2 creates an ethereum address given the address bytes, initial
// contract code hash and a salt.
func CreateAddress2(b common.Address, salt [32]byte, inithash []byte) common.Address {
return common.BytesToAddress(Keccak256([]byte{0xff}, b.Bytes(), salt[:], inithash)[12:])
}
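// contractAddressExample is an illustrative sketch, not part of the original
// source: CREATE derives the new contract address from the deployer and its
// account nonce, while CREATE2 mixes in a caller-chosen salt and the hash of
// the init code instead of the nonce.
func contractAddressExample(deployer common.Address, nonce uint64, salt [32]byte, initCode []byte) (common.Address, common.Address) {
	return CreateAddress(deployer, nonce), CreateAddress2(deployer, salt, Keccak256(initCode))
}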
// ToECDSA creates a private key with the given D value.
func ToECDSA(d []byte) (*ecdsa.PrivateKey, error) {
return toECDSA(d, true)
}
// ToECDSAUnsafe blindly converts a binary blob to a private key. It should almost
// never be used unless you are sure the input is valid and want to avoid hitting
// errors due to bad origin encoding (0 prefixes cut off).
func ToECDSAUnsafe(d []byte) *ecdsa.PrivateKey {
priv, _ := toECDSA(d, false)
return priv
}
// toECDSA creates a private key with the given D value. The strict parameter
// controls whether the key's length must match the curve size exactly, or whether
// legacy encodings with leading zero bytes stripped are also accepted.
func toECDSA(d []byte, strict bool) (*ecdsa.PrivateKey, error) {
priv := new(ecdsa.PrivateKey)
priv.PublicKey.Curve = S256()
if strict && 8*len(d) != priv.Params().BitSize {
return nil, fmt.Errorf("invalid length, need %d bits", priv.Params().BitSize)
}
priv.D = new(big.Int).SetBytes(d)
// The private key scalar priv.D must be < N
if priv.D.Cmp(secp256k1N) >= 0 {
return nil, fmt.Errorf("invalid private key, >=N")
}
// The priv.D must not be zero or negative.
if priv.D.Sign() <= 0 {
return nil, fmt.Errorf("invalid private key, zero or negative")
}
priv.PublicKey.X, priv.PublicKey.Y = priv.PublicKey.Curve.ScalarBaseMult(d)
if priv.PublicKey.X == nil {
return nil, errors.New("invalid private key")
}
return priv, nil
}
// FromECDSA exports a private key into a binary dump.
func FromECDSA(priv *ecdsa.PrivateKey) []byte {
if priv == nil {
return nil
}
return math.PaddedBigBytes(priv.D, priv.Params().BitSize/8)
}
// UnmarshalPubkey converts bytes to a secp256k1 public key.
func UnmarshalPubkey(pub []byte) (*ecdsa.PublicKey, error) {
x, y := elliptic.Unmarshal(S256(), pub)
if x == nil {
return nil, errInvalidPubkey
}
return &ecdsa.PublicKey{Curve: S256(), X: x, Y: y}, nil
}
func FromECDSAPub(pub *ecdsa.PublicKey) []byte {
if pub == nil || pub.X == nil || pub.Y == nil {
return nil
}
return elliptic.Marshal(S256(), pub.X, pub.Y)
}
// HexToECDSA parses a secp256k1 private key.
func HexToECDSA(hexkey string) (*ecdsa.PrivateKey, error) {
b, err := hex.DecodeString(hexkey)
if byteErr, ok := err.(hex.InvalidByteError); ok {
return nil, fmt.Errorf("invalid hex character %q in private key", byte(byteErr))
} else if err != nil {
return nil, errors.New("invalid hex data for private key")
}
return ToECDSA(b)
}
// LoadECDSA loads a secp256k1 private key from the given file.
func LoadECDSA(file string) (*ecdsa.PrivateKey, error) {
fd, err := os.Open(file)
if err != nil {
return nil, err
}
defer fd.Close()
r := bufio.NewReader(fd)
buf := make([]byte, 64)
n, err := readASCII(buf, r)
if err != nil {
return nil, err
} else if n != len(buf) {
return nil, fmt.Errorf("key file too short, want 64 hex characters")
}
if err := checkKeyFileEnd(r); err != nil {
return nil, err
}
return HexToECDSA(string(buf))
}
// readASCII reads into 'buf', stopping when the buffer is full or
// when a non-printable control character is encountered.
func readASCII(buf []byte, r *bufio.Reader) (n int, err error) {
for ; n < len(buf); n++ {
buf[n], err = r.ReadByte()
switch {
case err == io.EOF || buf[n] < '!':
return n, nil
case err != nil:
return n, err
}
}
return n, nil
}
// checkKeyFileEnd skips over additional newlines at the end of a key file.
func checkKeyFileEnd(r *bufio.Reader) error {
for i := 0; ; i++ {
b, err := r.ReadByte()
switch {
case err == io.EOF:
return nil
case err != nil:
return err
case b != '\n' && b != '\r':
return fmt.Errorf("invalid character %q at end of key file", b)
case i >= 2:
return errors.New("key file too long, want 64 hex characters")
}
}
}
// SaveECDSA saves a secp256k1 private key to the given file with
// restrictive permissions. The key data is saved hex-encoded.
func SaveECDSA(file string, key *ecdsa.PrivateKey) error {
k := hex.EncodeToString(FromECDSA(key))
return ioutil.WriteFile(file, []byte(k), 0600)
}
// GenerateKey generates a new private key.
func GenerateKey() (*ecdsa.PrivateKey, error) {
return ecdsa.GenerateKey(S256(), rand.Reader)
}
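// keyRoundTripExample is an illustrative sketch, not part of the original
// source: it generates a key, persists it hex-encoded with restrictive file
// permissions via SaveECDSA, and loads it back with LoadECDSA. The path is a
// placeholder chosen by the caller.
func keyRoundTripExample(path string) (*ecdsa.PrivateKey, error) {
	key, err := GenerateKey()
	if err != nil {
		return nil, err
	}
	if err := SaveECDSA(path, key); err != nil {
		return nil, err
	}
	return LoadECDSA(path)
}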
// ValidateSignatureValues verifies whether the signature values are valid with
// the given chain rules. The v value is assumed to be either 0 or 1.
func ValidateSignatureValues(v byte, r, s *big.Int, homestead bool) bool {
if r.Cmp(common.Big1) < 0 || s.Cmp(common.Big1) < 0 {
return false
}
// reject upper range of s values (ECDSA malleability)
// see discussion in secp256k1/libsecp256k1/include/secp256k1.h
if homestead && s.Cmp(secp256k1halfN) > 0 {
return false
}
// Frontier: allow s to be in full N range
return r.Cmp(secp256k1N) < 0 && s.Cmp(secp256k1N) < 0 && (v == 0 || v == 1)
}
func PubkeyToAddress(p ecdsa.PublicKey) common.Address {
pubBytes := FromECDSAPub(&p)
return common.BytesToAddress(Keccak256(pubBytes[1:])[12:])
}
func zeroBytes(bytes []byte) {
for i := range bytes {
bytes[i] = 0
}
}
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build nacl || js || cgo || gofuzz
// +build nacl js cgo gofuzz
package crypto
import (
"crypto/ecdsa"
"crypto/elliptic"
"errors"
"fmt"
"math/big"
"github.com/btcsuite/btcd/btcec"
)
// Ecrecover returns the uncompressed public key that created the given signature.
func Ecrecover(hash, sig []byte) ([]byte, error) {
pub, err := SigToPub(hash, sig)
if err != nil {
return nil, err
}
bytes := (*btcec.PublicKey)(pub).SerializeUncompressed()
return bytes, err
}
// SigToPub returns the public key that created the given signature.
func SigToPub(hash, sig []byte) (*ecdsa.PublicKey, error) {
// Convert to btcec input format with 'recovery id' v at the beginning.
btcsig := make([]byte, SignatureLength)
btcsig[0] = sig[64] + 27
copy(btcsig[1:], sig)
pub, _, err := btcec.RecoverCompact(btcec.S256(), btcsig, hash)
return (*ecdsa.PublicKey)(pub), err
}
// Sign calculates an ECDSA signature.
//
// This function is susceptible to chosen plaintext attacks that can leak
// information about the private key that is used for signing. Callers must
// be aware that the given hash cannot be chosen by an adversary. The common
// solution is to hash any input before calculating the signature.
//
// The produced signature is in the [R || S || V] format where V is 0 or 1.
func Sign(hash []byte, prv *ecdsa.PrivateKey) ([]byte, error) {
if len(hash) != 32 {
return nil, fmt.Errorf("hash is required to be exactly 32 bytes (%d)", len(hash))
}
if prv.Curve != btcec.S256() {
return nil, fmt.Errorf("private key curve is not secp256k1")
}
sig, err := btcec.SignCompact(btcec.S256(), (*btcec.PrivateKey)(prv), hash, false)
if err != nil {
return nil, err
}
// Convert to Ethereum signature format with 'recovery id' v at the end.
v := sig[0] - 27
copy(sig, sig[1:])
sig[64] = v
return sig, nil
}
// VerifySignature checks that the given public key created the signature over hash.
// The public key should be in compressed (33 bytes) or uncompressed (65 bytes) format.
// The signature should have the 64 byte [R || S] format.
func VerifySignature(pubkey, hash, signature []byte) bool {
if len(signature) != 64 {
return false
}
sig := &btcec.Signature{R: new(big.Int).SetBytes(signature[:32]), S: new(big.Int).SetBytes(signature[32:])}
key, err := btcec.ParsePubKey(pubkey, btcec.S256())
if err != nil {
return false
}
// Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
if sig.S.Cmp(secp256k1halfN) > 0 {
return false
}
return sig.Verify(hash, key)
}
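// signAndVerifyExample is an illustrative sketch, not part of the original
// source: it signs a 32-byte digest, recovers the uncompressed public key
// with Ecrecover, and checks the 64-byte [R || S] part of the signature with
// VerifySignature.
func signAndVerifyExample(digest []byte, key *ecdsa.PrivateKey) (bool, error) {
	sig, err := Sign(digest, key) // digest must be exactly 32 bytes
	if err != nil {
		return false, err
	}
	pub, err := Ecrecover(digest, sig)
	if err != nil {
		return false, err
	}
	// VerifySignature expects the signature without the trailing recovery id.
	return VerifySignature(pub, digest, sig[:RecoveryIDOffset]), nil
}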
// DecompressPubkey parses a public key in the 33-byte compressed format.
func DecompressPubkey(pubkey []byte) (*ecdsa.PublicKey, error) {
if len(pubkey) != 33 {
return nil, errors.New("invalid compressed public key length")
}
key, err := btcec.ParsePubKey(pubkey, btcec.S256())
if err != nil {
return nil, err
}
return key.ToECDSA(), nil
}
// CompressPubkey encodes a public key to the 33-byte compressed format.
func CompressPubkey(pubkey *ecdsa.PublicKey) []byte {
return (*btcec.PublicKey)(pubkey).SerializeCompressed()
}
// S256 returns an instance of the secp256k1 curve.
func S256() elliptic.Curve {
return btcec.S256()
}
module github.com/ethereum/go-ethereum
go 1.17
require (
github.com/btcsuite/btcd v0.22.0-beta // indirect
github.com/holiman/uint256 v1.2.0 // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
)
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrfdnWo=
github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
package main
import (
"fmt"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/params"
)
func main() {
bc := &core.BlockChain{}
//engine := ethash.NewFullFaker()
core.NewStateProcessor(params.MainnetChainConfig, bc, nil)
fmt.Println("made state processor")
}
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package params
import (
"encoding/binary"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"golang.org/x/crypto/sha3"
)
// Genesis hashes to enforce below configs on.
var (
MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")
RopstenGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d")
RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a")
)
// TrustedCheckpoints associates each known checkpoint with the genesis hash of
// the chain it belongs to.
var TrustedCheckpoints = map[common.Hash]*TrustedCheckpoint{
MainnetGenesisHash: MainnetTrustedCheckpoint,
RopstenGenesisHash: RopstenTrustedCheckpoint,
RinkebyGenesisHash: RinkebyTrustedCheckpoint,
GoerliGenesisHash: GoerliTrustedCheckpoint,
}
// CheckpointOracles associates each known checkpoint oracle with the genesis hash of
// the chain it belongs to.
var CheckpointOracles = map[common.Hash]*CheckpointOracleConfig{
MainnetGenesisHash: MainnetCheckpointOracle,
RopstenGenesisHash: RopstenCheckpointOracle,
RinkebyGenesisHash: RinkebyCheckpointOracle,
GoerliGenesisHash: GoerliCheckpointOracle,
}
var (
// MainnetChainConfig is the chain parameters to run a node on the main network.
MainnetChainConfig = &ChainConfig{
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(1_150_000),
DAOForkBlock: big.NewInt(1_920_000),
DAOForkSupport: true,
EIP150Block: big.NewInt(2_463_000),
EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"),
EIP155Block: big.NewInt(2_675_000),
EIP158Block: big.NewInt(2_675_000),
ByzantiumBlock: big.NewInt(4_370_000),
ConstantinopleBlock: big.NewInt(7_280_000),
PetersburgBlock: big.NewInt(7_280_000),
IstanbulBlock: big.NewInt(9_069_000),
MuirGlacierBlock: big.NewInt(9_200_000),
BerlinBlock: big.NewInt(12_244_000),
LondonBlock: big.NewInt(12_965_000),
Ethash: new(EthashConfig),
}
// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
MainnetTrustedCheckpoint = &TrustedCheckpoint{
SectionIndex: 395,
SectionHead: common.HexToHash("0xbfca95b8c1de014e252288e9c32029825fadbff58285f5b54556525e480dbb5b"),
CHTRoot: common.HexToHash("0x2ccf3dbb58eb6375e037fdd981ca5778359e4b8fa0270c2878b14361e64161e7"),
BloomRoot: common.HexToHash("0x2d46ec65a6941a2dc1e682f8f81f3d24192021f492fdf6ef0fdd51acb0f4ba0f"),
}
// MainnetCheckpointOracle contains a set of configs for the main network oracle.
MainnetCheckpointOracle = &CheckpointOracleConfig{
Address: common.HexToAddress("0x9a9070028361F7AAbeB3f2F2Dc07F82C4a98A02a"),
Signers: []common.Address{
common.HexToAddress("0x1b2C260efc720BE89101890E4Db589b44E950527"), // Peter
common.HexToAddress("0x78d1aD571A1A09D60D9BBf25894b44e4C8859595"), // Martin
common.HexToAddress("0x286834935f4A8Cfb4FF4C77D5770C2775aE2b0E7"), // Zsolt
common.HexToAddress("0xb86e2B0Ab5A4B1373e40c51A7C712c70Ba2f9f8E"), // Gary
common.HexToAddress("0x0DF8fa387C602AE62559cC4aFa4972A7045d6707"), // Guillaume
},
Threshold: 2,
}
// RopstenChainConfig contains the chain parameters to run a node on the Ropsten test network.
RopstenChainConfig = &ChainConfig{
ChainID: big.NewInt(3),
HomesteadBlock: big.NewInt(0),
DAOForkBlock: nil,
DAOForkSupport: true,
EIP150Block: big.NewInt(0),
EIP150Hash: common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d"),
EIP155Block: big.NewInt(10),
EIP158Block: big.NewInt(10),
ByzantiumBlock: big.NewInt(1_700_000),
ConstantinopleBlock: big.NewInt(4_230_000),
PetersburgBlock: big.NewInt(4_939_394),
IstanbulBlock: big.NewInt(6_485_846),
MuirGlacierBlock: big.NewInt(7_117_117),
BerlinBlock: big.NewInt(9_812_189),
LondonBlock: big.NewInt(10_499_401),
Ethash: new(EthashConfig),
}
// RopstenTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network.
RopstenTrustedCheckpoint = &TrustedCheckpoint{
SectionIndex: 329,
SectionHead: common.HexToHash("0xe66f7038333a01fb95dc9ea03e5a2bdaf4b833cdcb9e393b9127e013bd64d39b"),
CHTRoot: common.HexToHash("0x1b0c883338ac0d032122800c155a2e73105fbfebfaa50436893282bc2d9feec5"),
BloomRoot: common.HexToHash("0x3cc98c88d283bf002378246f22c653007655cbcea6ed89f98d739f73bd341a01"),
}
// RopstenCheckpointOracle contains a set of configs for the Ropsten test network oracle.
RopstenCheckpointOracle = &CheckpointOracleConfig{
Address: common.HexToAddress("0xEF79475013f154E6A65b54cB2742867791bf0B84"),
Signers: []common.Address{
common.HexToAddress("0x32162F3581E88a5f62e8A61892B42C46E2c18f7b"), // Peter
common.HexToAddress("0x78d1aD571A1A09D60D9BBf25894b44e4C8859595"), // Martin
common.HexToAddress("0x286834935f4A8Cfb4FF4C77D5770C2775aE2b0E7"), // Zsolt
common.HexToAddress("0xb86e2B0Ab5A4B1373e40c51A7C712c70Ba2f9f8E"), // Gary
common.HexToAddress("0x0DF8fa387C602AE62559cC4aFa4972A7045d6707"), // Guillaume
},
Threshold: 2,
}
// RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network.
RinkebyChainConfig = &ChainConfig{
ChainID: big.NewInt(4),
HomesteadBlock: big.NewInt(1),
DAOForkBlock: nil,
DAOForkSupport: true,
EIP150Block: big.NewInt(2),
EIP150Hash: common.HexToHash("0x9b095b36c15eaf13044373aef8ee0bd3a382a5abb92e402afa44b8249c3a90e9"),
EIP155Block: big.NewInt(3),
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(1_035_301),
ConstantinopleBlock: big.NewInt(3_660_663),
PetersburgBlock: big.NewInt(4_321_234),
IstanbulBlock: big.NewInt(5_435_345),
MuirGlacierBlock: nil,
BerlinBlock: big.NewInt(8_290_928),
LondonBlock: big.NewInt(8_897_988),
Clique: &CliqueConfig{
Period: 15,
Epoch: 30000,
},
}
// RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network.
RinkebyTrustedCheckpoint = &TrustedCheckpoint{
SectionIndex: 276,
SectionHead: common.HexToHash("0xea89a4b04e3da9bd688e316f8de669396b6d4a38a19d2cd96a00b70d58b836aa"),
CHTRoot: common.HexToHash("0xd6889d0bf6673c0d2c1cf6e9098a6fe5b30888a115b6112796aa8ee8efc4a723"),
BloomRoot: common.HexToHash("0x6009a9256b34b8bde3a3f094afb647ba5d73237546017b9025d64ac1ff54c47c"),
}
// RinkebyCheckpointOracle contains a set of configs for the Rinkeby test network oracle.
RinkebyCheckpointOracle = &CheckpointOracleConfig{
Address: common.HexToAddress("0xebe8eFA441B9302A0d7eaECc277c09d20D684540"),
Signers: []common.Address{
common.HexToAddress("0xd9c9cd5f6779558b6e0ed4e6acf6b1947e7fa1f3"), // Peter
common.HexToAddress("0x78d1aD571A1A09D60D9BBf25894b44e4C8859595"), // Martin
common.HexToAddress("0x286834935f4A8Cfb4FF4C77D5770C2775aE2b0E7"), // Zsolt
common.HexToAddress("0xb86e2B0Ab5A4B1373e40c51A7C712c70Ba2f9f8E"), // Gary
},
Threshold: 2,
}
// GoerliChainConfig contains the chain parameters to run a node on the Görli test network.
GoerliChainConfig = &ChainConfig{
ChainID: big.NewInt(5),
HomesteadBlock: big.NewInt(0),
DAOForkBlock: nil,
DAOForkSupport: true,
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(1_561_651),
MuirGlacierBlock: nil,
BerlinBlock: big.NewInt(4_460_644),
LondonBlock: big.NewInt(5_062_605),
Clique: &CliqueConfig{
Period: 15,
Epoch: 30000,
},
}
// GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network.
GoerliTrustedCheckpoint = &TrustedCheckpoint{
SectionIndex: 160,
SectionHead: common.HexToHash("0xb5a666c790dc35a5613d04ebba8ba47a850b45a15d9b95ad7745c35ae034b5a5"),
CHTRoot: common.HexToHash("0x6b4e00df52bdc38fa6c26c8ef595c2ad6184963ea36ab08ee744af460aa735e1"),
BloomRoot: common.HexToHash("0x8fa88f5e50190cb25243aeee262a1a9e4434a06f8d455885dcc1b5fc48c33836"),
}
// GoerliCheckpointOracle contains a set of configs for the Goerli test network oracle.
GoerliCheckpointOracle = &CheckpointOracleConfig{
Address: common.HexToAddress("0x18CA0E045F0D772a851BC7e48357Bcaab0a0795D"),
Signers: []common.Address{
common.HexToAddress("0x4769bcaD07e3b938B7f43EB7D278Bc7Cb9efFb38"), // Peter
common.HexToAddress("0x78d1aD571A1A09D60D9BBf25894b44e4C8859595"), // Martin
common.HexToAddress("0x286834935f4A8Cfb4FF4C77D5770C2775aE2b0E7"), // Zsolt
common.HexToAddress("0xb86e2B0Ab5A4B1373e40c51A7C712c70Ba2f9f8E"), // Gary
common.HexToAddress("0x0DF8fa387C602AE62559cC4aFa4972A7045d6707"), // Guillaume
},
Threshold: 2,
}
// AllEthashProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Ethash consensus.
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Clique consensus.
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
TestRules = TestChainConfig.Rules(new(big.Int))
)
// TrustedCheckpoint represents a set of post-processed trie roots (CHT and
// BloomTrie) associated with the appropriate section index and head hash. It is
// used to start light syncing from this checkpoint and avoid downloading the
// entire header chain while still being able to securely access old headers/logs.
type TrustedCheckpoint struct {
SectionIndex uint64 `json:"sectionIndex"`
SectionHead common.Hash `json:"sectionHead"`
CHTRoot common.Hash `json:"chtRoot"`
BloomRoot common.Hash `json:"bloomRoot"`
}
// HashEqual reports whether the checkpoint's own hash equals the given one.
func (c *TrustedCheckpoint) HashEqual(hash common.Hash) bool {
if c.Empty() {
return hash == common.Hash{}
}
return c.Hash() == hash
}
// Hash returns the hash of the checkpoint's four key fields (SectionIndex, SectionHead, CHTRoot and BloomRoot).
func (c *TrustedCheckpoint) Hash() common.Hash {
var sectionIndex [8]byte
binary.BigEndian.PutUint64(sectionIndex[:], c.SectionIndex)
w := sha3.NewLegacyKeccak256()
w.Write(sectionIndex[:])
w.Write(c.SectionHead[:])
w.Write(c.CHTRoot[:])
w.Write(c.BloomRoot[:])
var h common.Hash
w.Sum(h[:0])
return h
}
// Empty reports whether the checkpoint is regarded as empty.
func (c *TrustedCheckpoint) Empty() bool {
return c.SectionHead == (common.Hash{}) || c.CHTRoot == (common.Hash{}) || c.BloomRoot == (common.Hash{})
}
// CheckpointOracleConfig represents the configuration of the checkpoint contract
// (which acts as an oracle) used for light client checkpoint syncing.
type CheckpointOracleConfig struct {
Address common.Address `json:"address"`
Signers []common.Address `json:"signers"`
Threshold uint64 `json:"threshold"`
}
// ChainConfig is the core config which determines the blockchain settings.
//
// ChainConfig is stored in the database on a per block basis. This means
// that any network, identified by its genesis block, can have its own
// set of configuration options.
type ChainConfig struct {
ChainID *big.Int `json:"chainId"` // chainId identifies the current chain and is used for replay protection
HomesteadBlock *big.Int `json:"homesteadBlock,omitempty"` // Homestead switch block (nil = no fork, 0 = already homestead)
DAOForkBlock *big.Int `json:"daoForkBlock,omitempty"` // TheDAO hard-fork switch block (nil = no fork)
DAOForkSupport bool `json:"daoForkSupport,omitempty"` // Whether the node supports or opposes the DAO hard-fork
// EIP150 implements the Gas price changes (https://github.com/ethereum/EIPs/issues/150)
EIP150Block *big.Int `json:"eip150Block,omitempty"` // EIP150 HF block (nil = no fork)
EIP150Hash common.Hash `json:"eip150Hash,omitempty"` // EIP150 HF hash (needed for header only clients as only gas pricing changed)
EIP155Block *big.Int `json:"eip155Block,omitempty"` // EIP155 HF block
EIP158Block *big.Int `json:"eip158Block,omitempty"` // EIP158 HF block
ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = already on byzantium)
ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` // Constantinople switch block (nil = no fork, 0 = already activated)
PetersburgBlock *big.Int `json:"petersburgBlock,omitempty"` // Petersburg switch block (nil = same as Constantinople)
IstanbulBlock *big.Int `json:"istanbulBlock,omitempty"` // Istanbul switch block (nil = no fork, 0 = already on istanbul)
MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` // Eip-2384 (bomb delay) switch block (nil = no fork, 0 = already activated)
BerlinBlock *big.Int `json:"berlinBlock,omitempty"` // Berlin switch block (nil = no fork, 0 = already on berlin)
LondonBlock *big.Int `json:"londonBlock,omitempty"` // London switch block (nil = no fork, 0 = already on london)
CatalystBlock *big.Int `json:"catalystBlock,omitempty"` // Catalyst switch block (nil = no fork, 0 = already on catalyst)
// Various consensus engines
Ethash *EthashConfig `json:"ethash,omitempty"`
Clique *CliqueConfig `json:"clique,omitempty"`
}
// EthashConfig is the consensus engine configs for proof-of-work based sealing.
type EthashConfig struct{}
// String implements the stringer interface, returning the consensus engine details.
func (c *EthashConfig) String() string {
return "ethash"
}
// CliqueConfig is the consensus engine configs for proof-of-authority based sealing.
type CliqueConfig struct {
Period uint64 `json:"period"` // Number of seconds between blocks to enforce
Epoch uint64 `json:"epoch"` // Epoch length to reset votes and checkpoint
}
// String implements the stringer interface, returning the consensus engine details.
func (c *CliqueConfig) String() string {
return "clique"
}
// String implements the fmt.Stringer interface.
func (c *ChainConfig) String() string {
var engine interface{}
switch {
case c.Ethash != nil:
engine = c.Ethash
case c.Clique != nil:
engine = c.Clique
default:
engine = "unknown"
}
return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Engine: %v}",
c.ChainID,
c.HomesteadBlock,
c.DAOForkBlock,
c.DAOForkSupport,
c.EIP150Block,
c.EIP155Block,
c.EIP158Block,
c.ByzantiumBlock,
c.ConstantinopleBlock,
c.PetersburgBlock,
c.IstanbulBlock,
c.MuirGlacierBlock,
c.BerlinBlock,
c.LondonBlock,
engine,
)
}
// IsHomestead returns whether num is either equal to the homestead block or greater.
func (c *ChainConfig) IsHomestead(num *big.Int) bool {
return isForked(c.HomesteadBlock, num)
}
// IsDAOFork returns whether num is either equal to the DAO fork block or greater.
func (c *ChainConfig) IsDAOFork(num *big.Int) bool {
return isForked(c.DAOForkBlock, num)
}
// IsEIP150 returns whether num is either equal to the EIP150 fork block or greater.
func (c *ChainConfig) IsEIP150(num *big.Int) bool {
return isForked(c.EIP150Block, num)
}
// IsEIP155 returns whether num is either equal to the EIP155 fork block or greater.
func (c *ChainConfig) IsEIP155(num *big.Int) bool {
return isForked(c.EIP155Block, num)
}
// IsEIP158 returns whether num is either equal to the EIP158 fork block or greater.
func (c *ChainConfig) IsEIP158(num *big.Int) bool {
return isForked(c.EIP158Block, num)
}
// IsByzantium returns whether num is either equal to the Byzantium fork block or greater.
func (c *ChainConfig) IsByzantium(num *big.Int) bool {
return isForked(c.ByzantiumBlock, num)
}
// IsConstantinople returns whether num is either equal to the Constantinople fork block or greater.
func (c *ChainConfig) IsConstantinople(num *big.Int) bool {
return isForked(c.ConstantinopleBlock, num)
}
// IsMuirGlacier returns whether num is either equal to the Muir Glacier (EIP-2384) fork block or greater.
func (c *ChainConfig) IsMuirGlacier(num *big.Int) bool {
return isForked(c.MuirGlacierBlock, num)
}
// IsPetersburg returns whether num is either
// - equal to or greater than the PetersburgBlock fork block,
// - OR is nil, and Constantinople is active
func (c *ChainConfig) IsPetersburg(num *big.Int) bool {
return isForked(c.PetersburgBlock, num) || c.PetersburgBlock == nil && isForked(c.ConstantinopleBlock, num)
}
// IsIstanbul returns whether num is either equal to the Istanbul fork block or greater.
func (c *ChainConfig) IsIstanbul(num *big.Int) bool {
return isForked(c.IstanbulBlock, num)
}
// IsBerlin returns whether num is either equal to the Berlin fork block or greater.
func (c *ChainConfig) IsBerlin(num *big.Int) bool {
return isForked(c.BerlinBlock, num)
}
// IsLondon returns whether num is either equal to the London fork block or greater.
func (c *ChainConfig) IsLondon(num *big.Int) bool {
return isForked(c.LondonBlock, num)
}
// IsCatalyst returns whether num is either equal to the Merge fork block or greater.
func (c *ChainConfig) IsCatalyst(num *big.Int) bool {
return isForked(c.CatalystBlock, num)
}
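// forkCheckExample is an illustrative sketch, not part of the original
// source: with MainnetChainConfig above, a head at block 13,000,000 is past
// both the Berlin (12,244,000) and London (12,965,000) activations, so both
// predicates report true.
func forkCheckExample() (bool, bool) {
	head := big.NewInt(13_000_000)
	return MainnetChainConfig.IsBerlin(head), MainnetChainConfig.IsLondon(head)
}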
// CheckCompatible checks whether scheduled fork transitions have been imported
// with a mismatching chain configuration.
func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64) *ConfigCompatError {
bhead := new(big.Int).SetUint64(height)
// Iterate checkCompatible to find the lowest conflict.
var lasterr *ConfigCompatError
for {
err := c.checkCompatible(newcfg, bhead)
if err == nil || (lasterr != nil && err.RewindTo == lasterr.RewindTo) {
break
}
lasterr = err
bhead.SetUint64(err.RewindTo)
}
return lasterr
}
// CheckConfigForkOrder checks that we don't "skip" any forks: geth isn't pluggable enough
// to guarantee that forks can be implemented in a different order than on the official networks.
func (c *ChainConfig) CheckConfigForkOrder() error {
type fork struct {
name string
block *big.Int
optional bool // if true, the fork may be nil and next fork is still allowed
}
var lastFork fork
for _, cur := range []fork{
{name: "homesteadBlock", block: c.HomesteadBlock},
{name: "daoForkBlock", block: c.DAOForkBlock, optional: true},
{name: "eip150Block", block: c.EIP150Block},
{name: "eip155Block", block: c.EIP155Block},
{name: "eip158Block", block: c.EIP158Block},
{name: "byzantiumBlock", block: c.ByzantiumBlock},
{name: "constantinopleBlock", block: c.ConstantinopleBlock},
{name: "petersburgBlock", block: c.PetersburgBlock},
{name: "istanbulBlock", block: c.IstanbulBlock},
{name: "muirGlacierBlock", block: c.MuirGlacierBlock, optional: true},
{name: "berlinBlock", block: c.BerlinBlock},
{name: "londonBlock", block: c.LondonBlock},
} {
if lastFork.name != "" {
// Next one must be higher number
if lastFork.block == nil && cur.block != nil {
return fmt.Errorf("unsupported fork ordering: %v not enabled, but %v enabled at %v",
lastFork.name, cur.name, cur.block)
}
if lastFork.block != nil && cur.block != nil {
if lastFork.block.Cmp(cur.block) > 0 {
return fmt.Errorf("unsupported fork ordering: %v enabled at %v, but %v enabled at %v",
lastFork.name, lastFork.block, cur.name, cur.block)
}
}
}
// If it was optional and not set, then ignore it
if !cur.optional || cur.block != nil {
lastFork = cur
}
}
return nil
}
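// forkOrderExample is an illustrative sketch, not part of the original
// source: a config that schedules London while leaving the non-optional
// Berlin block unset fails CheckConfigForkOrder with an "unsupported fork
// ordering" error.
func forkOrderExample() error {
	cfg := &ChainConfig{
		ChainID:             big.NewInt(1),
		HomesteadBlock:      big.NewInt(0),
		EIP150Block:         big.NewInt(0),
		EIP155Block:         big.NewInt(0),
		EIP158Block:         big.NewInt(0),
		ByzantiumBlock:      big.NewInt(0),
		ConstantinopleBlock: big.NewInt(0),
		PetersburgBlock:     big.NewInt(0),
		IstanbulBlock:       big.NewInt(0),
		// BerlinBlock intentionally left nil
		LondonBlock: big.NewInt(0),
	}
	return cfg.CheckConfigForkOrder()
}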
func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *ConfigCompatError {
if isForkIncompatible(c.HomesteadBlock, newcfg.HomesteadBlock, head) {
return newCompatError("Homestead fork block", c.HomesteadBlock, newcfg.HomesteadBlock)
}
if isForkIncompatible(c.DAOForkBlock, newcfg.DAOForkBlock, head) {
return newCompatError("DAO fork block", c.DAOForkBlock, newcfg.DAOForkBlock)
}
if c.IsDAOFork(head) && c.DAOForkSupport != newcfg.DAOForkSupport {
return newCompatError("DAO fork support flag", c.DAOForkBlock, newcfg.DAOForkBlock)
}
if isForkIncompatible(c.EIP150Block, newcfg.EIP150Block, head) {
return newCompatError("EIP150 fork block", c.EIP150Block, newcfg.EIP150Block)
}
if isForkIncompatible(c.EIP155Block, newcfg.EIP155Block, head) {
return newCompatError("EIP155 fork block", c.EIP155Block, newcfg.EIP155Block)
}
if isForkIncompatible(c.EIP158Block, newcfg.EIP158Block, head) {
return newCompatError("EIP158 fork block", c.EIP158Block, newcfg.EIP158Block)
}
if c.IsEIP158(head) && !configNumEqual(c.ChainID, newcfg.ChainID) {
return newCompatError("EIP158 chain ID", c.EIP158Block, newcfg.EIP158Block)
}
if isForkIncompatible(c.ByzantiumBlock, newcfg.ByzantiumBlock, head) {
return newCompatError("Byzantium fork block", c.ByzantiumBlock, newcfg.ByzantiumBlock)
}
if isForkIncompatible(c.ConstantinopleBlock, newcfg.ConstantinopleBlock, head) {
return newCompatError("Constantinople fork block", c.ConstantinopleBlock, newcfg.ConstantinopleBlock)
}
if isForkIncompatible(c.PetersburgBlock, newcfg.PetersburgBlock, head) {
// The only case where we allow Petersburg to be set in the past is if it is equal to Constantinople,
// mainly to satisfy fork ordering requirements, which state that the Petersburg fork must be set if the Constantinople fork is set.
if isForkIncompatible(c.ConstantinopleBlock, newcfg.PetersburgBlock, head) {
return newCompatError("Petersburg fork block", c.PetersburgBlock, newcfg.PetersburgBlock)
}
}
if isForkIncompatible(c.IstanbulBlock, newcfg.IstanbulBlock, head) {
return newCompatError("Istanbul fork block", c.IstanbulBlock, newcfg.IstanbulBlock)
}
if isForkIncompatible(c.MuirGlacierBlock, newcfg.MuirGlacierBlock, head) {
return newCompatError("Muir Glacier fork block", c.MuirGlacierBlock, newcfg.MuirGlacierBlock)
}
if isForkIncompatible(c.BerlinBlock, newcfg.BerlinBlock, head) {
return newCompatError("Berlin fork block", c.BerlinBlock, newcfg.BerlinBlock)
}
if isForkIncompatible(c.LondonBlock, newcfg.LondonBlock, head) {
return newCompatError("London fork block", c.LondonBlock, newcfg.LondonBlock)
}
return nil
}
// isForkIncompatible returns true if a fork scheduled at s1 cannot be rescheduled to
// block s2 because head is already past the fork.
func isForkIncompatible(s1, s2, head *big.Int) bool {
return (isForked(s1, head) || isForked(s2, head)) && !configNumEqual(s1, s2)
}
// isForked returns whether a fork scheduled at block s is active at the given head block.
func isForked(s, head *big.Int) bool {
if s == nil || head == nil {
return false
}
return s.Cmp(head) <= 0
}
func configNumEqual(x, y *big.Int) bool {
if x == nil {
return y == nil
}
if y == nil {
return x == nil
}
return x.Cmp(y) == 0
}
// ConfigCompatError is raised if the locally-stored blockchain is initialised with a
// ChainConfig that would alter the past.
type ConfigCompatError struct {
What string
// block numbers of the stored and new configurations
StoredConfig, NewConfig *big.Int
// the block number to which the local chain must be rewound to correct the error
RewindTo uint64
}
func newCompatError(what string, storedblock, newblock *big.Int) *ConfigCompatError {
var rew *big.Int
switch {
case storedblock == nil:
rew = newblock
case newblock == nil || storedblock.Cmp(newblock) < 0:
rew = storedblock
default:
rew = newblock
}
err := &ConfigCompatError{what, storedblock, newblock, 0}
if rew != nil && rew.Sign() > 0 {
err.RewindTo = rew.Uint64() - 1
}
return err
}
func (err *ConfigCompatError) Error() string {
return fmt.Sprintf("mismatching %s in database (have %d, want %d, rewindto %d)", err.What, err.StoredConfig, err.NewConfig, err.RewindTo)
}
// Rules wraps ChainConfig and is merely syntactic sugar; it can also be used for functions
// that do not have or require information about the block.
//
// Rules is a one-time interface, meaning that it shouldn't be used in between transition
// phases.
type Rules struct {
ChainID *big.Int
IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool
IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool
IsBerlin, IsLondon, IsCatalyst bool
}
// Rules ensures c's ChainID is not nil.
func (c *ChainConfig) Rules(num *big.Int) Rules {
chainID := c.ChainID
if chainID == nil {
chainID = new(big.Int)
}
return Rules{
ChainID: new(big.Int).Set(chainID),
IsHomestead: c.IsHomestead(num),
IsEIP150: c.IsEIP150(num),
IsEIP155: c.IsEIP155(num),
IsEIP158: c.IsEIP158(num),
IsByzantium: c.IsByzantium(num),
IsConstantinople: c.IsConstantinople(num),
IsPetersburg: c.IsPetersburg(num),
IsIstanbul: c.IsIstanbul(num),
IsBerlin: c.IsBerlin(num),
IsLondon: c.IsLondon(num),
IsCatalyst: c.IsCatalyst(num),
}
}
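// Example usage (an illustrative sketch, not part of the original source): callers that
// only need the fork flags for a single block can snapshot them once, assuming cfg is a
// *ChainConfig and blockNum a *big.Int:
//
//    rules := cfg.Rules(blockNum)
//    if rules.IsLondon {
//        // apply London-specific handling
//    }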
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rlp
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
"reflect"
"strings"
"sync"
)
//lint:ignore ST1012 EOL is not an error.
// EOL is returned when the end of the current list
// has been reached during streaming.
var EOL = errors.New("rlp: end of list")
var (
ErrExpectedString = errors.New("rlp: expected String or Byte")
ErrExpectedList = errors.New("rlp: expected List")
ErrCanonInt = errors.New("rlp: non-canonical integer format")
ErrCanonSize = errors.New("rlp: non-canonical size information")
ErrElemTooLarge = errors.New("rlp: element is larger than containing list")
ErrValueTooLarge = errors.New("rlp: value size exceeds available input length")
ErrMoreThanOneValue = errors.New("rlp: input contains more than one value")
// internal errors
errNotInList = errors.New("rlp: call of ListEnd outside of any list")
errNotAtEOL = errors.New("rlp: call of ListEnd not positioned at EOL")
errUintOverflow = errors.New("rlp: uint overflow")
errNoPointer = errors.New("rlp: interface given to Decode must be a pointer")
errDecodeIntoNil = errors.New("rlp: pointer given to Decode must not be nil")
streamPool = sync.Pool{
New: func() interface{} { return new(Stream) },
}
)
// Decoder is implemented by types that require custom RLP decoding rules or need to decode
// into private fields.
//
// The DecodeRLP method should read one value from the given Stream. It is not forbidden to
// read less or more, but it might be confusing.
type Decoder interface {
DecodeRLP(*Stream) error
}
// Decode parses RLP-encoded data from r and stores the result in the value pointed to by
// val. Please see package-level documentation for the decoding rules. Val must be a
// non-nil pointer.
//
// If r does not implement ByteReader, Decode will do its own buffering.
//
// Note that Decode does not set an input limit for all readers and may be vulnerable to
// panics caused by huge value sizes. If you need an input limit, use
//
// NewStream(r, limit).Decode(val)
func Decode(r io.Reader, val interface{}) error {
stream := streamPool.Get().(*Stream)
defer streamPool.Put(stream)
stream.Reset(r, 0)
return stream.Decode(val)
}
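// Example usage (an illustrative sketch, not part of the original source): decoding a
// single value from an io.Reader, assuming r carries one RLP-encoded unsigned integer:
//
//    var x uint64
//    if err := Decode(r, &x); err != nil {
//        return err
//    }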
// DecodeBytes parses RLP data from b into val. Please see package-level documentation for
// the decoding rules. The input must contain exactly one value and no trailing data.
func DecodeBytes(b []byte, val interface{}) error {
r := bytes.NewReader(b)
stream := streamPool.Get().(*Stream)
defer streamPool.Put(stream)
stream.Reset(r, uint64(len(b)))
if err := stream.Decode(val); err != nil {
return err
}
if r.Len() > 0 {
return ErrMoreThanOneValue
}
return nil
}
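// Example usage (an illustrative sketch, not part of the original source): decoding a
// byte slice into a hypothetical struct type, assuming input holds exactly one value:
//
//    type pair struct{ A uint64; B string }
//    var v pair
//    err := DecodeBytes(input, &v)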
type decodeError struct {
msg string
typ reflect.Type
ctx []string
}
func (err *decodeError) Error() string {
ctx := ""
if len(err.ctx) > 0 {
ctx = ", decoding into "
for i := len(err.ctx) - 1; i >= 0; i-- {
ctx += err.ctx[i]
}
}
return fmt.Sprintf("rlp: %s for %v%s", err.msg, err.typ, ctx)
}
func wrapStreamError(err error, typ reflect.Type) error {
switch err {
case ErrCanonInt:
return &decodeError{msg: "non-canonical integer (leading zero bytes)", typ: typ}
case ErrCanonSize:
return &decodeError{msg: "non-canonical size information", typ: typ}
case ErrExpectedList:
return &decodeError{msg: "expected input list", typ: typ}
case ErrExpectedString:
return &decodeError{msg: "expected input string or byte", typ: typ}
case errUintOverflow:
return &decodeError{msg: "input string too long", typ: typ}
case errNotAtEOL:
return &decodeError{msg: "input list has too many elements", typ: typ}
}
return err
}
func addErrorContext(err error, ctx string) error {
if decErr, ok := err.(*decodeError); ok {
decErr.ctx = append(decErr.ctx, ctx)
}
return err
}
var (
decoderInterface = reflect.TypeOf(new(Decoder)).Elem()
bigInt = reflect.TypeOf(big.Int{})
)
func makeDecoder(typ reflect.Type, tags tags) (dec decoder, err error) {
kind := typ.Kind()
switch {
case typ == rawValueType:
return decodeRawValue, nil
case typ.AssignableTo(reflect.PtrTo(bigInt)):
return decodeBigInt, nil
case typ.AssignableTo(bigInt):
return decodeBigIntNoPtr, nil
case kind == reflect.Ptr:
return makePtrDecoder(typ, tags)
case reflect.PtrTo(typ).Implements(decoderInterface):
return decodeDecoder, nil
case isUint(kind):
return decodeUint, nil
case kind == reflect.Bool:
return decodeBool, nil
case kind == reflect.String:
return decodeString, nil
case kind == reflect.Slice || kind == reflect.Array:
return makeListDecoder(typ, tags)
case kind == reflect.Struct:
return makeStructDecoder(typ)
case kind == reflect.Interface:
return decodeInterface, nil
default:
return nil, fmt.Errorf("rlp: type %v is not RLP-serializable", typ)
}
}
func decodeRawValue(s *Stream, val reflect.Value) error {
r, err := s.Raw()
if err != nil {
return err
}
val.SetBytes(r)
return nil
}
func decodeUint(s *Stream, val reflect.Value) error {
typ := val.Type()
num, err := s.uint(typ.Bits())
if err != nil {
return wrapStreamError(err, val.Type())
}
val.SetUint(num)
return nil
}
func decodeBool(s *Stream, val reflect.Value) error {
b, err := s.Bool()
if err != nil {
return wrapStreamError(err, val.Type())
}
val.SetBool(b)
return nil
}
func decodeString(s *Stream, val reflect.Value) error {
b, err := s.Bytes()
if err != nil {
return wrapStreamError(err, val.Type())
}
val.SetString(string(b))
return nil
}
func decodeBigIntNoPtr(s *Stream, val reflect.Value) error {
return decodeBigInt(s, val.Addr())
}
func decodeBigInt(s *Stream, val reflect.Value) error {
var buffer []byte
kind, size, err := s.Kind()
switch {
case err != nil:
return wrapStreamError(err, val.Type())
case kind == List:
return wrapStreamError(ErrExpectedString, val.Type())
case kind == Byte:
buffer = s.uintbuf[:1]
buffer[0] = s.byteval
s.kind = -1 // re-arm Kind
case size == 0:
// Avoid zero-length read.
s.kind = -1
case size <= uint64(len(s.uintbuf)):
// For integers smaller than s.uintbuf, allocating a buffer
// can be avoided.
buffer = s.uintbuf[:size]
if err := s.readFull(buffer); err != nil {
return wrapStreamError(err, val.Type())
}
// Reject inputs where single byte encoding should have been used.
if size == 1 && buffer[0] < 128 {
return wrapStreamError(ErrCanonSize, val.Type())
}
default:
// For large integers, a temporary buffer is needed.
buffer = make([]byte, size)
if err := s.readFull(buffer); err != nil {
return wrapStreamError(err, val.Type())
}
}
// Reject leading zero bytes.
if len(buffer) > 0 && buffer[0] == 0 {
return wrapStreamError(ErrCanonInt, val.Type())
}
// Set the integer bytes.
i := val.Interface().(*big.Int)
if i == nil {
i = new(big.Int)
val.Set(reflect.ValueOf(i))
}
i.SetBytes(buffer)
return nil
}
func makeListDecoder(typ reflect.Type, tag tags) (decoder, error) {
etype := typ.Elem()
if etype.Kind() == reflect.Uint8 && !reflect.PtrTo(etype).Implements(decoderInterface) {
if typ.Kind() == reflect.Array {
return decodeByteArray, nil
}
return decodeByteSlice, nil
}
etypeinfo := theTC.infoWhileGenerating(etype, tags{})
if etypeinfo.decoderErr != nil {
return nil, etypeinfo.decoderErr
}
var dec decoder
switch {
case typ.Kind() == reflect.Array:
dec = func(s *Stream, val reflect.Value) error {
return decodeListArray(s, val, etypeinfo.decoder)
}
case tag.tail:
// A slice with "tail" tag can occur as the last field
// of a struct and is supposed to swallow all remaining
// list elements. The struct decoder already called s.List,
// proceed directly to decoding the elements.
dec = func(s *Stream, val reflect.Value) error {
return decodeSliceElems(s, val, etypeinfo.decoder)
}
default:
dec = func(s *Stream, val reflect.Value) error {
return decodeListSlice(s, val, etypeinfo.decoder)
}
}
return dec, nil
}
func decodeListSlice(s *Stream, val reflect.Value, elemdec decoder) error {
size, err := s.List()
if err != nil {
return wrapStreamError(err, val.Type())
}
if size == 0 {
val.Set(reflect.MakeSlice(val.Type(), 0, 0))
return s.ListEnd()
}
if err := decodeSliceElems(s, val, elemdec); err != nil {
return err
}
return s.ListEnd()
}
func decodeSliceElems(s *Stream, val reflect.Value, elemdec decoder) error {
i := 0
for ; ; i++ {
// grow slice if necessary
if i >= val.Cap() {
newcap := val.Cap() + val.Cap()/2
if newcap < 4 {
newcap = 4
}
newv := reflect.MakeSlice(val.Type(), val.Len(), newcap)
reflect.Copy(newv, val)
val.Set(newv)
}
if i >= val.Len() {
val.SetLen(i + 1)
}
// decode into element
if err := elemdec(s, val.Index(i)); err == EOL {
break
} else if err != nil {
return addErrorContext(err, fmt.Sprint("[", i, "]"))
}
}
if i < val.Len() {
val.SetLen(i)
}
return nil
}
func decodeListArray(s *Stream, val reflect.Value, elemdec decoder) error {
if _, err := s.List(); err != nil {
return wrapStreamError(err, val.Type())
}
vlen := val.Len()
i := 0
for ; i < vlen; i++ {
if err := elemdec(s, val.Index(i)); err == EOL {
break
} else if err != nil {
return addErrorContext(err, fmt.Sprint("[", i, "]"))
}
}
if i < vlen {
return &decodeError{msg: "input list has too few elements", typ: val.Type()}
}
return wrapStreamError(s.ListEnd(), val.Type())
}
func decodeByteSlice(s *Stream, val reflect.Value) error {
b, err := s.Bytes()
if err != nil {
return wrapStreamError(err, val.Type())
}
val.SetBytes(b)
return nil
}
func decodeByteArray(s *Stream, val reflect.Value) error {
kind, size, err := s.Kind()
if err != nil {
return err
}
slice := byteArrayBytes(val)
switch kind {
case Byte:
if len(slice) == 0 {
return &decodeError{msg: "input string too long", typ: val.Type()}
} else if len(slice) > 1 {
return &decodeError{msg: "input string too short", typ: val.Type()}
}
slice[0] = s.byteval
s.kind = -1
case String:
if uint64(len(slice)) < size {
return &decodeError{msg: "input string too long", typ: val.Type()}
}
if uint64(len(slice)) > size {
return &decodeError{msg: "input string too short", typ: val.Type()}
}
if err := s.readFull(slice); err != nil {
return err
}
// Reject cases where single byte encoding should have been used.
if size == 1 && slice[0] < 128 {
return wrapStreamError(ErrCanonSize, val.Type())
}
case List:
return wrapStreamError(ErrExpectedString, val.Type())
}
return nil
}
func makeStructDecoder(typ reflect.Type) (decoder, error) {
fields, err := structFields(typ)
if err != nil {
return nil, err
}
for _, f := range fields {
if f.info.decoderErr != nil {
return nil, structFieldError{typ, f.index, f.info.decoderErr}
}
}
dec := func(s *Stream, val reflect.Value) (err error) {
if _, err := s.List(); err != nil {
return wrapStreamError(err, typ)
}
for i, f := range fields {
err := f.info.decoder(s, val.Field(f.index))
if err == EOL {
if f.optional {
// The field is optional, so reaching the end of the list before
// reaching the last field is acceptable. All remaining undecoded
// fields are zeroed.
zeroFields(val, fields[i:])
break
}
return &decodeError{msg: "too few elements", typ: typ}
} else if err != nil {
return addErrorContext(err, "."+typ.Field(f.index).Name)
}
}
return wrapStreamError(s.ListEnd(), typ)
}
return dec, nil
}
func zeroFields(structval reflect.Value, fields []field) {
for _, f := range fields {
fv := structval.Field(f.index)
fv.Set(reflect.Zero(fv.Type()))
}
}
// makePtrDecoder creates a decoder that decodes into the pointer's element type.
func makePtrDecoder(typ reflect.Type, tag tags) (decoder, error) {
etype := typ.Elem()
etypeinfo := theTC.infoWhileGenerating(etype, tags{})
switch {
case etypeinfo.decoderErr != nil:
return nil, etypeinfo.decoderErr
case !tag.nilOK:
return makeSimplePtrDecoder(etype, etypeinfo), nil
default:
return makeNilPtrDecoder(etype, etypeinfo, tag.nilKind), nil
}
}
func makeSimplePtrDecoder(etype reflect.Type, etypeinfo *typeinfo) decoder {
return func(s *Stream, val reflect.Value) (err error) {
newval := val
if val.IsNil() {
newval = reflect.New(etype)
}
if err = etypeinfo.decoder(s, newval.Elem()); err == nil {
val.Set(newval)
}
return err
}
}
// makeNilPtrDecoder creates a decoder that decodes empty values as nil. Non-empty
// values are decoded into a value of the element type, just like makePtrDecoder does.
//
// This decoder is used for pointer-typed struct fields with struct tag "nil".
func makeNilPtrDecoder(etype reflect.Type, etypeinfo *typeinfo, nilKind Kind) decoder {
typ := reflect.PtrTo(etype)
nilPtr := reflect.Zero(typ)
return func(s *Stream, val reflect.Value) (err error) {
kind, size, err := s.Kind()
if err != nil {
val.Set(nilPtr)
return wrapStreamError(err, typ)
}
// Handle empty values as a nil pointer.
if kind != Byte && size == 0 {
if kind != nilKind {
return &decodeError{
msg: fmt.Sprintf("wrong kind of empty value (got %v, want %v)", kind, nilKind),
typ: typ,
}
}
// rearm s.Kind. This is important because the input
// position must advance to the next value even though
// we don't read anything.
s.kind = -1
val.Set(nilPtr)
return nil
}
newval := val
if val.IsNil() {
newval = reflect.New(etype)
}
if err = etypeinfo.decoder(s, newval.Elem()); err == nil {
val.Set(newval)
}
return err
}
}
var ifsliceType = reflect.TypeOf([]interface{}{})
func decodeInterface(s *Stream, val reflect.Value) error {
if val.Type().NumMethod() != 0 {
return fmt.Errorf("rlp: type %v is not RLP-serializable", val.Type())
}
kind, _, err := s.Kind()
if err != nil {
return err
}
if kind == List {
slice := reflect.New(ifsliceType).Elem()
if err := decodeListSlice(s, slice, decodeInterface); err != nil {
return err
}
val.Set(slice)
} else {
b, err := s.Bytes()
if err != nil {
return err
}
val.Set(reflect.ValueOf(b))
}
return nil
}
func decodeDecoder(s *Stream, val reflect.Value) error {
return val.Addr().Interface().(Decoder).DecodeRLP(s)
}
// Kind represents the kind of value contained in an RLP stream.
type Kind int8
const (
Byte Kind = iota
String
List
)
func (k Kind) String() string {
switch k {
case Byte:
return "Byte"
case String:
return "String"
case List:
return "List"
default:
return fmt.Sprintf("Unknown(%d)", k)
}
}
// ByteReader must be implemented by any input reader for a Stream. It
// is implemented by e.g. bufio.Reader and bytes.Reader.
type ByteReader interface {
io.Reader
io.ByteReader
}
// Stream can be used for piecemeal decoding of an input stream. This
// is useful if the input is very large or if the decoding rules for a
// type depend on the input structure. Stream does not keep an
// internal buffer. After decoding a value, the input reader will be
// positioned just before the type information for the next value.
//
// When decoding a list and the input position reaches the declared
// length of the list, all operations will return error EOL.
// The end of the list must be acknowledged using ListEnd to continue
// reading the enclosing list.
//
// Stream is not safe for concurrent use.
type Stream struct {
r ByteReader
remaining uint64 // number of bytes remaining to be read from r
size uint64 // size of value ahead
kinderr error // error from last readKind
stack []uint64 // list sizes
uintbuf [32]byte // auxiliary buffer for integer decoding
kind Kind // kind of value ahead
byteval byte // value of single byte in type tag
limited bool // true if input limit is in effect
}
// NewStream creates a new decoding stream reading from r.
//
// If r implements the ByteReader interface, Stream will
// not introduce any buffering.
//
// For non-toplevel values, Stream returns ErrElemTooLarge
// for values that do not fit into the enclosing list.
//
// Stream supports an optional input limit. If a limit is set, the
// size of any toplevel value will be checked against the remaining
// input length. Stream operations that encounter a value exceeding
// the remaining input length will return ErrValueTooLarge. The limit
// can be set by passing a non-zero value for inputLimit.
//
// If r is a bytes.Reader or strings.Reader, the input limit is set to
// the length of r's underlying data unless an explicit limit is
// provided.
func NewStream(r io.Reader, inputLimit uint64) *Stream {
s := new(Stream)
s.Reset(r, inputLimit)
return s
}
// NewListStream creates a new stream that pretends to be positioned
// at an encoded list of the given length.
func NewListStream(r io.Reader, len uint64) *Stream {
s := new(Stream)
s.Reset(r, len)
s.kind = List
s.size = len
return s
}
// Bytes reads an RLP string and returns its contents as a byte slice.
// If the input does not contain an RLP string, the returned
// error will be ErrExpectedString.
func (s *Stream) Bytes() ([]byte, error) {
kind, size, err := s.Kind()
if err != nil {
return nil, err
}
switch kind {
case Byte:
s.kind = -1 // rearm Kind
return []byte{s.byteval}, nil
case String:
b := make([]byte, size)
if err = s.readFull(b); err != nil {
return nil, err
}
if size == 1 && b[0] < 128 {
return nil, ErrCanonSize
}
return b, nil
default:
return nil, ErrExpectedString
}
}
// Raw reads a raw encoded value including RLP type information.
func (s *Stream) Raw() ([]byte, error) {
kind, size, err := s.Kind()
if err != nil {
return nil, err
}
if kind == Byte {
s.kind = -1 // rearm Kind
return []byte{s.byteval}, nil
}
// The original header has already been read and is no longer
// available. Read content and put a new header in front of it.
start := headsize(size)
buf := make([]byte, uint64(start)+size)
if err := s.readFull(buf[start:]); err != nil {
return nil, err
}
if kind == String {
puthead(buf, 0x80, 0xB7, size)
} else {
puthead(buf, 0xC0, 0xF7, size)
}
return buf, nil
}
// Uint reads an RLP string of up to 8 bytes and returns its contents
// as an unsigned integer. If the input does not contain an RLP string, the
// returned error will be ErrExpectedString.
func (s *Stream) Uint() (uint64, error) {
return s.uint(64)
}
func (s *Stream) uint(maxbits int) (uint64, error) {
kind, size, err := s.Kind()
if err != nil {
return 0, err
}
switch kind {
case Byte:
if s.byteval == 0 {
return 0, ErrCanonInt
}
s.kind = -1 // rearm Kind
return uint64(s.byteval), nil
case String:
if size > uint64(maxbits/8) {
return 0, errUintOverflow
}
v, err := s.readUint(byte(size))
switch {
case err == ErrCanonSize:
// Adjust error because we're not reading a size right now.
return 0, ErrCanonInt
case err != nil:
return 0, err
case size > 0 && v < 128:
return 0, ErrCanonSize
default:
return v, nil
}
default:
return 0, ErrExpectedString
}
}
// Bool reads an RLP string of up to 1 byte and returns its contents
// as a boolean. If the input does not contain an RLP string, the
// returned error will be ErrExpectedString.
func (s *Stream) Bool() (bool, error) {
num, err := s.uint(8)
if err != nil {
return false, err
}
switch num {
case 0:
return false, nil
case 1:
return true, nil
default:
return false, fmt.Errorf("rlp: invalid boolean value: %d", num)
}
}
// List starts decoding an RLP list. If the input does not contain a
// list, the returned error will be ErrExpectedList. When the list's
// end has been reached, any Stream operation will return EOL.
func (s *Stream) List() (size uint64, err error) {
kind, size, err := s.Kind()
if err != nil {
return 0, err
}
if kind != List {
return 0, ErrExpectedList
}
// Remove size of inner list from outer list before pushing the new size
// onto the stack. This ensures that the remaining outer list size will
// be correct after the matching call to ListEnd.
if inList, limit := s.listLimit(); inList {
s.stack[len(s.stack)-1] = limit - size
}
s.stack = append(s.stack, size)
s.kind = -1
s.size = 0
return size, nil
}
// ListEnd returns to the enclosing list.
// The input reader must be positioned at the end of a list.
func (s *Stream) ListEnd() error {
// Ensure that no more data is remaining in the current list.
if inList, listLimit := s.listLimit(); !inList {
return errNotInList
} else if listLimit > 0 {
return errNotAtEOL
}
s.stack = s.stack[:len(s.stack)-1] // pop
s.kind = -1
s.size = 0
return nil
}
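// Example usage (an illustrative sketch, not part of the original source): piecemeal
// decoding of a two-element list of unsigned integers, assuming s is a *Stream
// positioned at the list (error handling abbreviated):
//
//    if _, err := s.List(); err != nil {
//        return err
//    }
//    x, _ := s.Uint()
//    y, _ := s.Uint()
//    if err := s.ListEnd(); err != nil {
//        return err
//    }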
// Decode decodes a value and stores the result in the value pointed
// to by val. Please see the documentation for the Decode function
// to learn about the decoding rules.
func (s *Stream) Decode(val interface{}) error {
if val == nil {
return errDecodeIntoNil
}
rval := reflect.ValueOf(val)
rtyp := rval.Type()
if rtyp.Kind() != reflect.Ptr {
return errNoPointer
}
if rval.IsNil() {
return errDecodeIntoNil
}
decoder, err := cachedDecoder(rtyp.Elem())
if err != nil {
return err
}
err = decoder(s, rval.Elem())
if decErr, ok := err.(*decodeError); ok && len(decErr.ctx) > 0 {
// Add decode target type to error so context has more meaning.
decErr.ctx = append(decErr.ctx, fmt.Sprint("(", rtyp.Elem(), ")"))
}
return err
}
// Reset discards any information about the current decoding context
// and starts reading from r. This method is meant to facilitate reuse
// of a preallocated Stream across many decoding operations.
//
// If r does not also implement ByteReader, Stream will do its own
// buffering.
func (s *Stream) Reset(r io.Reader, inputLimit uint64) {
if inputLimit > 0 {
s.remaining = inputLimit
s.limited = true
} else {
// Attempt to automatically discover
// the limit when reading from a byte slice.
switch br := r.(type) {
case *bytes.Reader:
s.remaining = uint64(br.Len())
s.limited = true
case *bytes.Buffer:
s.remaining = uint64(br.Len())
s.limited = true
case *strings.Reader:
s.remaining = uint64(br.Len())
s.limited = true
default:
s.limited = false
}
}
// Wrap r with a buffer if it doesn't have one.
bufr, ok := r.(ByteReader)
if !ok {
bufr = bufio.NewReader(r)
}
s.r = bufr
// Reset the decoding context.
s.stack = s.stack[:0]
s.size = 0
s.kind = -1
s.kinderr = nil
s.byteval = 0
s.uintbuf = [32]byte{}
}
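// Example usage (an illustrative sketch, not part of the original source): reusing one
// Stream across many inputs, assuming blobs is a [][]byte of independent encodings:
//
//    s := new(Stream)
//    for _, blob := range blobs {
//        s.Reset(bytes.NewReader(blob), uint64(len(blob)))
//        // ... decode one or more values from s ...
//    }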
// Kind returns the kind and size of the next value in the
// input stream.
//
// The returned size is the number of bytes that make up the value.
// For kind == Byte, the size is zero because the value is
// contained in the type tag.
//
// The first call to Kind will read size information from the input
// reader and leave it positioned at the start of the actual bytes of
// the value. Subsequent calls to Kind (until the value is decoded)
// will not advance the input reader and return cached information.
func (s *Stream) Kind() (kind Kind, size uint64, err error) {
if s.kind >= 0 {
return s.kind, s.size, s.kinderr
}
// Check for end of list. This needs to be done here because readKind
// checks against the list size, and would return the wrong error.
inList, listLimit := s.listLimit()
if inList && listLimit == 0 {
return 0, 0, EOL
}
// Read the actual size tag.
s.kind, s.size, s.kinderr = s.readKind()
if s.kinderr == nil {
// Check the data size of the value ahead against input limits. This
// is done here because many decoders require allocating an input
// buffer matching the value size. Checking it here protects those
// decoders from inputs declaring very large value size.
if inList && s.size > listLimit {
s.kinderr = ErrElemTooLarge
} else if s.limited && s.size > s.remaining {
s.kinderr = ErrValueTooLarge
}
}
return s.kind, s.size, s.kinderr
}
func (s *Stream) readKind() (kind Kind, size uint64, err error) {
b, err := s.readByte()
if err != nil {
if len(s.stack) == 0 {
// At toplevel, adjust the error to actual EOF. io.EOF is
// used by callers to determine when to stop decoding.
switch err {
case io.ErrUnexpectedEOF:
err = io.EOF
case ErrValueTooLarge:
err = io.EOF
}
}
return 0, 0, err
}
s.byteval = 0
switch {
case b < 0x80:
// For a single byte whose value is in the [0x00, 0x7F] range, that byte
// is its own RLP encoding.
s.byteval = b
return Byte, 0, nil
case b < 0xB8:
// Otherwise, if a string is 0-55 bytes long, the RLP encoding consists
// of a single byte with value 0x80 plus the length of the string
// followed by the string. The range of the first byte is thus [0x80, 0xB7].
return String, uint64(b - 0x80), nil
case b < 0xC0:
// If a string is more than 55 bytes long, the RLP encoding consists of a
// single byte with value 0xB7 plus the length of the length of the
// string in binary form, followed by the length of the string, followed
// by the string. For example, a length-1024 string would be encoded as
// 0xB90400 followed by the string. The range of the first byte is thus
// [0xB8, 0xBF].
size, err = s.readUint(b - 0xB7)
if err == nil && size < 56 {
err = ErrCanonSize
}
return String, size, err
case b < 0xF8:
// If the total payload of a list (i.e. the combined length of all its
// items) is 0-55 bytes long, the RLP encoding consists of a single byte
// with value 0xC0 plus the length of the list followed by the
// concatenation of the RLP encodings of the items. The range of the
// first byte is thus [0xC0, 0xF7].
return List, uint64(b - 0xC0), nil
default:
// If the total payload of a list is more than 55 bytes long, the RLP
// encoding consists of a single byte with value 0xF7 plus the length of
// the length of the payload in binary form, followed by the length of
// the payload, followed by the concatenation of the RLP encodings of
// the items. The range of the first byte is thus [0xF8, 0xFF].
size, err = s.readUint(b - 0xF7)
if err == nil && size < 56 {
err = ErrCanonSize
}
return List, size, err
}
}
func (s *Stream) readUint(size byte) (uint64, error) {
switch size {
case 0:
s.kind = -1 // rearm Kind
return 0, nil
case 1:
b, err := s.readByte()
return uint64(b), err
default:
buffer := s.uintbuf[:8]
for i := range buffer {
buffer[i] = 0
}
start := int(8 - size)
if err := s.readFull(buffer[start:]); err != nil {
return 0, err
}
if buffer[start] == 0 {
// Note: readUint is also used to decode integer values.
// The error needs to be adjusted to become ErrCanonInt in this case.
return 0, ErrCanonSize
}
return binary.BigEndian.Uint64(buffer[:]), nil
}
}
// readFull reads into buf from the underlying stream.
func (s *Stream) readFull(buf []byte) (err error) {
if err := s.willRead(uint64(len(buf))); err != nil {
return err
}
var nn, n int
for n < len(buf) && err == nil {
nn, err = s.r.Read(buf[n:])
n += nn
}
if err == io.EOF {
if n < len(buf) {
err = io.ErrUnexpectedEOF
} else {
// Readers are allowed to give EOF even though the read succeeded.
// In such cases, we discard the EOF, like io.ReadFull() does.
err = nil
}
}
return err
}
// readByte reads a single byte from the underlying stream.
func (s *Stream) readByte() (byte, error) {
if err := s.willRead(1); err != nil {
return 0, err
}
b, err := s.r.ReadByte()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return b, err
}
// willRead is called before any read from the underlying stream. It checks
// n against size limits, and updates the limits if n doesn't overflow them.
func (s *Stream) willRead(n uint64) error {
s.kind = -1 // rearm Kind
if inList, limit := s.listLimit(); inList {
if n > limit {
return ErrElemTooLarge
}
s.stack[len(s.stack)-1] = limit - n
}
if s.limited {
if n > s.remaining {
return ErrValueTooLarge
}
s.remaining -= n
}
return nil
}
// listLimit returns the amount of data remaining in the innermost list.
func (s *Stream) listLimit() (inList bool, limit uint64) {
if len(s.stack) == 0 {
return false, 0
}
return true, s.stack[len(s.stack)-1]
}
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rlp
import (
"fmt"
"io"
"math/big"
"reflect"
"sync"
)
var (
// Common encoded values.
// These are useful when implementing EncodeRLP.
EmptyString = []byte{0x80}
EmptyList = []byte{0xC0}
)
// Encoder is implemented by types that require custom
// encoding rules or want to encode private fields.
type Encoder interface {
// EncodeRLP should write the RLP encoding of its receiver to w.
// If the implementation is a pointer method, it may also be
// called for nil pointers.
//
// Implementations should generate valid RLP. The data written is
// not verified at the moment, but a future version might. It is
// recommended to write only a single value but writing multiple
// values or no value at all is also permitted.
EncodeRLP(io.Writer) error
}
// Encode writes the RLP encoding of val to w. Note that Encode may
// perform many small writes in some cases. Consider making w
// buffered.
//
// Please see package-level documentation of encoding rules.
func Encode(w io.Writer, val interface{}) error {
if outer, ok := w.(*encbuf); ok {
// Encode was called by some type's EncodeRLP.
// Avoid copying by writing to the outer encbuf directly.
return outer.encode(val)
}
eb := encbufPool.Get().(*encbuf)
defer encbufPool.Put(eb)
eb.reset()
if err := eb.encode(val); err != nil {
return err
}
return eb.toWriter(w)
}
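// Example usage (an illustrative sketch, not part of the original source): encoding to a
// buffered writer, assuming v is any RLP-encodable value:
//
//    var buf bytes.Buffer
//    if err := Encode(&buf, v); err != nil {
//        return err
//    }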
// EncodeToBytes returns the RLP encoding of val.
// Please see package-level documentation for the encoding rules.
func EncodeToBytes(val interface{}) ([]byte, error) {
eb := encbufPool.Get().(*encbuf)
defer encbufPool.Put(eb)
eb.reset()
if err := eb.encode(val); err != nil {
return nil, err
}
return eb.toBytes(), nil
}
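// Example usage (an illustrative sketch, not part of the original source): a small
// round trip through EncodeToBytes and DecodeBytes (error handling omitted):
//
//    enc, _ := EncodeToBytes(uint64(42)) // enc == []byte{0x2a}: bytes below 0x80 encode as themselves
//    var out uint64
//    _ = DecodeBytes(enc, &out)          // out == 42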
// EncodeToReader returns a reader from which the RLP encoding of val
// can be read. The returned size is the total size of the encoded
// data.
//
// Please see the documentation of Encode for the encoding rules.
func EncodeToReader(val interface{}) (size int, r io.Reader, err error) {
eb := encbufPool.Get().(*encbuf)
eb.reset()
if err := eb.encode(val); err != nil {
return 0, nil, err
}
return eb.size(), &encReader{buf: eb}, nil
}
type listhead struct {
offset int // index of this header in string data
size int // total size of encoded data (including list headers)
}
// encode writes head to the given buffer, which must be at least
// 9 bytes long. It returns the encoded bytes.
func (head *listhead) encode(buf []byte) []byte {
return buf[:puthead(buf, 0xC0, 0xF7, uint64(head.size))]
}
// headsize returns the size of a list or string header
// for a value of the given size.
func headsize(size uint64) int {
if size < 56 {
return 1
}
return 1 + intsize(size)
}
// puthead writes a list or string header to buf.
// buf must be at least 9 bytes long.
func puthead(buf []byte, smalltag, largetag byte, size uint64) int {
if size < 56 {
buf[0] = smalltag + byte(size)
return 1
}
sizesize := putint(buf[1:], size)
buf[0] = largetag + byte(sizesize)
return sizesize + 1
}
type encbuf struct {
str []byte // string data, contains everything except list headers
lheads []listhead // all list headers
lhsize int // sum of sizes of all encoded list headers
sizebuf [9]byte // auxiliary buffer for uint encoding
}
// encbufs are pooled.
var encbufPool = sync.Pool{
New: func() interface{} { return new(encbuf) },
}
func (w *encbuf) reset() {
w.lhsize = 0
w.str = w.str[:0]
w.lheads = w.lheads[:0]
}
// encbuf implements io.Writer so it can be passed into EncodeRLP.
func (w *encbuf) Write(b []byte) (int, error) {
w.str = append(w.str, b...)
return len(b), nil
}
func (w *encbuf) encode(val interface{}) error {
rval := reflect.ValueOf(val)
writer, err := cachedWriter(rval.Type())
if err != nil {
return err
}
return writer(rval, w)
}
func (w *encbuf) encodeStringHeader(size int) {
if size < 56 {
w.str = append(w.str, 0x80+byte(size))
} else {
sizesize := putint(w.sizebuf[1:], uint64(size))
w.sizebuf[0] = 0xB7 + byte(sizesize)
w.str = append(w.str, w.sizebuf[:sizesize+1]...)
}
}
func (w *encbuf) encodeString(b []byte) {
if len(b) == 1 && b[0] <= 0x7F {
// fits single byte, no string header
w.str = append(w.str, b[0])
} else {
w.encodeStringHeader(len(b))
w.str = append(w.str, b...)
}
}
func (w *encbuf) encodeUint(i uint64) {
if i == 0 {
w.str = append(w.str, 0x80)
} else if i < 128 {
// fits single byte
w.str = append(w.str, byte(i))
} else {
s := putint(w.sizebuf[1:], i)
w.sizebuf[0] = 0x80 + byte(s)
w.str = append(w.str, w.sizebuf[:s+1]...)
}
}
// list adds a new list header to the header stack. It returns the index
// of the header. The caller must call listEnd with this index after encoding
// the content of the list.
func (w *encbuf) list() int {
w.lheads = append(w.lheads, listhead{offset: len(w.str), size: w.lhsize})
return len(w.lheads) - 1
}
func (w *encbuf) listEnd(index int) {
lh := &w.lheads[index]
lh.size = w.size() - lh.offset - lh.size
if lh.size < 56 {
w.lhsize++ // length encoded into kind tag
} else {
w.lhsize += 1 + intsize(uint64(lh.size))
}
}
func (w *encbuf) size() int {
return len(w.str) + w.lhsize
}
func (w *encbuf) toBytes() []byte {
out := make([]byte, w.size())
strpos := 0
pos := 0
for _, head := range w.lheads {
// write string data before header
n := copy(out[pos:], w.str[strpos:head.offset])
pos += n
strpos += n
// write the header
enc := head.encode(out[pos:])
pos += len(enc)
}
// copy string data after the last list header
copy(out[pos:], w.str[strpos:])
return out
}
func (w *encbuf) toWriter(out io.Writer) (err error) {
strpos := 0
for _, head := range w.lheads {
// write string data before header
if head.offset-strpos > 0 {
n, err := out.Write(w.str[strpos:head.offset])
strpos += n
if err != nil {
return err
}
}
// write the header
enc := head.encode(w.sizebuf[:])
if _, err = out.Write(enc); err != nil {
return err
}
}
if strpos < len(w.str) {
// write string data after the last list header
_, err = out.Write(w.str[strpos:])
}
return err
}
// encReader is the io.Reader returned by EncodeToReader.
// It releases its encbuf at EOF.
type encReader struct {
buf *encbuf // the buffer we're reading from. this is nil when we're at EOF.
lhpos int // index of list header that we're reading
strpos int // current position in string buffer
piece []byte // next piece to be read
}
func (r *encReader) Read(b []byte) (n int, err error) {
for {
if r.piece = r.next(); r.piece == nil {
// Put the encode buffer back into the pool at EOF when it
// is first encountered. Subsequent calls still return EOF
// as the error but the buffer is no longer valid.
if r.buf != nil {
encbufPool.Put(r.buf)
r.buf = nil
}
return n, io.EOF
}
nn := copy(b[n:], r.piece)
n += nn
if nn < len(r.piece) {
// piece didn't fit, see you next time.
r.piece = r.piece[nn:]
return n, nil
}
r.piece = nil
}
}
// next returns the next piece of data to be read.
// it returns nil at EOF.
func (r *encReader) next() []byte {
switch {
case r.buf == nil:
return nil
case r.piece != nil:
// There is still data available for reading.
return r.piece
case r.lhpos < len(r.buf.lheads):
// We're before the last list header.
head := r.buf.lheads[r.lhpos]
sizebefore := head.offset - r.strpos
if sizebefore > 0 {
// String data before header.
p := r.buf.str[r.strpos:head.offset]
r.strpos += sizebefore
return p
}
r.lhpos++
return head.encode(r.buf.sizebuf[:])
case r.strpos < len(r.buf.str):
// String data at the end, after all list headers.
p := r.buf.str[r.strpos:]
r.strpos = len(r.buf.str)
return p
default:
return nil
}
}
var encoderInterface = reflect.TypeOf(new(Encoder)).Elem()
// makeWriter creates a writer function for the given type.
func makeWriter(typ reflect.Type, ts tags) (writer, error) {
kind := typ.Kind()
switch {
case typ == rawValueType:
return writeRawValue, nil
case typ.AssignableTo(reflect.PtrTo(bigInt)):
return writeBigIntPtr, nil
case typ.AssignableTo(bigInt):
return writeBigIntNoPtr, nil
case kind == reflect.Ptr:
return makePtrWriter(typ, ts)
case reflect.PtrTo(typ).Implements(encoderInterface):
return makeEncoderWriter(typ), nil
case isUint(kind):
return writeUint, nil
case kind == reflect.Bool:
return writeBool, nil
case kind == reflect.String:
return writeString, nil
case kind == reflect.Slice && isByte(typ.Elem()):
return writeBytes, nil
case kind == reflect.Array && isByte(typ.Elem()):
return makeByteArrayWriter(typ), nil
case kind == reflect.Slice || kind == reflect.Array:
return makeSliceWriter(typ, ts)
case kind == reflect.Struct:
return makeStructWriter(typ)
case kind == reflect.Interface:
return writeInterface, nil
default:
return nil, fmt.Errorf("rlp: type %v is not RLP-serializable", typ)
}
}
func writeRawValue(val reflect.Value, w *encbuf) error {
w.str = append(w.str, val.Bytes()...)
return nil
}
func writeUint(val reflect.Value, w *encbuf) error {
w.encodeUint(val.Uint())
return nil
}
func writeBool(val reflect.Value, w *encbuf) error {
if val.Bool() {
w.str = append(w.str, 0x01)
} else {
w.str = append(w.str, 0x80)
}
return nil
}
func writeBigIntPtr(val reflect.Value, w *encbuf) error {
ptr := val.Interface().(*big.Int)
if ptr == nil {
w.str = append(w.str, 0x80)
return nil
}
return writeBigInt(ptr, w)
}
func writeBigIntNoPtr(val reflect.Value, w *encbuf) error {
i := val.Interface().(big.Int)
return writeBigInt(&i, w)
}
// wordBytes is the number of bytes in a big.Word
const wordBytes = (32 << (uint64(^big.Word(0)) >> 63)) / 8
func writeBigInt(i *big.Int, w *encbuf) error {
if i.Sign() == -1 {
return fmt.Errorf("rlp: cannot encode negative *big.Int")
}
bitlen := i.BitLen()
if bitlen <= 64 {
w.encodeUint(i.Uint64())
return nil
}
// Integer is larger than 64 bits, encode from i.Bits().
// The minimal byte length is bitlen rounded up to the next
// multiple of 8, divided by 8.
length := ((bitlen + 7) & -8) >> 3
w.encodeStringHeader(length)
w.str = append(w.str, make([]byte, length)...)
index := length
buf := w.str[len(w.str)-length:]
for _, d := range i.Bits() {
for j := 0; j < wordBytes && index > 0; j++ {
index--
buf[index] = byte(d)
d >>= 8
}
}
return nil
}
func writeBytes(val reflect.Value, w *encbuf) error {
w.encodeString(val.Bytes())
return nil
}
func makeByteArrayWriter(typ reflect.Type) writer {
switch typ.Len() {
case 0:
return writeLengthZeroByteArray
case 1:
return writeLengthOneByteArray
default:
return writeByteArray
}
}
func writeLengthZeroByteArray(val reflect.Value, w *encbuf) error {
w.str = append(w.str, 0x80)
return nil
}
func writeLengthOneByteArray(val reflect.Value, w *encbuf) error {
b := byte(val.Index(0).Uint())
if b <= 0x7f {
w.str = append(w.str, b)
} else {
w.str = append(w.str, 0x81, b)
}
return nil
}
func writeByteArray(val reflect.Value, w *encbuf) error {
if !val.CanAddr() {
// Getting the byte slice of val requires it to be addressable. Make it
// addressable by copying.
copy := reflect.New(val.Type()).Elem()
copy.Set(val)
val = copy
}
slice := byteArrayBytes(val)
w.encodeStringHeader(len(slice))
w.str = append(w.str, slice...)
return nil
}
func writeString(val reflect.Value, w *encbuf) error {
s := val.String()
if len(s) == 1 && s[0] <= 0x7f {
// fits single byte, no string header
w.str = append(w.str, s[0])
} else {
w.encodeStringHeader(len(s))
w.str = append(w.str, s...)
}
return nil
}
func writeInterface(val reflect.Value, w *encbuf) error {
if val.IsNil() {
// Write empty list. This is consistent with the previous RLP
// encoder that we had and should therefore avoid any
// problems.
w.str = append(w.str, 0xC0)
return nil
}
eval := val.Elem()
writer, err := cachedWriter(eval.Type())
if err != nil {
return err
}
return writer(eval, w)
}
func makeSliceWriter(typ reflect.Type, ts tags) (writer, error) {
etypeinfo := theTC.infoWhileGenerating(typ.Elem(), tags{})
if etypeinfo.writerErr != nil {
return nil, etypeinfo.writerErr
}
writer := func(val reflect.Value, w *encbuf) error {
if !ts.tail {
defer w.listEnd(w.list())
}
vlen := val.Len()
for i := 0; i < vlen; i++ {
if err := etypeinfo.writer(val.Index(i), w); err != nil {
return err
}
}
return nil
}
return writer, nil
}
func makeStructWriter(typ reflect.Type) (writer, error) {
fields, err := structFields(typ)
if err != nil {
return nil, err
}
for _, f := range fields {
if f.info.writerErr != nil {
return nil, structFieldError{typ, f.index, f.info.writerErr}
}
}
var writer writer
firstOptionalField := firstOptionalField(fields)
if firstOptionalField == len(fields) {
// This is the writer function for structs without any optional fields.
writer = func(val reflect.Value, w *encbuf) error {
lh := w.list()
for _, f := range fields {
if err := f.info.writer(val.Field(f.index), w); err != nil {
return err
}
}
w.listEnd(lh)
return nil
}
} else {
// If there are any "optional" fields, the writer needs to perform additional
// checks to determine the output list length.
writer = func(val reflect.Value, w *encbuf) error {
lastField := len(fields) - 1
for ; lastField >= firstOptionalField; lastField-- {
if !val.Field(fields[lastField].index).IsZero() {
break
}
}
lh := w.list()
for i := 0; i <= lastField; i++ {
if err := fields[i].info.writer(val.Field(fields[i].index), w); err != nil {
return err
}
}
w.listEnd(lh)
return nil
}
}
return writer, nil
}
func makePtrWriter(typ reflect.Type, ts tags) (writer, error) {
etypeinfo := theTC.infoWhileGenerating(typ.Elem(), tags{})
if etypeinfo.writerErr != nil {
return nil, etypeinfo.writerErr
}
// Determine how to encode nil pointers.
var nilKind Kind
if ts.nilOK {
nilKind = ts.nilKind // use struct tag if provided
} else {
nilKind = defaultNilKind(typ.Elem())
}
writer := func(val reflect.Value, w *encbuf) error {
if val.IsNil() {
if nilKind == String {
w.str = append(w.str, 0x80)
} else {
w.listEnd(w.list())
}
return nil
}
return etypeinfo.writer(val.Elem(), w)
}
return writer, nil
}
func makeEncoderWriter(typ reflect.Type) writer {
if typ.Implements(encoderInterface) {
return func(val reflect.Value, w *encbuf) error {
return val.Interface().(Encoder).EncodeRLP(w)
}
}
w := func(val reflect.Value, w *encbuf) error {
if !val.CanAddr() {
// package json simply doesn't call MarshalJSON for this case, but encodes the
// value as if it didn't implement the interface. We don't want to handle it that
// way.
return fmt.Errorf("rlp: unadressable value of type %v, EncodeRLP is pointer method", val.Type())
}
return val.Addr().Interface().(Encoder).EncodeRLP(w)
}
return w
}
// putint writes i to the beginning of b in big endian byte
// order, using the least number of bytes needed to represent i.
func putint(b []byte, i uint64) (size int) {
switch {
case i < (1 << 8):
b[0] = byte(i)
return 1
case i < (1 << 16):
b[0] = byte(i >> 8)
b[1] = byte(i)
return 2
case i < (1 << 24):
b[0] = byte(i >> 16)
b[1] = byte(i >> 8)
b[2] = byte(i)
return 3
case i < (1 << 32):
b[0] = byte(i >> 24)
b[1] = byte(i >> 16)
b[2] = byte(i >> 8)
b[3] = byte(i)
return 4
case i < (1 << 40):
b[0] = byte(i >> 32)
b[1] = byte(i >> 24)
b[2] = byte(i >> 16)
b[3] = byte(i >> 8)
b[4] = byte(i)
return 5
case i < (1 << 48):
b[0] = byte(i >> 40)
b[1] = byte(i >> 32)
b[2] = byte(i >> 24)
b[3] = byte(i >> 16)
b[4] = byte(i >> 8)
b[5] = byte(i)
return 6
case i < (1 << 56):
b[0] = byte(i >> 48)
b[1] = byte(i >> 40)
b[2] = byte(i >> 32)
b[3] = byte(i >> 24)
b[4] = byte(i >> 16)
b[5] = byte(i >> 8)
b[6] = byte(i)
return 7
default:
b[0] = byte(i >> 56)
b[1] = byte(i >> 48)
b[2] = byte(i >> 40)
b[3] = byte(i >> 32)
b[4] = byte(i >> 24)
b[5] = byte(i >> 16)
b[6] = byte(i >> 8)
b[7] = byte(i)
return 8
}
}
// intsize computes the minimum number of bytes required to store i.
func intsize(i uint64) (size int) {
for size = 1; ; size++ {
if i >>= 8; i == 0 {
return size
}
}
}
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rlp
import (
"io"
"reflect"
)
// RawValue represents an encoded RLP value and can be used to delay
// RLP decoding or to precompute an encoding. Note that the decoder does
// not verify whether the content of RawValues is valid RLP.
type RawValue []byte
var rawValueType = reflect.TypeOf(RawValue{})
// ListSize returns the encoded size of an RLP list with the given
// content size.
func ListSize(contentSize uint64) uint64 {
return uint64(headsize(contentSize)) + contentSize
}
// IntSize returns the encoded size of the integer x.
func IntSize(x uint64) int {
if x < 0x80 {
return 1
}
return 1 + intsize(x)
}
// Split returns the content of first RLP value and any
// bytes after the value as subslices of b.
func Split(b []byte) (k Kind, content, rest []byte, err error) {
k, ts, cs, err := readKind(b)
if err != nil {
return 0, nil, b, err
}
return k, b[ts : ts+cs], b[ts+cs:], nil
}
// SplitString splits b into the content of an RLP string
// and any remaining bytes after the string.
func SplitString(b []byte) (content, rest []byte, err error) {
k, content, rest, err := Split(b)
if err != nil {
return nil, b, err
}
if k == List {
return nil, b, ErrExpectedString
}
return content, rest, nil
}
// SplitUint64 decodes an integer at the beginning of b.
// It also returns the remaining data after the integer in 'rest'.
func SplitUint64(b []byte) (x uint64, rest []byte, err error) {
content, rest, err := SplitString(b)
if err != nil {
return 0, b, err
}
switch {
case len(content) == 0:
return 0, rest, nil
case len(content) == 1:
if content[0] == 0 {
return 0, b, ErrCanonInt
}
return uint64(content[0]), rest, nil
case len(content) > 8:
return 0, b, errUintOverflow
default:
x, err = readSize(content, byte(len(content)))
if err != nil {
return 0, b, ErrCanonInt
}
return x, rest, nil
}
}
// SplitList splits b into the content of a list and any remaining
// bytes after the list.
func SplitList(b []byte) (content, rest []byte, err error) {
k, content, rest, err := Split(b)
if err != nil {
return nil, b, err
}
if k != List {
return nil, b, ErrExpectedList
}
return content, rest, nil
}
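// Example usage (an illustrative sketch, not part of the original source): walking the
// top-level fields of an encoded two-string list without reflection, assuming enc holds
// the encoding (error handling omitted):
//
//    content, _, _ := SplitList(enc)
//    first, rest, _ := SplitString(content)
//    second, _, _ := SplitString(rest)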
// CountValues counts the number of encoded values in b.
func CountValues(b []byte) (int, error) {
i := 0
for ; len(b) > 0; i++ {
_, tagsize, size, err := readKind(b)
if err != nil {
return 0, err
}
b = b[tagsize+size:]
}
return i, nil
}
func readKind(buf []byte) (k Kind, tagsize, contentsize uint64, err error) {
if len(buf) == 0 {
return 0, 0, 0, io.ErrUnexpectedEOF
}
b := buf[0]
switch {
case b < 0x80:
k = Byte
tagsize = 0
contentsize = 1
case b < 0xB8:
k = String
tagsize = 1
contentsize = uint64(b - 0x80)
// Reject strings that should've been single bytes.
if contentsize == 1 && len(buf) > 1 && buf[1] < 128 {
return 0, 0, 0, ErrCanonSize
}
case b < 0xC0:
k = String
tagsize = uint64(b-0xB7) + 1
contentsize, err = readSize(buf[1:], b-0xB7)
case b < 0xF8:
k = List
tagsize = 1
contentsize = uint64(b - 0xC0)
default:
k = List
tagsize = uint64(b-0xF7) + 1
contentsize, err = readSize(buf[1:], b-0xF7)
}
if err != nil {
return 0, 0, 0, err
}
// Reject values larger than the input slice.
if contentsize > uint64(len(buf))-tagsize {
return 0, 0, 0, ErrValueTooLarge
}
return k, tagsize, contentsize, err
}
func readSize(b []byte, slen byte) (uint64, error) {
if int(slen) > len(b) {
return 0, io.ErrUnexpectedEOF
}
var s uint64
switch slen {
case 1:
s = uint64(b[0])
case 2:
s = uint64(b[0])<<8 | uint64(b[1])
case 3:
s = uint64(b[0])<<16 | uint64(b[1])<<8 | uint64(b[2])
case 4:
s = uint64(b[0])<<24 | uint64(b[1])<<16 | uint64(b[2])<<8 | uint64(b[3])
case 5:
s = uint64(b[0])<<32 | uint64(b[1])<<24 | uint64(b[2])<<16 | uint64(b[3])<<8 | uint64(b[4])
case 6:
s = uint64(b[0])<<40 | uint64(b[1])<<32 | uint64(b[2])<<24 | uint64(b[3])<<16 | uint64(b[4])<<8 | uint64(b[5])
case 7:
s = uint64(b[0])<<48 | uint64(b[1])<<40 | uint64(b[2])<<32 | uint64(b[3])<<24 | uint64(b[4])<<16 | uint64(b[5])<<8 | uint64(b[6])
case 8:
s = uint64(b[0])<<56 | uint64(b[1])<<48 | uint64(b[2])<<40 | uint64(b[3])<<32 | uint64(b[4])<<24 | uint64(b[5])<<16 | uint64(b[6])<<8 | uint64(b[7])
}
// Reject sizes < 56 (shouldn't have separate size) and sizes with
// leading zero bytes.
if s < 56 || b[0] == 0 {
return 0, ErrCanonSize
}
return s, nil
}
// AppendUint64 appends the RLP encoding of i to b, and returns the resulting slice.
func AppendUint64(b []byte, i uint64) []byte {
if i == 0 {
return append(b, 0x80)
} else if i < 128 {
return append(b, byte(i))
}
switch {
case i < (1 << 8):
return append(b, 0x81, byte(i))
case i < (1 << 16):
return append(b, 0x82,
byte(i>>8),
byte(i),
)
case i < (1 << 24):
return append(b, 0x83,
byte(i>>16),
byte(i>>8),
byte(i),
)
case i < (1 << 32):
return append(b, 0x84,
byte(i>>24),
byte(i>>16),
byte(i>>8),
byte(i),
)
case i < (1 << 40):
return append(b, 0x85,
byte(i>>32),
byte(i>>24),
byte(i>>16),
byte(i>>8),
byte(i),
)
case i < (1 << 48):
return append(b, 0x86,
byte(i>>40),
byte(i>>32),
byte(i>>24),
byte(i>>16),
byte(i>>8),
byte(i),
)
case i < (1 << 56):
return append(b, 0x87,
byte(i>>48),
byte(i>>40),
byte(i>>32),
byte(i>>24),
byte(i>>16),
byte(i>>8),
byte(i),
)
default:
return append(b, 0x88,
byte(i>>56),
byte(i>>48),
byte(i>>40),
byte(i>>32),
byte(i>>24),
byte(i>>16),
byte(i>>8),
byte(i),
)
}
}
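// For reference (illustrative, not part of the original source), AppendUint64 follows the
// canonical integer rules used throughout this package:
//
//    AppendUint64(nil, 0)    // []byte{0x80}: zero encodes as the empty string
//    AppendUint64(nil, 15)   // []byte{0x0f}: values below 128 encode as a single byte
//    AppendUint64(nil, 1024) // []byte{0x82, 0x04, 0x00}: length prefix plus big-endian bytes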
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rlp
import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
)
// typeinfo is an entry in the type cache.
type typeinfo struct {
decoder decoder
decoderErr error // error from makeDecoder
writer writer
writerErr error // error from makeWriter
}
// tags represents struct tags.
type tags struct {
// rlp:"nil" controls whether empty input results in a nil pointer.
// nilKind is the kind of empty value allowed for the field.
nilKind Kind
nilOK bool
// rlp:"optional" allows for a field to be missing in the input list.
// If this is set, all subsequent fields must also be optional.
optional bool
// rlp:"tail" controls whether this field swallows additional list elements. It can
// only be set for the last field, which must be of slice type.
tail bool
// rlp:"-" ignores fields.
ignored bool
}
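// Example (an illustrative sketch, not part of the original source): a hypothetical
// struct combining the tags described above:
//
//    type record struct {
//        Key    []byte
//        Parent *record    `rlp:"nil"`  // empty input decodes to a nil pointer
//        Cached []byte     `rlp:"-"`    // ignored by the codec
//        Rest   []RawValue `rlp:"tail"` // must be the last field; swallows remaining list elements
//    }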
// typekey is the key of a type in typeCache. It includes the struct tags because
// they might generate a different decoder.
type typekey struct {
reflect.Type
tags
}
type decoder func(*Stream, reflect.Value) error
type writer func(reflect.Value, *encbuf) error
var theTC = newTypeCache()
type typeCache struct {
cur atomic.Value
// This lock synchronizes writers.
mu sync.Mutex
next map[typekey]*typeinfo
}
func newTypeCache() *typeCache {
c := new(typeCache)
c.cur.Store(make(map[typekey]*typeinfo))
return c
}
func cachedDecoder(typ reflect.Type) (decoder, error) {
info := theTC.info(typ)
return info.decoder, info.decoderErr
}
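// cachedWriter returns the writer for typ, generating and caching it on
// first use.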
func cachedWriter(typ reflect.Type) (writer, error) {
info := theTC.info(typ)
return info.writer, info.writerErr
}
func (c *typeCache) info(typ reflect.Type) *typeinfo {
key := typekey{Type: typ}
if info := c.cur.Load().(map[typekey]*typeinfo)[key]; info != nil {
return info
}
// Not in the cache, need to generate info for this type.
return c.generate(typ, tags{})
}
func (c *typeCache) generate(typ reflect.Type, tags tags) *typeinfo {
c.mu.Lock()
defer c.mu.Unlock()
cur := c.cur.Load().(map[typekey]*typeinfo)
if info := cur[typekey{typ, tags}]; info != nil {
return info
}
// Copy cur to next.
c.next = make(map[typekey]*typeinfo, len(cur)+1)
for k, v := range cur {
c.next[k] = v
}
// Generate.
info := c.infoWhileGenerating(typ, tags)
// next -> cur
c.cur.Store(c.next)
c.next = nil
return info
}
func (c *typeCache) infoWhileGenerating(typ reflect.Type, tags tags) *typeinfo {
key := typekey{typ, tags}
if info := c.next[key]; info != nil {
return info
}
// Put a dummy value into the cache before generating.
// If the generator tries to lookup itself, it will get
// the dummy value and won't call itself recursively.
info := new(typeinfo)
c.next[key] = info
info.generate(typ, tags)
return info
}
type field struct {
index int
info *typeinfo
optional bool
}
// structFields resolves the typeinfo of all public fields in a struct type.
func structFields(typ reflect.Type) (fields []field, err error) {
var (
lastPublic = lastPublicField(typ)
anyOptional = false
)
for i := 0; i < typ.NumField(); i++ {
if f := typ.Field(i); f.PkgPath == "" { // exported
tags, err := parseStructTag(typ, i, lastPublic)
if err != nil {
return nil, err
}
// Skip rlp:"-" fields.
if tags.ignored {
continue
}
// If any field has the "optional" tag, subsequent fields must also have it.
if tags.optional || tags.tail {
anyOptional = true
} else if anyOptional {
return nil, fmt.Errorf(`rlp: struct field %v.%s needs "optional" tag`, typ, f.Name)
}
info := theTC.infoWhileGenerating(f.Type, tags)
fields = append(fields, field{i, info, tags.optional})
}
}
return fields, nil
}
// firstOptionalField returns the index of the first field with the "optional"
// tag, or len(fields) if there is none.
func firstOptionalField(fields []field) int {
for i, f := range fields {
if f.optional {
return i
}
}
return len(fields)
}
type structFieldError struct {
typ reflect.Type
field int
err error
}
func (e structFieldError) Error() string {
return fmt.Sprintf("%v (struct field %v.%s)", e.err, e.typ, e.typ.Field(e.field).Name)
}
type structTagError struct {
typ reflect.Type
field, tag, err string
}
func (e structTagError) Error() string {
return fmt.Sprintf("rlp: invalid struct tag %q for %v.%s (%s)", e.tag, e.typ, e.field, e.err)
}
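// parseStructTag parses the "rlp" struct tag of field fi in typ. For example
// (illustrative), `rlp:"optional"` sets ts.optional and `rlp:"-"` marks the
// field as ignored.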
func parseStructTag(typ reflect.Type, fi, lastPublic int) (tags, error) {
f := typ.Field(fi)
var ts tags
for _, t := range strings.Split(f.Tag.Get("rlp"), ",") {
switch t = strings.TrimSpace(t); t {
case "":
case "-":
ts.ignored = true
case "nil", "nilString", "nilList":
ts.nilOK = true
if f.Type.Kind() != reflect.Ptr {
return ts, structTagError{typ, f.Name, t, "field is not a pointer"}
}
switch t {
case "nil":
ts.nilKind = defaultNilKind(f.Type.Elem())
case "nilString":
ts.nilKind = String
case "nilList":
ts.nilKind = List
}
case "optional":
ts.optional = true
if ts.tail {
return ts, structTagError{typ, f.Name, t, `also has "tail" tag`}
}
case "tail":
ts.tail = true
if fi != lastPublic {
return ts, structTagError{typ, f.Name, t, "must be on last field"}
}
if ts.optional {
return ts, structTagError{typ, f.Name, t, `also has "optional" tag`}
}
if f.Type.Kind() != reflect.Slice {
return ts, structTagError{typ, f.Name, t, "field type is not slice"}
}
default:
return ts, fmt.Errorf("rlp: unknown struct tag %q on %v.%s", t, typ, f.Name)
}
}
return ts, nil
}
func lastPublicField(typ reflect.Type) int {
last := 0
for i := 0; i < typ.NumField(); i++ {
if typ.Field(i).PkgPath == "" {
last = i
}
}
return last
}
func (i *typeinfo) generate(typ reflect.Type, tags tags) {
i.decoder, i.decoderErr = makeDecoder(typ, tags)
i.writer, i.writerErr = makeWriter(typ, tags)
}
// defaultNilKind determines whether a nil pointer to typ encodes/decodes
// as an empty string or empty list.
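// For example (illustrative), defaultNilKind of a *uint's element is String,
// so a nil *uint corresponds to the empty string (0x80), while a nil pointer
// to a struct corresponds to the empty list (0xc0).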
func defaultNilKind(typ reflect.Type) Kind {
k := typ.Kind()
if isUint(k) || k == reflect.String || k == reflect.Bool || isByteArray(typ) {
return String
}
return List
}
func isUint(k reflect.Kind) bool {
return k >= reflect.Uint && k <= reflect.Uintptr
}
func isByte(typ reflect.Type) bool {
return typ.Kind() == reflect.Uint8 && !typ.Implements(encoderInterface)
}
func isByteArray(typ reflect.Type) bool {
return (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Array) && isByte(typ.Elem())
}
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build !nacl,!js,cgo
package rlp
import (
"reflect"
"unsafe"
)
// byteArrayBytes returns a slice of the byte array v.
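// It does so without copying: a slice header is pointed at the array's
// backing memory, so v must be an addressable array value.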
func byteArrayBytes(v reflect.Value) []byte {
n := v.Len()
var s []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s))
hdr.Data = v.UnsafeAddr()
hdr.Cap = n
hdr.Len = n
return s
}