Commit 61e353e1 authored by protolambda, committed by GitHub

style(batch-submitter,bss-core,proxyd): Fix lint Go (#3328)

* style(batch-submitter): fix lint

* style(bss-core): fix lint

* chore(proxyd): use io and os instead of deprecated ioutil methods, fixes lint
parent c97312a9
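
For context on the proxyd changes below: since Go 1.16 the deprecated io/ioutil helpers map one-to-one onto functions in io and os, which is what this commit applies. A minimal sketch of the substitutions used here; the reader contents are illustrative, not taken from the repository:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

func main() {
	// ioutil.ReadAll(r)     -> io.ReadAll(r)
	// ioutil.NopCloser(r)   -> io.NopCloser(r)
	// ioutil.ReadFile(path) -> os.ReadFile(path)
	r := bytes.NewReader([]byte("example body"))

	body, err := io.ReadAll(r) // replaces ioutil.ReadAll
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	rc := io.NopCloser(bytes.NewReader(body)) // replaces ioutil.NopCloser
	defer rc.Close()

	fmt.Println(string(body))
}
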
@@ -68,10 +68,10 @@ func (c BatchContext) MarkerBatchType() BatchType {
// Write encodes the BatchContext into a 16-byte stream using the following
// encoding:
// - num_sequenced_txs: 3 bytes
// - num_subsequent_queue_txs: 3 bytes
// - timestamp: 5 bytes
// - block_number: 5 bytes
//
// Note that writing to a bytes.Buffer cannot
// error, so errors are ignored here
@@ -85,10 +85,10 @@ func (c *BatchContext) Write(w *bytes.Buffer) {
// Read decodes the BatchContext from the passed reader. If fewer than 16-bytes
// remain, an error is returned. Otherwise the first 16-bytes will be read using
// the expected encoding:
// - num_sequenced_txs: 3 bytes
// - num_subsequent_queue_txs: 3 bytes
// - timestamp: 5 bytes
// - block_number: 5 bytes
func (c *BatchContext) Read(r io.Reader) error {
if err := readUint64(r, &c.NumSequencedTxs, 3); err != nil {
return err
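
The doc comments above describe a fixed-width 16-byte layout (3 + 3 + 5 + 5 bytes). A minimal sketch of how such fields can be written and read as big-endian integers truncated to n bytes; the helper names mirror readUint64 from the diff, but the implementation below is illustrative and not the repository's:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeUint64 appends the low n bytes of val to w in big-endian order.
func writeUint64(w *bytes.Buffer, val uint64, n uint) {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], val)
	w.Write(buf[8-n:]) // writing to a bytes.Buffer cannot fail
}

// readUint64 reads n big-endian bytes from r into *out.
func readUint64(r io.Reader, out *uint64, n uint) error {
	buf := make([]byte, n)
	if _, err := io.ReadFull(r, buf); err != nil {
		return err
	}
	var full [8]byte
	copy(full[8-n:], buf)
	*out = binary.BigEndian.Uint64(full[:])
	return nil
}

func main() {
	var b bytes.Buffer
	// Hypothetical BatchContext values, encoded per the documented widths.
	writeUint64(&b, 5, 3)     // num_sequenced_txs: 3 bytes
	writeUint64(&b, 2, 3)     // num_subsequent_queue_txs: 3 bytes
	writeUint64(&b, 12345, 5) // timestamp: 5 bytes
	writeUint64(&b, 678, 5)   // block_number: 5 bytes
	fmt.Println(b.Len())      // 16

	var numTxs uint64
	_ = readUint64(bytes.NewReader(b.Bytes()), &numTxs, 3)
	fmt.Println(numTxs) // 5
}
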
@@ -188,13 +188,13 @@ type AppendSequencerBatchParams struct {
}
// Write encodes the AppendSequencerBatchParams using the following format:
// - should_start_at_element: 5 bytes
// - total_elements_to_append: 3 bytes
// - num_contexts: 3 bytes
// - num_contexts * batch_context: num_contexts * 16 bytes
// - [num txs omitted]
// - tx_len: 3 bytes
// - tx_bytes: tx_len bytes
//
// Typed batches include a dummy context as the first context
// where the timestamp is 0. The blocknumber is interpreted
@@ -288,13 +288,13 @@ func (p *AppendSequencerBatchParams) Serialize(
// stream does not terminate cleanly with an EOF while reading a tx_len, this
// method will return an error. Otherwise, the stream will be parsed according
// to the following format:
// - should_start_at_element: 5 bytes
// - total_elements_to_append: 3 bytes
// - num_contexts: 3 bytes
// - num_contexts * batch_context: num_contexts * 16 bytes
// - [num txs omitted]
// - tx_len: 3 bytes
// - tx_bytes: tx_len bytes
func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
if err := readUint64(r, &p.ShouldStartAtElement, 5); err != nil {
return err
......
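
The Write/Read comments above describe length-prefixed transaction data after the fixed header: each tx is a 3-byte tx_len followed by tx_len bytes, repeated until the stream ends. A rough sketch of that tail loop; it is illustrative only and not the repository's implementation:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// readTxs consumes 3-byte big-endian length-prefixed transactions until a
// clean EOF. A stream that ends mid tx_len or mid tx_bytes is an error.
func readTxs(r io.Reader) ([][]byte, error) {
	var txs [][]byte
	for {
		lenBuf := make([]byte, 3)
		if _, err := io.ReadFull(r, lenBuf); err == io.EOF {
			return txs, nil // terminated cleanly between transactions
		} else if err != nil {
			return nil, err
		}
		txLen := int(lenBuf[0])<<16 | int(lenBuf[1])<<8 | int(lenBuf[2])
		tx := make([]byte, txLen)
		if _, err := io.ReadFull(r, tx); err != nil {
			return nil, err
		}
		txs = append(txs, tx)
	}
}

func main() {
	// Two illustrative "transactions": 2 bytes and 1 byte.
	stream := []byte{0x00, 0x00, 0x02, 0xaa, 0xbb, 0x00, 0x00, 0x01, 0xcc}
	txs, err := readTxs(bytes.NewReader(stream))
	fmt.Println(len(txs), err) // 2 <nil>
}
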
@@ -15,10 +15,10 @@ import (
// TestBatchContextEncodeDecode tests the (de)serialization of a BatchContext
// against the spec test vector. The encoding should be:
// - num_sequenced_txs: 3 bytes
// - num_subsequent_queue_txs: 3 bytes
// - timestamp: 5 bytes
// - block_number: 5 bytes
func TestBatchContextEncodeDecode(t *testing.T) {
t.Parallel()
......
@@ -32,8 +32,8 @@ func ParseAddress(address string) (common.Address, error) {
// GetConfiguredPrivateKey computes the private key for our configured services.
// The two supported methods are:
// - Derived from BIP39 mnemonic and BIP32 HD derivation path.
// - Directly from a serialized private key.
func GetConfiguredPrivateKey(mnemonic, hdPath, privKeyStr string) (
*ecdsa.PrivateKey, error) {
......
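
The doc comment above lists two key sources. Below is a minimal sketch of the second path only (parsing a serialized hex key) using go-ethereum's crypto.HexToECDSA; the 0x-stripping and the selection logic are assumptions for illustration, not necessarily how the repository implements them, and the BIP39/BIP32 branch is omitted because it needs extra derivation helpers. The key in main is a well-known throwaway development key:

package main

import (
	"crypto/ecdsa"
	"errors"
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/crypto"
)

// configuredPrivateKey is a sketch: it only handles the serialized-key case
// and rejects ambiguous configurations.
func configuredPrivateKey(mnemonic, hdPath, privKeyStr string) (*ecdsa.PrivateKey, error) {
	useMnemonic := mnemonic != "" && hdPath != ""
	usePrivKey := privKeyStr != ""

	switch {
	case useMnemonic && usePrivKey:
		return nil, errors.New("cannot specify both mnemonic and private key")
	case usePrivKey:
		// Directly from a serialized private key (hex, optionally 0x-prefixed).
		return crypto.HexToECDSA(strings.TrimPrefix(privKeyStr, "0x"))
	case useMnemonic:
		return nil, errors.New("BIP39/BIP32 derivation omitted in this sketch")
	default:
		return nil, errors.New("no key material configured")
	}
}

func main() {
	key, err := configuredPrivateKey("", "",
		"0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(crypto.PubkeyToAddress(key.PublicKey).Hex())
}
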
@@ -209,9 +209,9 @@ func TestParsePrivateKeyStr(t *testing.T) {
}
// TestGetConfiguredPrivateKey asserts that GetConfiguredPrivateKey either:
-// 1) Derives the correct private key assuming the BIP39 mnemonic and BIP32
+// 1. Derives the correct private key assuming the BIP39 mnemonic and BIP32
// derivation path are both present and the private key string is omitted.
-// 2) Parses the correct private key assuming only the private key string is
+// 2. Parses the correct private key assuming only the private key string is
// present, but the BIP39 mnemonic and BIP32 derivation path are omitted.
func TestGetConfiguredPrivateKey(t *testing.T) {
tests := []struct {
......
@@ -330,7 +330,8 @@ func waitMined(
// CalcGasFeeCap deterministically computes the recommended gas fee cap given
// the base fee and gasTipCap. The resulting gasFeeCap is equal to:
-// gasTipCap + 2*baseFee.
+//
+// gasTipCap + 2*baseFee.
func CalcGasFeeCap(baseFee, gasTipCap *big.Int) *big.Int {
return new(big.Int).Add(
gasTipCap,
......
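
The hunk above shows only the start of the function body. Under the documented formula gasFeeCap = gasTipCap + 2*baseFee, a self-contained sketch of the full computation with a small worked example; the completion of the body is inferred from that comment rather than copied from the file:

package main

import (
	"fmt"
	"math/big"
)

// calcGasFeeCap returns gasTipCap + 2*baseFee, the recommended fee cap per
// the doc comment above.
func calcGasFeeCap(baseFee, gasTipCap *big.Int) *big.Int {
	return new(big.Int).Add(
		gasTipCap,
		new(big.Int).Mul(baseFee, big.NewInt(2)),
	)
}

func main() {
	baseFee := big.NewInt(100_000_000_000) // 100 gwei
	tipCap := big.NewInt(2_000_000_000)    // 2 gwei
	fmt.Println(calcGasFeeCap(baseFee, tipCap)) // 202000000000 (202 gwei)
}
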
@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"io"
-"io/ioutil"
"math"
"math/rand"
"net/http"
@@ -408,7 +407,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool
}
defer httpRes.Body.Close()
-resB, err := ioutil.ReadAll(io.LimitReader(httpRes.Body, b.maxResponseSize))
+resB, err := io.ReadAll(io.LimitReader(httpRes.Body, b.maxResponseSize))
if err != nil {
return nil, wrapErr(err, "error reading response body")
}
......
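
The doForward change keeps the existing io.LimitReader guard while swapping ioutil.ReadAll for io.ReadAll. A small sketch of that pattern for capping how much of a response body is buffered; the size constant and input are illustrative:

package main

import (
	"fmt"
	"io"
	"strings"
)

const maxResponseSize = 16 // illustrative cap, in bytes

func main() {
	body := strings.NewReader("this response is longer than the cap allows")

	// io.LimitReader stops after maxResponseSize bytes, so a misbehaving
	// upstream cannot make ReadAll buffer an unbounded payload.
	resB, err := io.ReadAll(io.LimitReader(body, maxResponseSize))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("read %d bytes: %q\n", len(resB), resB)
}
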
@@ -4,7 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
-"io/ioutil"
+"io"
"net/http"
"net/http/httptest"
"strings"
@@ -122,7 +122,7 @@ func (h *BatchRPCResponseRouter) ServeHTTP(w http.ResponseWriter, r *http.Reques
h.mtx.Lock()
defer h.mtx.Unlock()
-body, err := ioutil.ReadAll(r.Body)
+body, err := io.ReadAll(r.Body)
if err != nil {
panic(err)
}
@@ -241,12 +241,12 @@ func (m *MockBackend) Requests() []*RecordedRequest {
func (m *MockBackend) wrappedHandler(w http.ResponseWriter, r *http.Request) {
m.mtx.Lock()
-body, err := ioutil.ReadAll(r.Body)
+body, err := io.ReadAll(r.Body)
if err != nil {
panic(err)
}
clone := r.Clone(context.Background())
-clone.Body = ioutil.NopCloser(bytes.NewReader(body))
+clone.Body = io.NopCloser(bytes.NewReader(body))
m.requests = append(m.requests, &RecordedRequest{
Method: r.Method,
Headers: r.Header.Clone(),
......
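
In wrappedHandler the body is drained for recording and then restored on a cloned request via io.NopCloser so it can still be read downstream. A hedged sketch of that drain-and-restore pattern outside the mock backend; the handler wiring is illustrative:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Drain the body once so it can be recorded.
		body, err := io.ReadAll(r.Body)
		if err != nil {
			panic(err)
		}

		// Restore a readable body on a clone; r.Body is already consumed.
		clone := r.Clone(r.Context())
		clone.Body = io.NopCloser(bytes.NewReader(body))

		// Downstream code can now read the clone's body as if untouched.
		again, _ := io.ReadAll(clone.Body)
		fmt.Fprintf(w, "recorded %d bytes, replayed %d bytes", len(body), len(again))
	})

	req := httptest.NewRequest(http.MethodPost, "/", bytes.NewReader([]byte(`{"id":1}`)))
	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, req)
	fmt.Println(rec.Body.String())
}
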
@@ -4,7 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
-"io/ioutil"
+"io"
"net/http"
"os"
"testing"
@@ -67,7 +67,7 @@ func (p *ProxydHTTPClient) SendRequest(body []byte) ([]byte, int, error) {
}
defer res.Body.Close()
code := res.StatusCode
-resBody, err := ioutil.ReadAll(res.Body)
+resBody, err := io.ReadAll(res.Body)
if err != nil {
panic(err)
}
......
@@ -3,7 +3,6 @@ package proxyd
import (
"encoding/json"
"io"
-"io/ioutil"
"strings"
)
@@ -103,7 +102,7 @@ func ParseBatchRPCReq(body []byte) ([]json.RawMessage, error) {
}
func ParseRPCRes(r io.Reader) (*RPCRes, error) {
-body, err := ioutil.ReadAll(r)
+body, err := io.ReadAll(r)
if err != nil {
return nil, wrapErr(err, "error reading RPC response")
}
......
@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
-"io/ioutil"
"math"
"net/http"
"strconv"
@@ -236,7 +235,7 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
"user_agent", userAgent,
)
-body, err := ioutil.ReadAll(io.LimitReader(r.Body, s.maxBodySize))
+body, err := io.ReadAll(io.LimitReader(r.Body, s.maxBodySize))
if err != nil {
log.Error("error reading request body", "err", err)
writeRPCError(ctx, w, nil, ErrInternal)
......
@@ -4,11 +4,11 @@ import (
"crypto/tls"
"crypto/x509"
"errors"
-"io/ioutil"
+"os"
)
func CreateTLSClient(ca string) (*tls.Config, error) {
-pem, err := ioutil.ReadFile(ca)
+pem, err := os.ReadFile(ca)
if err != nil {
return nil, wrapErr(err, "error reading CA")
}
......
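
The CreateTLSClient hunk ends right after the os.ReadFile swap. A sketch of how such a function typically continues, building a root pool from the PEM bytes; the continuation is an assumption rather than the repository's code, and wrapErr is replaced with plain error wrapping here:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"os"
)

// createTLSClient loads a CA bundle from disk and returns a tls.Config that
// trusts only that CA for server verification.
func createTLSClient(ca string) (*tls.Config, error) {
	pem, err := os.ReadFile(ca) // replaces ioutil.ReadFile
	if err != nil {
		return nil, fmt.Errorf("error reading CA: %w", err)
	}

	roots := x509.NewCertPool()
	if ok := roots.AppendCertsFromPEM(pem); !ok {
		return nil, errors.New("error parsing CA certificate")
	}
	return &tls.Config{RootCAs: roots}, nil
}

func main() {
	if _, err := createTLSClient("/path/to/ca.pem"); err != nil {
		fmt.Println(err) // expected unless the illustrative path exists
	}
}
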