Commit 96ef7bb4 authored by Inphi's avatar Inphi Committed by GitHub

cannon: Implement 64-bit Solidity VM (#12665)

* cannon: Implement MIPS64Memory.sol

* cannon: Implement 64-bit Solidity VM

- Implements 64-bit Cannon (with multithreading) in MIPS64.sol
- Re-enable differential testing for 64-bit VMs

* review comments

* check pc for 4-byte alignment

* gofmt

* update snapshot

* address nits; add more add/sub/mult overflow tests

* diff test misaligned instruction

* fix mul[t] MIPS64.sol emulation

* diff fuzz mul operations

* fix addiu test case

* fix GetInstruction return value type
parent d6bda033
...@@ -56,9 +56,12 @@ sanitize-program: ...@@ -56,9 +56,12 @@ sanitize-program:
contract: contract:
cd ../packages/contracts-bedrock && forge build cd ../packages/contracts-bedrock && forge build
test: elf contract test: elf contract test64
go test -v ./... go test -v ./...
test64: elf contract
go test -tags=cannon64 -run '(TestEVM.*64|TestHelloEVM|TestClaimEVM)' ./mipsevm/tests
diff-%-cannon: cannon elf diff-%-cannon: cannon elf
$$OTHER_CANNON load-elf --type $* --path ./testdata/example/bin/hello.elf --out ./bin/prestate-other.bin.gz --meta "" $$OTHER_CANNON load-elf --type $* --path ./testdata/example/bin/hello.elf --out ./bin/prestate-other.bin.gz --meta ""
./bin/cannon load-elf --type $* --path ./testdata/example/bin/hello.elf --out ./bin/prestate.bin.gz --meta "" ./bin/cannon load-elf --type $* --path ./testdata/example/bin/hello.elf --out ./bin/prestate.bin.gz --meta ""
...@@ -96,6 +99,10 @@ fuzz: ...@@ -96,6 +99,10 @@ fuzz:
go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallCloneST ./mipsevm/tests go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallCloneST ./mipsevm/tests
# Multi-threaded tests # Multi-threaded tests
go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallCloneMT ./mipsevm/tests go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallCloneMT ./mipsevm/tests
# 64-bit tests - increased fuzztime for a larger input space
go test $(FUZZLDFLAGS) -tags=cannon64 -run NOTAREALTEST -v -fuzztime 20s -fuzz=FuzzStateConsistencyMulOp ./mipsevm/tests
go test $(FUZZLDFLAGS) -tags=cannon64 -run NOTAREALTEST -v -fuzztime 20s -fuzz=FuzzStateConsistencyMultOp ./mipsevm/tests
go test $(FUZZLDFLAGS) -tags=cannon64 -run NOTAREALTEST -v -fuzztime 20s -fuzz=FuzzStateConsistencyMultuOp ./mipsevm/tests
.PHONY: \ .PHONY: \
cannon32-impl \ cannon32-impl \
......
...@@ -3,6 +3,7 @@ package exec ...@@ -3,6 +3,7 @@ package exec
import ( import (
"fmt" "fmt"
"github.com/ethereum-optimism/optimism/cannon/mipsevm/arch"
"github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory"
) )
...@@ -37,7 +38,7 @@ func (m *MemoryTrackerImpl) TrackMemAccess(effAddr Word) { ...@@ -37,7 +38,7 @@ func (m *MemoryTrackerImpl) TrackMemAccess(effAddr Word) {
// TrackMemAccess2 creates a proof for a memory access following a call to TrackMemAccess // TrackMemAccess2 creates a proof for a memory access following a call to TrackMemAccess
// This is used to generate proofs for contiguous memory accesses within the same step // This is used to generate proofs for contiguous memory accesses within the same step
func (m *MemoryTrackerImpl) TrackMemAccess2(effAddr Word) { func (m *MemoryTrackerImpl) TrackMemAccess2(effAddr Word) {
if m.memProofEnabled && m.lastMemAccess+4 != effAddr { if m.memProofEnabled && m.lastMemAccess+arch.WordSizeBytes != effAddr {
panic(fmt.Errorf("unexpected disjointed mem access at %08x, last memory access is at %08x buffered", effAddr, m.lastMemAccess)) panic(fmt.Errorf("unexpected disjointed mem access at %08x, last memory access is at %08x buffered", effAddr, m.lastMemAccess))
} }
m.lastMemAccess = effAddr m.lastMemAccess = effAddr
......
...@@ -378,7 +378,7 @@ func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem ...@@ -378,7 +378,7 @@ func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem
w := uint32(SelectSubWord(rs, mem, 4, false)) w := uint32(SelectSubWord(rs, mem, 4, false))
val := w >> (24 - (rs&3)*8) val := w >> (24 - (rs&3)*8)
mask := uint32(0xFFFFFFFF) >> (24 - (rs&3)*8) mask := uint32(0xFFFFFFFF) >> (24 - (rs&3)*8)
lwrResult := ((uint32(rt) & ^mask) | val) & 0xFFFFFFFF lwrResult := (uint32(rt) & ^mask) | val
if rs&3 == 3 { // loaded bit 31 if rs&3 == 3 { // loaded bit 31
return SignExtend(Word(lwrResult), 32) return SignExtend(Word(lwrResult), 32)
} else { } else {
...@@ -539,13 +539,13 @@ func HandleHiLo(cpu *mipsevm.CpuScalars, registers *[32]Word, fun uint32, rs Wor ...@@ -539,13 +539,13 @@ func HandleHiLo(cpu *mipsevm.CpuScalars, registers *[32]Word, fun uint32, rs Wor
cpu.HI = SignExtend(Word(acc>>32), 32) cpu.HI = SignExtend(Word(acc>>32), 32)
cpu.LO = SignExtend(Word(uint32(acc)), 32) cpu.LO = SignExtend(Word(uint32(acc)), 32)
case 0x1a: // div case 0x1a: // div
if rt == 0 { if uint32(rt) == 0 {
panic("instruction divide by zero") panic("instruction divide by zero")
} }
cpu.HI = SignExtend(Word(int32(rs)%int32(rt)), 32) cpu.HI = SignExtend(Word(int32(rs)%int32(rt)), 32)
cpu.LO = SignExtend(Word(int32(rs)/int32(rt)), 32) cpu.LO = SignExtend(Word(int32(rs)/int32(rt)), 32)
case 0x1b: // divu case 0x1b: // divu
if rt == 0 { if uint32(rt) == 0 {
panic("instruction divide by zero") panic("instruction divide by zero")
} }
cpu.HI = SignExtend(Word(uint32(rs)%uint32(rt)), 32) cpu.HI = SignExtend(Word(uint32(rs)%uint32(rt)), 32)
......
...@@ -10,7 +10,6 @@ import ( ...@@ -10,7 +10,6 @@ import (
"github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch"
"github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
...@@ -134,12 +133,12 @@ func TestEVMSingleStep_Operators64(t *testing.T) { ...@@ -134,12 +133,12 @@ func TestEVMSingleStep_Operators64(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
func TestEVMSingleStep_Shift(t *testing.T) { func TestEVMSingleStep_Shift64(t *testing.T) {
cases := []struct { cases := []struct {
name string name string
rd Word rd Word
...@@ -190,7 +189,8 @@ func TestEVMSingleStep_Shift(t *testing.T) { ...@@ -190,7 +189,8 @@ func TestEVMSingleStep_Shift(t *testing.T) {
for i, tt := range cases { for i, tt := range cases {
testName := fmt.Sprintf("%v %v", v.Name, tt.name) testName := fmt.Sprintf("%v %v", v.Name, tt.name)
t.Run(testName, func(t *testing.T) { t.Run(testName, func(t *testing.T) {
goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(int64(i)), testutil.WithPCAndNextPC(0)) pc := Word(0x0)
goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(int64(i)), testutil.WithPCAndNextPC(pc))
state := goVm.GetState() state := goVm.GetState()
var insn uint32 var insn uint32
var rtReg uint32 var rtReg uint32
...@@ -200,7 +200,7 @@ func TestEVMSingleStep_Shift(t *testing.T) { ...@@ -200,7 +200,7 @@ func TestEVMSingleStep_Shift(t *testing.T) {
insn = rtReg<<16 | rdReg<<11 | tt.sa<<6 | tt.funct insn = rtReg<<16 | rdReg<<11 | tt.sa<<6 | tt.funct
state.GetRegistersRef()[rdReg] = tt.rd state.GetRegistersRef()[rdReg] = tt.rd
state.GetRegistersRef()[rtReg] = tt.rt state.GetRegistersRef()[rtReg] = tt.rt
testutil.StoreInstruction(state.GetMemory(), 0, insn) testutil.StoreInstruction(state.GetMemory(), pc, insn)
step := state.GetStep() step := state.GetStep()
// Setup expectations // Setup expectations
...@@ -213,7 +213,7 @@ func TestEVMSingleStep_Shift(t *testing.T) { ...@@ -213,7 +213,7 @@ func TestEVMSingleStep_Shift(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
...@@ -455,12 +455,12 @@ func TestEVMSingleStep_LoadStore64(t *testing.T) { ...@@ -455,12 +455,12 @@ func TestEVMSingleStep_LoadStore64(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
func TestEVMSingleStep_DivMult(t *testing.T) { func TestEVMSingleStep_DivMult64(t *testing.T) {
cases := []struct { cases := []struct {
name string name string
rs Word rs Word
...@@ -470,6 +470,14 @@ func TestEVMSingleStep_DivMult(t *testing.T) { ...@@ -470,6 +470,14 @@ func TestEVMSingleStep_DivMult(t *testing.T) {
expectHi Word expectHi Word
expectPanic string expectPanic string
}{ }{
// TODO(#12598): Fix 32-bit tests and remove these
{name: "mult", funct: uint32(0x18), rs: Word(0x0F_FF_00_00), rt: Word(100), expectHi: Word(0x6), expectLo: Word(0x3F_9C_00_00)},
{name: "mult", funct: uint32(0x18), rs: Word(0xFF_FF_FF_FF), rt: Word(0xFF_FF_FF_FF), expectHi: Word(0x0), expectLo: Word(0x1)},
{name: "mult", funct: uint32(0x18), rs: Word(0xFF_FF_FF_D3), rt: Word(0xAA_BB_CC_DD), expectHi: Word(0xE), expectLo: Word(0xFF_FF_FF_FF_FC_FC_FD_27)},
{name: "multu", funct: uint32(0x19), rs: Word(0x0F_FF_00_00), rt: Word(100), expectHi: Word(0x6), expectLo: Word(0x3F_9C_00_00)},
{name: "multu", funct: uint32(0x19), rs: Word(0xFF_FF_FF_FF), rt: Word(0xFF_FF_FF_FF), expectHi: Word(0xFF_FF_FF_FF_FF_FF_FF_FE), expectLo: Word(0x1)},
{name: "multu", funct: uint32(0x19), rs: Word(0xFF_FF_FF_D3), rt: Word(0xAA_BB_CC_BE), expectHi: Word(0xFF_FF_FF_FF_AA_BB_CC_9F), expectLo: Word(0xFF_FF_FF_FF_FC_FD_02_9A)},
// dmult s1, s2 // dmult s1, s2
// expected hi,lo were verified using qemu-mips // expected hi,lo were verified using qemu-mips
{name: "dmult 0", funct: 0x1c, rs: 0, rt: 0, expectLo: 0, expectHi: 0}, {name: "dmult 0", funct: 0x1c, rs: 0, rt: 0, expectLo: 0, expectHi: 0},
...@@ -530,6 +538,10 @@ func TestEVMSingleStep_DivMult(t *testing.T) { ...@@ -530,6 +538,10 @@ func TestEVMSingleStep_DivMult(t *testing.T) {
{name: "ddivu", funct: 0x1f, rs: ^Word(0), rt: ^Word(0), expectLo: 1, expectHi: 0}, {name: "ddivu", funct: 0x1f, rs: ^Word(0), rt: ^Word(0), expectLo: 1, expectHi: 0},
{name: "ddivu", funct: 0x1f, rs: ^Word(0), rt: 2, expectLo: 0x7F_FF_FF_FF_FF_FF_FF_FF, expectHi: 1}, {name: "ddivu", funct: 0x1f, rs: ^Word(0), rt: 2, expectLo: 0x7F_FF_FF_FF_FF_FF_FF_FF, expectHi: 1},
{name: "ddivu", funct: 0x1f, rs: 0x7F_FF_FF_FF_00_00_00_00, rt: ^Word(0), expectLo: 0, expectHi: 0x7F_FF_FF_FF_00_00_00_00}, {name: "ddivu", funct: 0x1f, rs: 0x7F_FF_FF_FF_00_00_00_00, rt: ^Word(0), expectLo: 0, expectHi: 0x7F_FF_FF_FF_00_00_00_00},
// a couple div/divu 64-bit edge cases
{name: "div lower word zero", funct: 0x1a, rs: 1, rt: 0xFF_FF_FF_FF_00_00_00_00, expectPanic: "instruction divide by zero"},
{name: "divu lower word zero", funct: 0x1b, rs: 1, rt: 0xFF_FF_FF_FF_00_00_00_00, expectPanic: "instruction divide by zero"},
} }
v := GetMultiThreadedTestCase(t) v := GetMultiThreadedTestCase(t)
...@@ -560,15 +572,13 @@ func TestEVMSingleStep_DivMult(t *testing.T) { ...@@ -560,15 +572,13 @@ func TestEVMSingleStep_DivMult(t *testing.T) {
stepWitness, err := goVm.Step(true) stepWitness, err := goVm.Step(true)
require.NoError(t, err) require.NoError(t, err)
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
} }
}) })
} }
} }
func TestEVMSingleStepBranch64(t *testing.T) { func TestEVMSingleStep_Branch64(t *testing.T) {
var tracer *tracing.Hooks
versions := GetMipsVersionTestCases(t) versions := GetMipsVersionTestCases(t)
cases := []struct { cases := []struct {
name string name string
...@@ -651,7 +661,7 @@ func TestEVMSingleStepBranch64(t *testing.T) { ...@@ -651,7 +661,7 @@ func TestEVMSingleStepBranch64(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
......
...@@ -25,8 +25,6 @@ func TestEVM(t *testing.T) { ...@@ -25,8 +25,6 @@ func TestEVM(t *testing.T) {
testFiles, err := os.ReadDir("open_mips_tests/test/bin") testFiles, err := os.ReadDir("open_mips_tests/test/bin")
require.NoError(t, err) require.NoError(t, err)
var tracer *tracing.Hooks // no-tracer by default, but test_util.MarkdownTracer
cases := GetMipsVersionTestCases(t) cases := GetMipsVersionTestCases(t)
skippedTests := map[string][]string{ skippedTests := map[string][]string{
"multi-threaded": {"clone.bin"}, "multi-threaded": {"clone.bin"},
...@@ -51,7 +49,6 @@ func TestEVM(t *testing.T) { ...@@ -51,7 +49,6 @@ func TestEVM(t *testing.T) {
expectPanic := strings.HasSuffix(f.Name(), "panic.bin") expectPanic := strings.HasSuffix(f.Name(), "panic.bin")
evm := testutil.NewMIPSEVM(c.Contracts) evm := testutil.NewMIPSEVM(c.Contracts)
evm.SetTracer(tracer)
evm.SetLocalOracle(oracle) evm.SetLocalOracle(oracle)
testutil.LogStepFailureAtCleanup(t, evm) testutil.LogStepFailureAtCleanup(t, evm)
...@@ -117,8 +114,6 @@ func TestEVM(t *testing.T) { ...@@ -117,8 +114,6 @@ func TestEVM(t *testing.T) {
} }
func TestEVMSingleStep_Jump(t *testing.T) { func TestEVMSingleStep_Jump(t *testing.T) {
var tracer *tracing.Hooks
versions := GetMipsVersionTestCases(t) versions := GetMipsVersionTestCases(t)
cases := []struct { cases := []struct {
name string name string
...@@ -157,15 +152,13 @@ func TestEVMSingleStep_Jump(t *testing.T) { ...@@ -157,15 +152,13 @@ func TestEVMSingleStep_Jump(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
} }
func TestEVMSingleStep_Operators(t *testing.T) { func TestEVMSingleStep_Operators(t *testing.T) {
var tracer *tracing.Hooks
versions := GetMipsVersionTestCases(t) versions := GetMipsVersionTestCases(t)
cases := []struct { cases := []struct {
name string name string
...@@ -177,22 +170,45 @@ func TestEVMSingleStep_Operators(t *testing.T) { ...@@ -177,22 +170,45 @@ func TestEVMSingleStep_Operators(t *testing.T) {
opcode uint32 opcode uint32
expectRes Word expectRes Word
}{ }{
{name: "add", funct: 0x20, isImm: false, rs: Word(12), rt: Word(20), expectRes: Word(32)}, // add t0, s1, s2 {name: "add", funct: 0x20, isImm: false, rs: Word(12), rt: Word(20), expectRes: Word(32)}, // add t0, s1, s2
{name: "addu", funct: 0x21, isImm: false, rs: Word(12), rt: Word(20), expectRes: Word(32)}, // addu t0, s1, s2 {name: "add", funct: 0x20, isImm: false, rs: ^Word(0), rt: ^Word(0), expectRes: Word(0xFF_FF_FF_FE)}, // add t0, s1, s2
{name: "addi", opcode: 0x8, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(44)}, // addi t0, s1, 40 {name: "add", funct: 0x20, isImm: false, rs: Word(0x7F_FF_FF_FF), rt: Word(0x7F_FF_FF_FF), expectRes: Word(0xFF_FF_FF_FE)}, // add t0, s1, s2
{name: "addi sign", opcode: 0x8, isImm: true, rs: Word(2), rt: Word(1), imm: uint16(0xfffe), expectRes: Word(0)}, // addi t0, s1, -2 {name: "add", funct: 0x20, isImm: false, rs: ^Word(0), rt: Word(2), expectRes: Word(1)}, // add t0, s1, s2
{name: "addiu", opcode: 0x9, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(44)}, // addiu t0, s1, 40 {name: "add", funct: 0x20, isImm: false, rs: Word(2), rt: ^Word(0), expectRes: Word(1)}, // add t0, s1, s2
{name: "sub", funct: 0x22, isImm: false, rs: Word(20), rt: Word(12), expectRes: Word(8)}, // sub t0, s1, s2 {name: "add", funct: 0x20, isImm: false, rs: Word(0x7F_FF_FF_FF), rt: Word(1), expectRes: Word(0x80_00_00_00)}, // add t0, s1, s2
{name: "subu", funct: 0x23, isImm: false, rs: Word(20), rt: Word(12), expectRes: Word(8)}, // subu t0, s1, s2
{name: "and", funct: 0x24, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(160)}, // and t0, s1, s2 {name: "addu", funct: 0x21, isImm: false, rs: Word(12), rt: Word(20), expectRes: Word(32)}, // addu t0, s1, s2
{name: "andi", opcode: 0xc, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(0)}, // andi t0, s1, 40 {name: "addu", funct: 0x21, isImm: false, rs: ^Word(0), rt: ^Word(0), expectRes: Word(0xFF_FF_FF_FE)}, // addu t0, s1, s2
{name: "or", funct: 0x25, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(1530)}, // or t0, s1, s2 {name: "addu", funct: 0x21, isImm: false, rs: Word(0x7F_FF_FF_FF), rt: Word(0x7F_FF_FF_FF), expectRes: Word(0xFF_FF_FF_FE)}, // addu t0, s1, s2
{name: "ori", opcode: 0xd, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(44)}, // ori t0, s1, 40 {name: "addu", funct: 0x21, isImm: false, rs: ^Word(0), rt: Word(2), expectRes: Word(1)}, // addu t0, s1, s2
{name: "xor", funct: 0x26, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(1370)}, // xor t0, s1, s2 {name: "addu", funct: 0x21, isImm: false, rs: Word(0x7F_FF_FF_FF), rt: Word(1), expectRes: Word(0x80_00_00_00)}, // addu t0, s1, s2
{name: "xori", opcode: 0xe, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(44)}, // xori t0, s1, 40
{name: "nor", funct: 0x27, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(4294965765)}, // nor t0, s1, s2 {name: "addi", opcode: 0x8, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(44)}, // addi t0, s1, 40
{name: "slt", funct: 0x2a, isImm: false, rs: 0xFF_FF_FF_FE, rt: Word(5), expectRes: Word(1)}, // slt t0, s1, s2 {name: "addi", opcode: 0x8, isImm: true, rs: ^Word(0), rt: Word(0xAA_BB_CC_DD), imm: uint16(1), expectRes: Word(0)}, // addi t0, s1, 40
{name: "sltu", funct: 0x2b, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(0)}, // sltu t0, s1, s2 {name: "addi", opcode: 0x8, isImm: true, rs: ^Word(0), rt: Word(0xAA_BB_CC_DD), imm: uint16(0xFF_FF), expectRes: Word(0xFF_FF_FF_FE)}, // addi t0, s1, 40
{name: "addi sign", opcode: 0x8, isImm: true, rs: Word(2), rt: Word(1), imm: uint16(0xfffe), expectRes: Word(0)}, // addi t0, s1, -2
{name: "addiu", opcode: 0x9, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(44)}, // addiu t0, s1, 40
{name: "addiu", opcode: 0x9, isImm: true, rs: ^Word(0), rt: Word(0xAA_BB_CC_DD), imm: uint16(1), expectRes: Word(0)}, // addiu t0, s1, 40
{name: "addiu", opcode: 0x9, isImm: true, rs: ^Word(0), rt: Word(0xAA_BB_CC_DD), imm: uint16(0xFF_FF), expectRes: Word(0xFF_FF_FF_FE)}, // addiu t0, s1, 40
{name: "sub", funct: 0x22, isImm: false, rs: Word(20), rt: Word(12), expectRes: Word(8)}, // sub t0, s1, s2
{name: "sub", funct: 0x22, isImm: false, rs: ^Word(0), rt: Word(1), expectRes: Word(0xFF_FF_FF_FE)}, // sub t0, s1, s2
{name: "sub", funct: 0x22, isImm: false, rs: Word(1), rt: ^Word(0), expectRes: Word(0x2)}, // sub t0, s1, s2
{name: "subu", funct: 0x23, isImm: false, rs: Word(20), rt: Word(12), expectRes: Word(8)}, // subu t0, s1, s2
{name: "subu", funct: 0x23, isImm: false, rs: ^Word(0), rt: Word(1), expectRes: Word(0xFF_FF_FF_FE)}, // subu t0, s1, s2
{name: "subu", funct: 0x23, isImm: false, rs: Word(1), rt: ^Word(0), expectRes: Word(0x2)}, // subu t0, s1, s2
{name: "and", funct: 0x24, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(160)}, // and t0, s1, s2
{name: "andi", opcode: 0xc, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(0)}, // andi t0, s1, 40
{name: "or", funct: 0x25, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(1530)}, // or t0, s1, s2
{name: "ori", opcode: 0xd, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(44)}, // ori t0, s1, 40
{name: "xor", funct: 0x26, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(1370)}, // xor t0, s1, s2
{name: "xori", opcode: 0xe, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(44)}, // xori t0, s1, 40
{name: "nor", funct: 0x27, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(4294965765)}, // nor t0, s1, s2
{name: "slt", funct: 0x2a, isImm: false, rs: 0xFF_FF_FF_FE, rt: Word(5), expectRes: Word(1)}, // slt t0, s1, s2
{name: "sltu", funct: 0x2b, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(0)}, // sltu t0, s1, s2
} }
for _, v := range versions { for _, v := range versions {
...@@ -236,15 +252,13 @@ func TestEVMSingleStep_Operators(t *testing.T) { ...@@ -236,15 +252,13 @@ func TestEVMSingleStep_Operators(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
} }
func TestEVMSingleStep_LoadStore(t *testing.T) { func TestEVMSingleStep_LoadStore(t *testing.T) {
var tracer *tracing.Hooks
loadMemVal := Word(0x11_22_33_44) loadMemVal := Word(0x11_22_33_44)
loadMemValNeg := Word(0xF1_F2_F3_F4) loadMemValNeg := Word(0xF1_F2_F3_F4)
rtVal := Word(0xaa_bb_cc_dd) rtVal := Word(0xaa_bb_cc_dd)
...@@ -330,14 +344,13 @@ func TestEVMSingleStep_LoadStore(t *testing.T) { ...@@ -330,14 +344,13 @@ func TestEVMSingleStep_LoadStore(t *testing.T) {
// Validate // Validate
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
} }
func TestEVMSingleStep_MovzMovn(t *testing.T) { func TestEVMSingleStep_MovzMovn(t *testing.T) {
var tracer *tracing.Hooks
versions := GetMipsVersionTestCases(t) versions := GetMipsVersionTestCases(t)
cases := []struct { cases := []struct {
name string name string
...@@ -376,7 +389,7 @@ func TestEVMSingleStep_MovzMovn(t *testing.T) { ...@@ -376,7 +389,7 @@ func TestEVMSingleStep_MovzMovn(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
if tt.funct == 0xa { if tt.funct == 0xa {
t2 = 0x1 t2 = 0x1
...@@ -392,7 +405,7 @@ func TestEVMSingleStep_MovzMovn(t *testing.T) { ...@@ -392,7 +405,7 @@ func TestEVMSingleStep_MovzMovn(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
...@@ -400,7 +413,6 @@ func TestEVMSingleStep_MovzMovn(t *testing.T) { ...@@ -400,7 +413,6 @@ func TestEVMSingleStep_MovzMovn(t *testing.T) {
} }
func TestEVMSingleStep_MfhiMflo(t *testing.T) { func TestEVMSingleStep_MfhiMflo(t *testing.T) {
var tracer *tracing.Hooks
versions := GetMipsVersionTestCases(t) versions := GetMipsVersionTestCases(t)
cases := []struct { cases := []struct {
name string name string
...@@ -430,7 +442,7 @@ func TestEVMSingleStep_MfhiMflo(t *testing.T) { ...@@ -430,7 +442,7 @@ func TestEVMSingleStep_MfhiMflo(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
...@@ -453,9 +465,21 @@ func TestEVMSingleStep_MulDiv(t *testing.T) { ...@@ -453,9 +465,21 @@ func TestEVMSingleStep_MulDiv(t *testing.T) {
expectRevert string expectRevert string
errMsg string errMsg string
}{ }{
{name: "mul", funct: uint32(0x2), rs: Word(5), rt: Word(2), opcode: uint32(28), rdReg: uint32(0x8), expectRes: Word(10)}, // mul t0, t1, t2 {name: "mul", funct: uint32(0x2), opcode: uint32(28), rs: Word(5), rt: Word(2), rdReg: uint32(0x8), expectRes: Word(10)}, // mul t0, t1, t2
{name: "mult", funct: uint32(0x18), rs: Word(0x0F_FF_00_00), rt: Word(100), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(0x6), expectLo: Word(0x3F_9C_00_00)}, // mult t1, t2 {name: "mul", funct: uint32(0x2), opcode: uint32(28), rs: Word(0x1), rt: ^Word(0), rdReg: uint32(0x8), expectRes: ^Word(0)}, // mul t1, t2
{name: "multu", funct: uint32(0x19), rs: Word(0x0F_FF_00_00), rt: Word(100), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(0x6), expectLo: Word(0x3F_9C_00_00)}, // multu t1, t2 {name: "mul", funct: uint32(0x2), opcode: uint32(28), rs: Word(0xFF_FF_FF_FF), rt: Word(0xFF_FF_FF_FF), rdReg: uint32(0x8), expectRes: Word(0x1)}, // mul t1, t2
{name: "mul", funct: uint32(0x2), opcode: uint32(28), rs: Word(0xFF_FF_FF_D3), rt: Word(0xAA_BB_CC_DD), rdReg: uint32(0x8), expectRes: Word(0xFC_FC_FD_27)}, // mul t1, t2
{name: "mult", funct: uint32(0x18), rs: Word(0x0F_FF_00_00), rt: Word(100), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(0x6), expectLo: Word(0x3F_9C_00_00)}, // mult t1, t2
{name: "mult", funct: uint32(0x18), rs: Word(0x1), rt: Word(0xFF_FF_FF_FF), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(0xFF_FF_FF_FF), expectLo: Word(0xFF_FF_FF_FF)}, // mult t1, t2
{name: "mult", funct: uint32(0x18), rs: Word(0xFF_FF_FF_FF), rt: Word(0xFF_FF_FF_FF), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(0), expectLo: Word(0x1)}, // mult t1, t2
{name: "mult", funct: uint32(0x18), rs: Word(0xFF_FF_FF_D3), rt: Word(0xAA_BB_CC_DD), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(0xE), expectLo: Word(0xFC_FC_FD_27)}, // mult t1, t2
{name: "multu", funct: uint32(0x19), rs: Word(0x0F_FF_00_00), rt: Word(100), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(0x6), expectLo: Word(0x3F_9C_00_00)}, // multu t1, t2
{name: "multu", funct: uint32(0x19), rs: Word(0x1), rt: Word(0xFF_FF_FF_FF), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(0x0), expectLo: Word(0xFF_FF_FF_FF)}, // multu t1, t2
{name: "multu", funct: uint32(0x19), rs: Word(0xFF_FF_FF_FF), rt: Word(0xFF_FF_FF_FF), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(0xFF_FF_FF_FE), expectLo: Word(0x1)}, // multu t1, t2
{name: "multu", funct: uint32(0x19), rs: Word(0xFF_FF_FF_D3), rt: Word(0xAA_BB_CC_DD), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(0xAA_BB_CC_BE), expectLo: Word(0xFC_FC_FD_27)}, // multu t1, t2
{name: "div", funct: uint32(0x1a), rs: Word(5), rt: Word(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(1), expectLo: Word(2)}, // div t1, t2 {name: "div", funct: uint32(0x1a), rs: Word(5), rt: Word(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(1), expectLo: Word(2)}, // div t1, t2
{name: "div by zero", funct: uint32(0x1a), rs: Word(5), rt: Word(0), rdReg: uint32(0x0), opcode: uint32(0), expectRevert: "instruction divide by zero", errMsg: "MIPS: division by zero"}, // div t1, t2 {name: "div by zero", funct: uint32(0x1a), rs: Word(5), rt: Word(0), rdReg: uint32(0x0), opcode: uint32(0), expectRevert: "instruction divide by zero", errMsg: "MIPS: division by zero"}, // div t1, t2
{name: "divu", funct: uint32(0x1b), rs: Word(5), rt: Word(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(1), expectLo: Word(2)}, // divu t1, t2 {name: "divu", funct: uint32(0x1b), rs: Word(5), rt: Word(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(1), expectLo: Word(2)}, // divu t1, t2
...@@ -483,7 +507,7 @@ func TestEVMSingleStep_MulDiv(t *testing.T) { ...@@ -483,7 +507,7 @@ func TestEVMSingleStep_MulDiv(t *testing.T) {
_, _ = goVm.Step( _, _ = goVm.Step(
false) false)
}) })
testutil.AssertEVMReverts(t, state, v.Contracts, tracer, proofData, tt.errMsg) testutil.AssertEVMReverts(t, state, v.Contracts, tracer, proofData, testutil.CreateErrorStringMatcher(tt.errMsg))
return return
} }
...@@ -503,14 +527,13 @@ func TestEVMSingleStep_MulDiv(t *testing.T) { ...@@ -503,14 +527,13 @@ func TestEVMSingleStep_MulDiv(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
} }
func TestEVMSingleStep_MthiMtlo(t *testing.T) { func TestEVMSingleStep_MthiMtlo(t *testing.T) {
var tracer *tracing.Hooks
versions := GetMipsVersionTestCases(t) versions := GetMipsVersionTestCases(t)
cases := []struct { cases := []struct {
name string name string
...@@ -544,15 +567,13 @@ func TestEVMSingleStep_MthiMtlo(t *testing.T) { ...@@ -544,15 +567,13 @@ func TestEVMSingleStep_MthiMtlo(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
} }
func TestEVM_MMap(t *testing.T) { func TestEVM_MMap(t *testing.T) {
var tracer *tracing.Hooks
versions := GetMipsVersionTestCases(t) versions := GetMipsVersionTestCases(t)
cases := []struct { cases := []struct {
name string name string
...@@ -609,15 +630,13 @@ func TestEVM_MMap(t *testing.T) { ...@@ -609,15 +630,13 @@ func TestEVM_MMap(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
} }
func TestEVMSysWriteHint(t *testing.T) { func TestEVMSysWriteHint(t *testing.T) {
var tracer *tracing.Hooks
versions := GetMipsVersionTestCases(t) versions := GetMipsVersionTestCases(t)
cases := []struct { cases := []struct {
name string name string
...@@ -804,7 +823,7 @@ func TestEVMSysWriteHint(t *testing.T) { ...@@ -804,7 +823,7 @@ func TestEVMSysWriteHint(t *testing.T) {
expected.Validate(t, state) expected.Validate(t, state)
require.Equal(t, tt.expectedHints, oracle.Hints()) require.Equal(t, tt.expectedHints, oracle.Hints())
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
...@@ -814,23 +833,38 @@ func TestEVMFault(t *testing.T) { ...@@ -814,23 +833,38 @@ func TestEVMFault(t *testing.T) {
var tracer *tracing.Hooks // no-tracer by default, but see test_util.MarkdownTracer var tracer *tracing.Hooks // no-tracer by default, but see test_util.MarkdownTracer
versions := GetMipsVersionTestCases(t) versions := GetMipsVersionTestCases(t)
misAlignedInstructionErr := func() testutil.ErrMatcher {
if arch.IsMips32 {
// matches revert(0,0)
return testutil.CreateNoopErrorMatcher()
} else {
return testutil.CreateCustomErrorMatcher("InvalidPC()")
}
}
cases := []struct { cases := []struct {
name string name string
pc arch.Word
nextPC arch.Word nextPC arch.Word
insn uint32 insn uint32
errMsg string errMsg testutil.ErrMatcher
memoryProofAddresses []Word memoryProofAddresses []Word
}{ }{
{"illegal instruction", 0, 0xFF_FF_FF_FF, "invalid instruction", []Word{0xa7ef00cc}}, {name: "illegal instruction", nextPC: 0, insn: 0xFF_FF_FF_FF, errMsg: testutil.CreateErrorStringMatcher("invalid instruction"), memoryProofAddresses: []Word{0xa7ef00cc}},
{"branch in delay-slot", 8, 0x11_02_00_03, "branch in delay slot", []Word{}}, {name: "branch in delay-slot", nextPC: 8, insn: 0x11_02_00_03, errMsg: testutil.CreateErrorStringMatcher("branch in delay slot")},
{"jump in delay-slot", 8, 0x0c_00_00_0c, "jump in delay slot", []Word{}}, {name: "jump in delay-slot", nextPC: 8, insn: 0x0c_00_00_0c, errMsg: testutil.CreateErrorStringMatcher("jump in delay slot")},
{name: "misaligned instruction", pc: 1, nextPC: 4, insn: 0b110111_00001_00001 << 16, errMsg: misAlignedInstructionErr()},
{name: "misaligned instruction", pc: 2, nextPC: 4, insn: 0b110111_00001_00001 << 16, errMsg: misAlignedInstructionErr()},
{name: "misaligned instruction", pc: 3, nextPC: 4, insn: 0b110111_00001_00001 << 16, errMsg: misAlignedInstructionErr()},
{name: "misaligned instruction", pc: 5, nextPC: 4, insn: 0b110111_00001_00001 << 16, errMsg: misAlignedInstructionErr()},
} }
for _, v := range versions { for _, v := range versions {
for _, tt := range cases { for _, tt := range cases {
testName := fmt.Sprintf("%v (%v)", tt.name, v.Name) testName := fmt.Sprintf("%v (%v)", tt.name, v.Name)
t.Run(testName, func(t *testing.T) { t.Run(testName, func(t *testing.T) {
goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithNextPC(tt.nextPC)) goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithPC(tt.pc), testutil.WithNextPC(tt.nextPC))
state := goVm.GetState() state := goVm.GetState()
testutil.StoreInstruction(state.GetMemory(), 0, tt.insn) testutil.StoreInstruction(state.GetMemory(), 0, tt.insn)
// set the return address ($ra) to jump into when test completes // set the return address ($ra) to jump into when test completes
...@@ -846,7 +880,6 @@ func TestEVMFault(t *testing.T) { ...@@ -846,7 +880,6 @@ func TestEVMFault(t *testing.T) {
func TestHelloEVM(t *testing.T) { func TestHelloEVM(t *testing.T) {
t.Parallel() t.Parallel()
var tracer *tracing.Hooks // no-tracer by default, but see test_util.MarkdownTracer
versions := GetMipsVersionTestCases(t) versions := GetMipsVersionTestCases(t)
for _, v := range versions { for _, v := range versions {
...@@ -854,22 +887,21 @@ func TestHelloEVM(t *testing.T) { ...@@ -854,22 +887,21 @@ func TestHelloEVM(t *testing.T) {
t.Run(v.Name, func(t *testing.T) { t.Run(v.Name, func(t *testing.T) {
t.Parallel() t.Parallel()
evm := testutil.NewMIPSEVM(v.Contracts) evm := testutil.NewMIPSEVM(v.Contracts)
evm.SetTracer(tracer)
testutil.LogStepFailureAtCleanup(t, evm) testutil.LogStepFailureAtCleanup(t, evm)
var stdOutBuf, stdErrBuf bytes.Buffer var stdOutBuf, stdErrBuf bytes.Buffer
elfFile := "../../testdata/example/bin/hello.elf" elfFile := testutil.ProgramPath("hello")
goVm := v.ElfVMFactory(t, elfFile, nil, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger()) goVm := v.ElfVMFactory(t, elfFile, nil, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger())
state := goVm.GetState() state := goVm.GetState()
start := time.Now() start := time.Now()
for i := 0; i < 400_000; i++ { for i := 0; i < 430_000; i++ {
step := goVm.GetState().GetStep() step := goVm.GetState().GetStep()
if goVm.GetState().GetExited() { if goVm.GetState().GetExited() {
break break
} }
insn := testutil.GetInstruction(state.GetMemory(), state.GetPC()) insn := testutil.GetInstruction(state.GetMemory(), state.GetPC())
if i%1000 == 0 { // avoid spamming test logs, we are executing many steps if i%10_000 == 0 { // avoid spamming test logs, we are executing many steps
t.Logf("step: %4d pc: 0x%08x insn: 0x%08x", state.GetStep(), state.GetPC(), insn) t.Logf("step: %4d pc: 0x%08x insn: 0x%08x", state.GetStep(), state.GetPC(), insn)
} }
...@@ -897,7 +929,6 @@ func TestHelloEVM(t *testing.T) { ...@@ -897,7 +929,6 @@ func TestHelloEVM(t *testing.T) {
func TestClaimEVM(t *testing.T) { func TestClaimEVM(t *testing.T) {
t.Parallel() t.Parallel()
var tracer *tracing.Hooks // no-tracer by default, but see test_util.MarkdownTracer
versions := GetMipsVersionTestCases(t) versions := GetMipsVersionTestCases(t)
for _, v := range versions { for _, v := range versions {
...@@ -905,13 +936,12 @@ func TestClaimEVM(t *testing.T) { ...@@ -905,13 +936,12 @@ func TestClaimEVM(t *testing.T) {
t.Run(v.Name, func(t *testing.T) { t.Run(v.Name, func(t *testing.T) {
t.Parallel() t.Parallel()
evm := testutil.NewMIPSEVM(v.Contracts) evm := testutil.NewMIPSEVM(v.Contracts)
evm.SetTracer(tracer)
testutil.LogStepFailureAtCleanup(t, evm) testutil.LogStepFailureAtCleanup(t, evm)
oracle, expectedStdOut, expectedStdErr := testutil.ClaimTestOracle(t) oracle, expectedStdOut, expectedStdErr := testutil.ClaimTestOracle(t)
var stdOutBuf, stdErrBuf bytes.Buffer var stdOutBuf, stdErrBuf bytes.Buffer
elfFile := "../../testdata/example/bin/claim.elf" elfFile := testutil.ProgramPath("claim")
goVm := v.ElfVMFactory(t, elfFile, oracle, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger()) goVm := v.ElfVMFactory(t, elfFile, oracle, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger())
state := goVm.GetState() state := goVm.GetState()
...@@ -922,7 +952,7 @@ func TestClaimEVM(t *testing.T) { ...@@ -922,7 +952,7 @@ func TestClaimEVM(t *testing.T) {
} }
insn := testutil.GetInstruction(state.GetMemory(), state.GetPC()) insn := testutil.GetInstruction(state.GetMemory(), state.GetPC())
if i%1000 == 0 { // avoid spamming test logs, we are executing many steps if i%10_000 == 0 { // avoid spamming test logs, we are executing many steps
t.Logf("step: %4d pc: 0x%08x insn: 0x%08x", state.GetStep(), state.GetPC(), insn) t.Logf("step: %4d pc: 0x%08x insn: 0x%08x", state.GetStep(), state.GetPC(), insn)
} }
...@@ -947,7 +977,6 @@ func TestClaimEVM(t *testing.T) { ...@@ -947,7 +977,6 @@ func TestClaimEVM(t *testing.T) {
func TestEntryEVM(t *testing.T) { func TestEntryEVM(t *testing.T) {
t.Parallel() t.Parallel()
var tracer *tracing.Hooks // no-tracer by default, but see test_util.MarkdownTracer
versions := GetMipsVersionTestCases(t) versions := GetMipsVersionTestCases(t)
for _, v := range versions { for _, v := range versions {
...@@ -955,11 +984,10 @@ func TestEntryEVM(t *testing.T) { ...@@ -955,11 +984,10 @@ func TestEntryEVM(t *testing.T) {
t.Run(v.Name, func(t *testing.T) { t.Run(v.Name, func(t *testing.T) {
t.Parallel() t.Parallel()
evm := testutil.NewMIPSEVM(v.Contracts) evm := testutil.NewMIPSEVM(v.Contracts)
evm.SetTracer(tracer)
testutil.LogStepFailureAtCleanup(t, evm) testutil.LogStepFailureAtCleanup(t, evm)
var stdOutBuf, stdErrBuf bytes.Buffer var stdOutBuf, stdErrBuf bytes.Buffer
elfFile := "../../testdata/example/bin/entry.elf" elfFile := testutil.ProgramPath("entry")
goVm := v.ElfVMFactory(t, elfFile, nil, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger()) goVm := v.ElfVMFactory(t, elfFile, nil, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger())
state := goVm.GetState() state := goVm.GetState()
...@@ -993,8 +1021,6 @@ func TestEntryEVM(t *testing.T) { ...@@ -993,8 +1021,6 @@ func TestEntryEVM(t *testing.T) {
} }
func TestEVMSingleStepBranch(t *testing.T) { func TestEVMSingleStepBranch(t *testing.T) {
var tracer *tracing.Hooks
versions := GetMipsVersionTestCases(t) versions := GetMipsVersionTestCases(t)
cases := []struct { cases := []struct {
name string name string
...@@ -1086,7 +1112,7 @@ func TestEVMSingleStepBranch(t *testing.T) { ...@@ -1086,7 +1112,7 @@ func TestEVMSingleStepBranch(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
......
...@@ -8,7 +8,6 @@ import ( ...@@ -8,7 +8,6 @@ import (
"fmt" "fmt"
"testing" "testing"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch"
...@@ -18,8 +17,6 @@ import ( ...@@ -18,8 +17,6 @@ import (
) )
func TestEVM_MT64_LL(t *testing.T) { func TestEVM_MT64_LL(t *testing.T) {
var tracer *tracing.Hooks
memVal := Word(0x11223344_55667788) memVal := Word(0x11223344_55667788)
memValNeg := Word(0xF1223344_F5667788) memValNeg := Word(0xF1223344_F5667788)
cases := []struct { cases := []struct {
...@@ -84,15 +81,13 @@ func TestEVM_MT64_LL(t *testing.T) { ...@@ -84,15 +81,13 @@ func TestEVM_MT64_LL(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
} }
func TestEVM_MT64_SC(t *testing.T) { func TestEVM_MT64_SC(t *testing.T) {
var tracer *tracing.Hooks
llVariations := []struct { llVariations := []struct {
name string name string
llReservationStatus multithreaded.LLReservationStatus llReservationStatus multithreaded.LLReservationStatus
...@@ -187,15 +182,13 @@ func TestEVM_MT64_SC(t *testing.T) { ...@@ -187,15 +182,13 @@ func TestEVM_MT64_SC(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
} }
func TestEVM_MT64_LLD(t *testing.T) { func TestEVM_MT64_LLD(t *testing.T) {
var tracer *tracing.Hooks
memVal := Word(0x11223344_55667788) memVal := Word(0x11223344_55667788)
memValNeg := Word(0xF1223344_F5667788) memValNeg := Word(0xF1223344_F5667788)
cases := []struct { cases := []struct {
...@@ -260,15 +253,13 @@ func TestEVM_MT64_LLD(t *testing.T) { ...@@ -260,15 +253,13 @@ func TestEVM_MT64_LLD(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
} }
func TestEVM_MT64_SCD(t *testing.T) { func TestEVM_MT64_SCD(t *testing.T) {
var tracer *tracing.Hooks
value := Word(0x11223344_55667788) value := Word(0x11223344_55667788)
llVariations := []struct { llVariations := []struct {
name string name string
...@@ -364,7 +355,7 @@ func TestEVM_MT64_SCD(t *testing.T) { ...@@ -364,7 +355,7 @@ func TestEVM_MT64_SCD(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
......
...@@ -26,8 +26,6 @@ import ( ...@@ -26,8 +26,6 @@ import (
type Word = arch.Word type Word = arch.Word
func TestEVM_MT_LL(t *testing.T) { func TestEVM_MT_LL(t *testing.T) {
var tracer *tracing.Hooks
// Set up some test values that will be reused // Set up some test values that will be reused
posValue := uint64(0xAAAA_BBBB_1122_3344) posValue := uint64(0xAAAA_BBBB_1122_3344)
posValueRet := uint64(0x1122_3344) posValueRet := uint64(0x1122_3344)
...@@ -90,15 +88,13 @@ func TestEVM_MT_LL(t *testing.T) { ...@@ -90,15 +88,13 @@ func TestEVM_MT_LL(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
} }
func TestEVM_MT_SC(t *testing.T) { func TestEVM_MT_SC(t *testing.T) {
var tracer *tracing.Hooks
// Set up some test values that will be reused // Set up some test values that will be reused
memValue := uint64(0x1122_3344_5566_7788) memValue := uint64(0x1122_3344_5566_7788)
...@@ -189,15 +185,13 @@ func TestEVM_MT_SC(t *testing.T) { ...@@ -189,15 +185,13 @@ func TestEVM_MT_SC(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
} }
func TestEVM_MT_SysRead_Preimage(t *testing.T) { func TestEVM_MT_SysRead_Preimage(t *testing.T) {
var tracer *tracing.Hooks
preimageValue := make([]byte, 0, 8) preimageValue := make([]byte, 0, 8)
preimageValue = binary.BigEndian.AppendUint32(preimageValue, 0x12_34_56_78) preimageValue = binary.BigEndian.AppendUint32(preimageValue, 0x12_34_56_78)
preimageValue = binary.BigEndian.AppendUint32(preimageValue, 0x98_76_54_32) preimageValue = binary.BigEndian.AppendUint32(preimageValue, 0x98_76_54_32)
...@@ -290,14 +284,14 @@ func TestEVM_MT_SysRead_Preimage(t *testing.T) { ...@@ -290,14 +284,14 @@ func TestEVM_MT_SysRead_Preimage(t *testing.T) {
if c.shouldPanic { if c.shouldPanic {
require.Panics(t, func() { _, _ = goVm.Step(true) }) require.Panics(t, func() { _, _ = goVm.Step(true) })
testutil.AssertPreimageOracleReverts(t, preimageKey, preimageValue, c.preimageOffset, contracts, tracer) testutil.AssertPreimageOracleReverts(t, preimageKey, preimageValue, c.preimageOffset, contracts)
} else { } else {
stepWitness, err := goVm.Step(true) stepWitness, err := goVm.Step(true)
require.NoError(t, err) require.NoError(t, err)
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
} }
}) })
} }
...@@ -305,8 +299,6 @@ func TestEVM_MT_SysRead_Preimage(t *testing.T) { ...@@ -305,8 +299,6 @@ func TestEVM_MT_SysRead_Preimage(t *testing.T) {
} }
func TestEVM_MT_StoreOpsClearMemReservation(t *testing.T) { func TestEVM_MT_StoreOpsClearMemReservation(t *testing.T) {
var tracer *tracing.Hooks
llVariations := []struct { llVariations := []struct {
name string name string
llReservationStatus multithreaded.LLReservationStatus llReservationStatus multithreaded.LLReservationStatus
...@@ -382,7 +374,7 @@ func TestEVM_MT_StoreOpsClearMemReservation(t *testing.T) { ...@@ -382,7 +374,7 @@ func TestEVM_MT_StoreOpsClearMemReservation(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
...@@ -390,7 +382,6 @@ func TestEVM_MT_StoreOpsClearMemReservation(t *testing.T) { ...@@ -390,7 +382,6 @@ func TestEVM_MT_StoreOpsClearMemReservation(t *testing.T) {
func TestEVM_SysClone_FlagHandling(t *testing.T) { func TestEVM_SysClone_FlagHandling(t *testing.T) {
contracts := testutil.TestContractsSetup(t, testutil.MipsMultithreaded) contracts := testutil.TestContractsSetup(t, testutil.MipsMultithreaded)
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
...@@ -437,7 +428,6 @@ func TestEVM_SysClone_FlagHandling(t *testing.T) { ...@@ -437,7 +428,6 @@ func TestEVM_SysClone_FlagHandling(t *testing.T) {
} }
evm := testutil.NewMIPSEVM(contracts) evm := testutil.NewMIPSEVM(contracts)
evm.SetTracer(tracer)
testutil.LogStepFailureAtCleanup(t, evm) testutil.LogStepFailureAtCleanup(t, evm)
evmPost := evm.Step(t, stepWitness, curStep, multithreaded.GetStateHashFn()) evmPost := evm.Step(t, stepWitness, curStep, multithreaded.GetStateHashFn())
...@@ -449,7 +439,6 @@ func TestEVM_SysClone_FlagHandling(t *testing.T) { ...@@ -449,7 +439,6 @@ func TestEVM_SysClone_FlagHandling(t *testing.T) {
} }
func TestEVM_SysClone_Successful(t *testing.T) { func TestEVM_SysClone_Successful(t *testing.T) {
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
traverseRight bool traverseRight bool
...@@ -506,13 +495,12 @@ func TestEVM_SysClone_Successful(t *testing.T) { ...@@ -506,13 +495,12 @@ func TestEVM_SysClone_Successful(t *testing.T) {
activeStack, inactiveStack := mttestutil.GetThreadStacks(state) activeStack, inactiveStack := mttestutil.GetThreadStacks(state)
require.Equal(t, 2, len(activeStack)) require.Equal(t, 2, len(activeStack))
require.Equal(t, 0, len(inactiveStack)) require.Equal(t, 0, len(inactiveStack))
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
func TestEVM_SysGetTID(t *testing.T) { func TestEVM_SysGetTID(t *testing.T) {
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
threadId Word threadId Word
...@@ -545,13 +533,12 @@ func TestEVM_SysGetTID(t *testing.T) { ...@@ -545,13 +533,12 @@ func TestEVM_SysGetTID(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
func TestEVM_SysExit(t *testing.T) { func TestEVM_SysExit(t *testing.T) {
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
threadCount int threadCount int
...@@ -594,13 +581,12 @@ func TestEVM_SysExit(t *testing.T) { ...@@ -594,13 +581,12 @@ func TestEVM_SysExit(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
func TestEVM_PopExitedThread(t *testing.T) { func TestEVM_PopExitedThread(t *testing.T) {
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
traverseRight bool traverseRight bool
...@@ -646,13 +632,12 @@ func TestEVM_PopExitedThread(t *testing.T) { ...@@ -646,13 +632,12 @@ func TestEVM_PopExitedThread(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
func TestEVM_SysFutex_WaitPrivate(t *testing.T) { func TestEVM_SysFutex_WaitPrivate(t *testing.T) {
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
addressParam Word addressParam Word
...@@ -713,13 +698,12 @@ func TestEVM_SysFutex_WaitPrivate(t *testing.T) { ...@@ -713,13 +698,12 @@ func TestEVM_SysFutex_WaitPrivate(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
func TestEVM_SysFutex_WakePrivate(t *testing.T) { func TestEVM_SysFutex_WakePrivate(t *testing.T) {
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
addressParam Word addressParam Word
...@@ -776,14 +760,12 @@ func TestEVM_SysFutex_WakePrivate(t *testing.T) { ...@@ -776,14 +760,12 @@ func TestEVM_SysFutex_WakePrivate(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
func TestEVM_SysFutex_UnsupportedOp(t *testing.T) { func TestEVM_SysFutex_UnsupportedOp(t *testing.T) {
var tracer *tracing.Hooks
// From: https://github.com/torvalds/linux/blob/5be63fc19fcaa4c236b307420483578a56986a37/include/uapi/linux/futex.h // From: https://github.com/torvalds/linux/blob/5be63fc19fcaa4c236b307420483578a56986a37/include/uapi/linux/futex.h
const FUTEX_PRIVATE_FLAG = 128 const FUTEX_PRIVATE_FLAG = 128
const FUTEX_WAIT = 0 const FUTEX_WAIT = 0
...@@ -855,7 +837,7 @@ func TestEVM_SysFutex_UnsupportedOp(t *testing.T) { ...@@ -855,7 +837,7 @@ func TestEVM_SysFutex_UnsupportedOp(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
...@@ -869,7 +851,6 @@ func TestEVM_SysNanosleep(t *testing.T) { ...@@ -869,7 +851,6 @@ func TestEVM_SysNanosleep(t *testing.T) {
} }
func runPreemptSyscall(t *testing.T, syscallName string, syscallNum uint32) { func runPreemptSyscall(t *testing.T, syscallName string, syscallNum uint32) {
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
traverseRight bool traverseRight bool
...@@ -908,15 +889,13 @@ func runPreemptSyscall(t *testing.T, syscallName string, syscallNum uint32) { ...@@ -908,15 +889,13 @@ func runPreemptSyscall(t *testing.T, syscallName string, syscallNum uint32) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
} }
func TestEVM_SysOpen(t *testing.T) { func TestEVM_SysOpen(t *testing.T) {
var tracer *tracing.Hooks
goVm, state, contracts := setup(t, 5512, nil) goVm, state, contracts := setup(t, 5512, nil)
testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn)
...@@ -937,11 +916,10 @@ func TestEVM_SysOpen(t *testing.T) { ...@@ -937,11 +916,10 @@ func TestEVM_SysOpen(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
} }
func TestEVM_SysGetPID(t *testing.T) { func TestEVM_SysGetPID(t *testing.T) {
var tracer *tracing.Hooks
goVm, state, contracts := setup(t, 1929, nil) goVm, state, contracts := setup(t, 1929, nil)
testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn)
...@@ -962,7 +940,7 @@ func TestEVM_SysGetPID(t *testing.T) { ...@@ -962,7 +940,7 @@ func TestEVM_SysGetPID(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
} }
func TestEVM_SysClockGettimeMonotonic(t *testing.T) { func TestEVM_SysClockGettimeMonotonic(t *testing.T) {
...@@ -974,8 +952,6 @@ func TestEVM_SysClockGettimeRealtime(t *testing.T) { ...@@ -974,8 +952,6 @@ func TestEVM_SysClockGettimeRealtime(t *testing.T) {
} }
func testEVM_SysClockGettime(t *testing.T, clkid Word) { func testEVM_SysClockGettime(t *testing.T, clkid Word) {
var tracer *tracing.Hooks
llVariations := []struct { llVariations := []struct {
name string name string
llReservationStatus multithreaded.LLReservationStatus llReservationStatus multithreaded.LLReservationStatus
...@@ -1062,14 +1038,13 @@ func testEVM_SysClockGettime(t *testing.T, clkid Word) { ...@@ -1062,14 +1038,13 @@ func testEVM_SysClockGettime(t *testing.T, clkid Word) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
} }
func TestEVM_SysClockGettimeNonMonotonic(t *testing.T) { func TestEVM_SysClockGettimeNonMonotonic(t *testing.T) {
var tracer *tracing.Hooks
goVm, state, contracts := setup(t, 2101, nil) goVm, state, contracts := setup(t, 2101, nil)
timespecAddr := Word(0x1000) timespecAddr := Word(0x1000)
...@@ -1091,7 +1066,7 @@ func TestEVM_SysClockGettimeNonMonotonic(t *testing.T) { ...@@ -1091,7 +1066,7 @@ func TestEVM_SysClockGettimeNonMonotonic(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
} }
var NoopSyscalls = map[string]uint32{ var NoopSyscalls = map[string]uint32{
...@@ -1132,7 +1107,6 @@ var NoopSyscalls = map[string]uint32{ ...@@ -1132,7 +1107,6 @@ var NoopSyscalls = map[string]uint32{
} }
func TestEVM_NoopSyscall(t *testing.T) { func TestEVM_NoopSyscall(t *testing.T) {
var tracer *tracing.Hooks
for noopName, noopVal := range NoopSyscalls { for noopName, noopVal := range NoopSyscalls {
t.Run(noopName, func(t *testing.T) { t.Run(noopName, func(t *testing.T) {
goVm, state, contracts := setup(t, int(noopVal), nil) goVm, state, contracts := setup(t, int(noopVal), nil)
...@@ -1155,7 +1129,7 @@ func TestEVM_NoopSyscall(t *testing.T) { ...@@ -1155,7 +1129,7 @@ func TestEVM_NoopSyscall(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
...@@ -1191,7 +1165,7 @@ func TestEVM_UnsupportedSyscall(t *testing.T) { ...@@ -1191,7 +1165,7 @@ func TestEVM_UnsupportedSyscall(t *testing.T) {
require.Panics(t, func() { _, _ = goVm.Step(true) }) require.Panics(t, func() { _, _ = goVm.Step(true) })
errorMessage := "MIPS2: unimplemented syscall" errorMessage := "MIPS2: unimplemented syscall"
testutil.AssertEVMReverts(t, state, contracts, tracer, proofData, errorMessage) testutil.AssertEVMReverts(t, state, contracts, tracer, proofData, testutil.CreateErrorStringMatcher(errorMessage))
}) })
} }
} }
...@@ -1223,14 +1197,13 @@ func TestEVM_EmptyThreadStacks(t *testing.T) { ...@@ -1223,14 +1197,13 @@ func TestEVM_EmptyThreadStacks(t *testing.T) {
require.PanicsWithValue(t, "Active thread stack is empty", func() { _, _ = goVm.Step(false) }) require.PanicsWithValue(t, "Active thread stack is empty", func() { _, _ = goVm.Step(false) })
errorMessage := "MIPS2: active thread stack is empty" errorMessage := "MIPS2: active thread stack is empty"
testutil.AssertEVMReverts(t, state, contracts, tracer, proofCase.Proof, errorMessage) testutil.AssertEVMReverts(t, state, contracts, tracer, proofCase.Proof, testutil.CreateErrorStringMatcher(errorMessage))
}) })
} }
} }
} }
func TestEVM_NormalTraversalStep_HandleWaitingThread(t *testing.T) { func TestEVM_NormalTraversalStep_HandleWaitingThread(t *testing.T) {
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
step uint64 step uint64
...@@ -1307,7 +1280,7 @@ func TestEVM_NormalTraversalStep_HandleWaitingThread(t *testing.T) { ...@@ -1307,7 +1280,7 @@ func TestEVM_NormalTraversalStep_HandleWaitingThread(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, c.step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, c.step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
...@@ -1315,7 +1288,6 @@ func TestEVM_NormalTraversalStep_HandleWaitingThread(t *testing.T) { ...@@ -1315,7 +1288,6 @@ func TestEVM_NormalTraversalStep_HandleWaitingThread(t *testing.T) {
} }
func TestEVM_NormalTraversal_Full(t *testing.T) { func TestEVM_NormalTraversal_Full(t *testing.T) {
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
threadCount int threadCount int
...@@ -1357,7 +1329,7 @@ func TestEVM_NormalTraversal_Full(t *testing.T) { ...@@ -1357,7 +1329,7 @@ func TestEVM_NormalTraversal_Full(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
} }
// We should be back to the original state with only a few modifications // We should be back to the original state with only a few modifications
...@@ -1372,7 +1344,6 @@ func TestEVM_NormalTraversal_Full(t *testing.T) { ...@@ -1372,7 +1344,6 @@ func TestEVM_NormalTraversal_Full(t *testing.T) {
func TestEVM_WakeupTraversalStep(t *testing.T) { func TestEVM_WakeupTraversalStep(t *testing.T) {
addr := Word(0x1234) addr := Word(0x1234)
wakeupVal := Word(0x999) wakeupVal := Word(0x999)
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
wakeupAddr Word wakeupAddr Word
...@@ -1440,13 +1411,12 @@ func TestEVM_WakeupTraversalStep(t *testing.T) { ...@@ -1440,13 +1411,12 @@ func TestEVM_WakeupTraversalStep(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
func TestEVM_WakeupTraversal_Full(t *testing.T) { func TestEVM_WakeupTraversal_Full(t *testing.T) {
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
threadCount int threadCount int
...@@ -1486,7 +1456,7 @@ func TestEVM_WakeupTraversal_Full(t *testing.T) { ...@@ -1486,7 +1456,7 @@ func TestEVM_WakeupTraversal_Full(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
} }
// We should be back to the original state with only a few modifications // We should be back to the original state with only a few modifications
...@@ -1499,7 +1469,6 @@ func TestEVM_WakeupTraversal_Full(t *testing.T) { ...@@ -1499,7 +1469,6 @@ func TestEVM_WakeupTraversal_Full(t *testing.T) {
} }
func TestEVM_WakeupTraversal_WithExitedThreads(t *testing.T) { func TestEVM_WakeupTraversal_WithExitedThreads(t *testing.T) {
var tracer *tracing.Hooks
addr := Word(0x1234) addr := Word(0x1234)
wakeupVal := Word(0x999) wakeupVal := Word(0x999)
cases := []struct { cases := []struct {
...@@ -1567,13 +1536,12 @@ func TestEVM_WakeupTraversal_WithExitedThreads(t *testing.T) { ...@@ -1567,13 +1536,12 @@ func TestEVM_WakeupTraversal_WithExitedThreads(t *testing.T) {
stepWitness, err = goVm.Step(true) stepWitness, err = goVm.Step(true)
require.NoError(t, err) require.NoError(t, err)
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
func TestEVM_SchedQuantumThreshold(t *testing.T) { func TestEVM_SchedQuantumThreshold(t *testing.T) {
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
stepsSinceLastContextSwitch uint64 stepsSinceLastContextSwitch uint64
...@@ -1613,7 +1581,7 @@ func TestEVM_SchedQuantumThreshold(t *testing.T) { ...@@ -1613,7 +1581,7 @@ func TestEVM_SchedQuantumThreshold(t *testing.T) {
// Validate post-state // Validate post-state
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), contracts)
}) })
} }
} }
......
...@@ -5,7 +5,6 @@ import ( ...@@ -5,7 +5,6 @@ import (
"os" "os"
"testing" "testing"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -17,8 +16,6 @@ import ( ...@@ -17,8 +16,6 @@ import (
) )
func TestEVM_LL(t *testing.T) { func TestEVM_LL(t *testing.T) {
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
base Word base Word
...@@ -61,14 +58,12 @@ func TestEVM_LL(t *testing.T) { ...@@ -61,14 +58,12 @@ func TestEVM_LL(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
func TestEVM_SC(t *testing.T) { func TestEVM_SC(t *testing.T) {
var tracer *tracing.Hooks
cases := []struct { cases := []struct {
name string name string
base Word base Word
...@@ -115,14 +110,12 @@ func TestEVM_SC(t *testing.T) { ...@@ -115,14 +110,12 @@ func TestEVM_SC(t *testing.T) {
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
} }
func TestEVM_SysRead_Preimage(t *testing.T) { func TestEVM_SysRead_Preimage(t *testing.T) {
var tracer *tracing.Hooks
preimageValue := make([]byte, 0, 8) preimageValue := make([]byte, 0, 8)
preimageValue = binary.BigEndian.AppendUint32(preimageValue, 0x12_34_56_78) preimageValue = binary.BigEndian.AppendUint32(preimageValue, 0x12_34_56_78)
preimageValue = binary.BigEndian.AppendUint32(preimageValue, 0x98_76_54_32) preimageValue = binary.BigEndian.AppendUint32(preimageValue, 0x98_76_54_32)
...@@ -183,14 +176,14 @@ func TestEVM_SysRead_Preimage(t *testing.T) { ...@@ -183,14 +176,14 @@ func TestEVM_SysRead_Preimage(t *testing.T) {
if c.shouldPanic { if c.shouldPanic {
require.Panics(t, func() { _, _ = goVm.Step(true) }) require.Panics(t, func() { _, _ = goVm.Step(true) })
testutil.AssertPreimageOracleReverts(t, preimageKey, preimageValue, c.preimageOffset, v.Contracts, tracer) testutil.AssertPreimageOracleReverts(t, preimageKey, preimageValue, c.preimageOffset, v.Contracts)
} else { } else {
stepWitness, err := goVm.Step(true) stepWitness, err := goVm.Step(true)
require.NoError(t, err) require.NoError(t, err)
// Check expectations // Check expectations
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
} }
}) })
} }
......
//go:build cannon64
// +build cannon64
package tests
import (
"os"
"testing"
"github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil"
"github.com/stretchr/testify/require"
)
// FuzzStateConsistencyMulOp fuzzes the SPECIAL2 `mul` instruction (opcode 28,
// funct 0x2), checking that the Go VM and the on-chain EVM implementation stay
// consistent for signed multiply operands, including 32-bit overflow edges.
func FuzzStateConsistencyMulOp(f *testing.F) {
	f.Add(int64(0x80_00_00_00), int64(0x80_00_00_00), int64(1))
	f.Add(
		testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_11_22_33_44)),
		testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_11_22_33_44)),
		int64(1),
	)
	f.Add(
		testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_80_00_00_00)),
		testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_80_00_00_00)),
		int64(1),
	)
	f.Add(
		testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_FF_FF_FF_FF)),
		testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_FF_FF_FF_FF)),
		int64(1),
	)

	const opcode uint32 = 28
	const mulFunct uint32 = 0x2
	versions := GetMipsVersionTestCases(f)
	f.Fuzz(func(t *testing.T, rs int64, rt int64, seed int64) {
		// mulOpConsistencyCheck already iterates all versions and opens one
		// t.Run subtest per version. The previous extra per-version loop here
		// ran the helper len(versions) times with duplicated subtest names;
		// call it once, matching FuzzStateConsistencyMultOp/MultuOp.
		mulOpConsistencyCheck(t, versions, opcode, true, mulFunct, Word(rs), Word(rt), seed)
	})
}
// FuzzStateConsistencyMultOp fuzzes the `mult` instruction (funct 0x18),
// verifying Go VM / EVM consistency for signed multiply operands.
func FuzzStateConsistencyMultOp(f *testing.F) {
	// Seed corpus: sign-boundary and all-ones operand pairs.
	seeds := [][2]int64{
		{int64(0x80_00_00_00), int64(0x80_00_00_00)},
		{testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_11_22_33_44)), testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_11_22_33_44))},
		{testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_80_00_00_00)), testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_80_00_00_00))},
		{testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_FF_FF_FF_FF)), testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_FF_FF_FF_FF))},
	}
	for _, s := range seeds {
		f.Add(s[0], s[1], int64(1))
	}

	const multFunct uint32 = 0x18
	versions := GetMipsVersionTestCases(f)
	f.Fuzz(func(t *testing.T, rs int64, rt int64, seed int64) {
		// mult writes HI/LO rather than an rd register, hence expectRdReg=false.
		mulOpConsistencyCheck(t, versions, 0, false, multFunct, Word(rs), Word(rt), seed)
	})
}
// FuzzStateConsistencyMultuOp fuzzes the `multu` instruction (funct 0x19),
// verifying Go VM / EVM consistency for unsigned multiply operands.
func FuzzStateConsistencyMultuOp(f *testing.F) {
	// Seed corpus: high-bit and all-ones unsigned operand pairs.
	seeds := [][2]uint64{
		{0x80_00_00_00, 0x80_00_00_00},
		{0xFF_FF_FF_FF_11_22_33_44, 0xFF_FF_FF_FF_11_22_33_44},
		{0xFF_FF_FF_FF_80_00_00_00, 0xFF_FF_FF_FF_80_00_00_00},
		{0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF},
	}
	for _, s := range seeds {
		f.Add(s[0], s[1], int64(1))
	}

	const multuFunct uint32 = 0x19
	versions := GetMipsVersionTestCases(f)
	f.Fuzz(func(t *testing.T, rs uint64, rt uint64, seed int64) {
		// multu writes HI/LO rather than an rd register, hence expectRdReg=false.
		mulOpConsistencyCheck(t, versions, 0, false, multuFunct, rs, rt, seed)
	})
}
// insn describes a multiply-family instruction variant: its opcode, whether
// the result is written to a general-purpose rd register (true for `mul`)
// rather than HI/LO, and its funct field.
// NOTE(review): this type appears unused in this file and its name is
// shadowed by the local variable `insn` inside mulOpConsistencyCheck —
// consider removing it or actually threading it through the helper.
type insn struct {
opcode uint32
expectRdReg bool
funct uint32
}
// mulOpConsistencyCheck executes a single mul/mult/multu instruction on each
// VM version and sanity-checks the Go VM post-state against the EVM.
//
// opcode and funct select the instruction. expectRdReg is true for `mul`,
// which writes its result to the rd register; mult/multu write HI/LO instead.
// rs and rt are the operand values; seed randomizes unrelated VM state.
func mulOpConsistencyCheck(
	t *testing.T, versions []VersionedVMTestCase,
	opcode uint32, expectRdReg bool, funct uint32,
	rs Word, rt Word, seed int64) {
	for _, v := range versions {
		t.Run(v.Name, func(t *testing.T) {
			rsReg := uint32(17)
			rtReg := uint32(18)
			rdReg := uint32(0)
			if expectRdReg {
				rdReg = 19
			}
			// Encode the R-type instruction word.
			// Renamed from `insn` to avoid shadowing the insn type above.
			instruction := opcode<<26 | rsReg<<21 | rtReg<<16 | rdReg<<11 | funct

			goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(seed), testutil.WithPCAndNextPC(0))
			state := goVm.GetState()
			state.GetRegistersRef()[rsReg] = rs
			state.GetRegistersRef()[rtReg] = rt
			testutil.StoreInstruction(state.GetMemory(), 0, instruction)
			step := state.GetStep()

			// mere sanity checks
			expected := testutil.NewExpectedState(state)
			expected.ExpectStep()

			stepWitness, err := goVm.Step(true)
			require.NoError(t, err)

			// use the post-state rdReg or LO and HI just so we can run sanity checks
			if expectRdReg {
				expected.Registers[rdReg] = state.GetRegistersRef()[rdReg]
			} else {
				expected.LO = state.GetCpu().LO
				expected.HI = state.GetCpu().HI
			}

			expected.Validate(t, state)
			testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
		})
	}
}
...@@ -42,7 +42,7 @@ func FuzzStateSyscallBrk(f *testing.F) { ...@@ -42,7 +42,7 @@ func FuzzStateSyscallBrk(f *testing.F) {
require.False(t, stepWitness.HasPreimage()) require.False(t, stepWitness.HasPreimage())
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
}) })
...@@ -97,7 +97,7 @@ func FuzzStateSyscallMmap(f *testing.F) { ...@@ -97,7 +97,7 @@ func FuzzStateSyscallMmap(f *testing.F) {
require.False(t, stepWitness.HasPreimage()) require.False(t, stepWitness.HasPreimage())
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
}) })
...@@ -126,7 +126,7 @@ func FuzzStateSyscallExitGroup(f *testing.F) { ...@@ -126,7 +126,7 @@ func FuzzStateSyscallExitGroup(f *testing.F) {
require.False(t, stepWitness.HasPreimage()) require.False(t, stepWitness.HasPreimage())
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
}) })
...@@ -182,7 +182,7 @@ func FuzzStateSyscallFcntl(f *testing.F) { ...@@ -182,7 +182,7 @@ func FuzzStateSyscallFcntl(f *testing.F) {
require.False(t, stepWitness.HasPreimage()) require.False(t, stepWitness.HasPreimage())
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
}) })
...@@ -219,7 +219,7 @@ func FuzzStateHintRead(f *testing.F) { ...@@ -219,7 +219,7 @@ func FuzzStateHintRead(f *testing.F) {
require.False(t, stepWitness.HasPreimage()) require.False(t, stepWitness.HasPreimage())
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
}) })
...@@ -282,7 +282,7 @@ func FuzzStatePreimageRead(f *testing.F) { ...@@ -282,7 +282,7 @@ func FuzzStatePreimageRead(f *testing.F) {
require.True(t, stepWitness.HasPreimage()) require.True(t, stepWitness.HasPreimage())
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
}) })
...@@ -364,7 +364,7 @@ func FuzzStateHintWrite(f *testing.F) { ...@@ -364,7 +364,7 @@ func FuzzStateHintWrite(f *testing.F) {
// Validate // Validate
require.Equal(t, expectedHints, oracle.Hints()) require.Equal(t, expectedHints, oracle.Hints())
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
}) })
...@@ -424,7 +424,7 @@ func FuzzStatePreimageWrite(f *testing.F) { ...@@ -424,7 +424,7 @@ func FuzzStatePreimageWrite(f *testing.F) {
require.False(t, stepWitness.HasPreimage()) require.False(t, stepWitness.HasPreimage())
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, nil) testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts)
}) })
} }
}) })
......
...@@ -62,6 +62,6 @@ func FuzzStateSyscallCloneMT(f *testing.F) { ...@@ -62,6 +62,6 @@ func FuzzStateSyscallCloneMT(f *testing.F) {
require.False(t, stepWitness.HasPreimage()) require.False(t, stepWitness.HasPreimage())
expected.Validate(t, state) expected.Validate(t, state)
testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), v.Contracts, nil) testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), v.Contracts)
}) })
} }
...@@ -7,6 +7,7 @@ import ( ...@@ -7,6 +7,7 @@ import (
"math/big" "math/big"
"os" "os"
"github.com/ethereum-optimism/optimism/cannon/mipsevm/arch"
"github.com/ethereum-optimism/optimism/op-chain-ops/srcmap" "github.com/ethereum-optimism/optimism/op-chain-ops/srcmap"
"github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/eth/tracers/logger"
...@@ -66,7 +67,11 @@ func loadArtifacts(version MipsVersion) (*Artifacts, error) { ...@@ -66,7 +67,11 @@ func loadArtifacts(version MipsVersion) (*Artifacts, error) {
case MipsSingleThreaded: case MipsSingleThreaded:
mips, err = artifactFS.ReadArtifact("MIPS.sol", "MIPS") mips, err = artifactFS.ReadArtifact("MIPS.sol", "MIPS")
case MipsMultithreaded: case MipsMultithreaded:
mips, err = artifactFS.ReadArtifact("MIPS2.sol", "MIPS2") if arch.IsMips32 {
mips, err = artifactFS.ReadArtifact("MIPS2.sol", "MIPS2")
} else {
mips, err = artifactFS.ReadArtifact("MIPS64.sol", "MIPS64")
}
default: default:
return nil, fmt.Errorf("Unknown MipsVersion supplied: %v", version) return nil, fmt.Errorf("Unknown MipsVersion supplied: %v", version)
} }
...@@ -167,7 +172,11 @@ func SourceMapTracer(t require.TestingT, version MipsVersion, mips *foundry.Arti ...@@ -167,7 +172,11 @@ func SourceMapTracer(t require.TestingT, version MipsVersion, mips *foundry.Arti
case MipsSingleThreaded: case MipsSingleThreaded:
mipsSrcMap, err = srcFS.SourceMap(mips, "MIPS") mipsSrcMap, err = srcFS.SourceMap(mips, "MIPS")
case MipsMultithreaded: case MipsMultithreaded:
mipsSrcMap, err = srcFS.SourceMap(mips, "MIPS2") if arch.IsMips32 {
mipsSrcMap, err = srcFS.SourceMap(mips, "MIPS2")
} else {
mipsSrcMap, err = srcFS.SourceMap(mips, "MIPS64")
}
default: default:
require.Fail(t, "invalid mips version") require.Fail(t, "invalid mips version")
} }
......
...@@ -34,5 +34,5 @@ func GetInstruction(mem *memory.Memory, pc Word) uint32 { ...@@ -34,5 +34,5 @@ func GetInstruction(mem *memory.Memory, pc Word) uint32 {
if pc&0x3 != 0 { if pc&0x3 != 0 {
panic(fmt.Errorf("unaligned memory access: %x", pc)) panic(fmt.Errorf("unaligned memory access: %x", pc))
} }
return exec.LoadSubWord(mem, pc, 4, false, new(exec.NoopMemoryTracker)) return uint32(exec.LoadSubWord(mem, pc, 4, false, new(exec.NoopMemoryTracker)))
} }
...@@ -14,6 +14,7 @@ import ( ...@@ -14,6 +14,7 @@ import (
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm"
...@@ -22,6 +23,9 @@ import ( ...@@ -22,6 +23,9 @@ import (
preimage "github.com/ethereum-optimism/optimism/op-preimage" preimage "github.com/ethereum-optimism/optimism/op-preimage"
) )
// maxStepGas should be less than the L1 gas limit
const maxStepGas = 20_000_000
type MIPSEVM struct { type MIPSEVM struct {
sender vm.AccountRef sender vm.AccountRef
startingGas uint64 startingGas uint64
...@@ -36,11 +40,29 @@ type MIPSEVM struct { ...@@ -36,11 +40,29 @@ type MIPSEVM struct {
lastPreimageOracleInput []byte lastPreimageOracleInput []byte
} }
func NewMIPSEVM(contracts *ContractMetadata) *MIPSEVM { func NewMIPSEVM(contracts *ContractMetadata, opts ...evmOption) *MIPSEVM {
env, evmState := NewEVMEnv(contracts) env, evmState := NewEVMEnv(contracts)
sender := vm.AccountRef{0x13, 0x37} sender := vm.AccountRef{0x13, 0x37}
startingGas := uint64(30_000_000) startingGas := uint64(maxStepGas)
return &MIPSEVM{sender, startingGas, env, evmState, contracts.Addresses, nil, contracts.Artifacts, math.MaxUint64, nil, nil} evm := &MIPSEVM{sender, startingGas, env, evmState, contracts.Addresses, nil, contracts.Artifacts, math.MaxUint64, nil, nil}
for _, opt := range opts {
opt(evm)
}
return evm
}
type evmOption func(c *MIPSEVM)
func WithSourceMapTracer(t *testing.T, ver MipsVersion) evmOption {
return func(evm *MIPSEVM) {
evm.SetSourceMapTracer(t, ver)
}
}
func WithTracingHooks(tracer *tracing.Hooks) evmOption {
return func(evm *MIPSEVM) {
evm.SetTracer(tracer)
}
} }
func (m *MIPSEVM) SetTracer(tracer *tracing.Hooks) { func (m *MIPSEVM) SetTracer(tracer *tracing.Hooks) {
...@@ -171,15 +193,8 @@ func LogStepFailureAtCleanup(t *testing.T, mipsEvm *MIPSEVM) { ...@@ -171,15 +193,8 @@ func LogStepFailureAtCleanup(t *testing.T, mipsEvm *MIPSEVM) {
} }
// ValidateEVM runs a single evm step and validates against an FPVM poststate // ValidateEVM runs a single evm step and validates against an FPVM poststate
func ValidateEVM(t *testing.T, stepWitness *mipsevm.StepWitness, step uint64, goVm mipsevm.FPVM, hashFn mipsevm.HashFn, contracts *ContractMetadata, tracer *tracing.Hooks) { func ValidateEVM(t *testing.T, stepWitness *mipsevm.StepWitness, step uint64, goVm mipsevm.FPVM, hashFn mipsevm.HashFn, contracts *ContractMetadata, opts ...evmOption) {
if !arch.IsMips32 { evm := NewMIPSEVM(contracts, opts...)
// TODO(#12250) Re-enable EVM validation once 64-bit MIPS contracts are completed
t.Logf("WARNING: Skipping EVM validation for 64-bit MIPS")
return
}
evm := NewMIPSEVM(contracts)
evm.SetTracer(tracer)
LogStepFailureAtCleanup(t, evm) LogStepFailureAtCleanup(t, evm)
evmPost := evm.Step(t, stepWitness, step, hashFn) evmPost := evm.Step(t, stepWitness, step, hashFn)
...@@ -188,15 +203,39 @@ func ValidateEVM(t *testing.T, stepWitness *mipsevm.StepWitness, step uint64, go ...@@ -188,15 +203,39 @@ func ValidateEVM(t *testing.T, stepWitness *mipsevm.StepWitness, step uint64, go
"mipsevm produced different state than EVM") "mipsevm produced different state than EVM")
} }
type ErrMatcher func(*testing.T, []byte)
func CreateNoopErrorMatcher() ErrMatcher {
return func(t *testing.T, ret []byte) {}
}
// CreateErrorStringMatcher matches an Error(string)
func CreateErrorStringMatcher(expect string) ErrMatcher {
return func(t *testing.T, ret []byte) {
require.Greaterf(t, len(ret), 4, "Return data length should be greater than 4 bytes: %x", ret)
unpacked, decodeErr := abi.UnpackRevert(ret)
require.NoError(t, decodeErr, "Failed to unpack revert reason")
require.Contains(t, unpacked, expect, "Revert reason mismatch")
}
}
// CreateCustomErrorMatcher matches a custom error given the error signature
func CreateCustomErrorMatcher(sig string) ErrMatcher {
return func(t *testing.T, ret []byte) {
expect := crypto.Keccak256([]byte(sig))[:4]
require.EqualValuesf(t, expect, ret, "return value is %x", ret)
}
}
// AssertEVMReverts runs a single evm step from an FPVM prestate and asserts that the VM panics // AssertEVMReverts runs a single evm step from an FPVM prestate and asserts that the VM panics
func AssertEVMReverts(t *testing.T, state mipsevm.FPVMState, contracts *ContractMetadata, tracer *tracing.Hooks, ProofData []byte, expectedReason string) { func AssertEVMReverts(t *testing.T, state mipsevm.FPVMState, contracts *ContractMetadata, tracer *tracing.Hooks, ProofData []byte, matcher ErrMatcher) {
encodedWitness, _ := state.EncodeWitness() encodedWitness, _ := state.EncodeWitness()
stepWitness := &mipsevm.StepWitness{ stepWitness := &mipsevm.StepWitness{
State: encodedWitness, State: encodedWitness,
ProofData: ProofData, ProofData: ProofData,
} }
input := EncodeStepInput(t, stepWitness, mipsevm.LocalContext{}, contracts.Artifacts.MIPS) input := EncodeStepInput(t, stepWitness, mipsevm.LocalContext{}, contracts.Artifacts.MIPS)
startingGas := uint64(30_000_000) startingGas := uint64(maxStepGas)
env, evmState := NewEVMEnv(contracts) env, evmState := NewEVMEnv(contracts)
env.Config.Tracer = tracer env.Config.Tracer = tracer
...@@ -204,18 +243,14 @@ func AssertEVMReverts(t *testing.T, state mipsevm.FPVMState, contracts *Contract ...@@ -204,18 +243,14 @@ func AssertEVMReverts(t *testing.T, state mipsevm.FPVMState, contracts *Contract
ret, _, err := env.Call(vm.AccountRef(sender), contracts.Addresses.MIPS, input, startingGas, common.U2560) ret, _, err := env.Call(vm.AccountRef(sender), contracts.Addresses.MIPS, input, startingGas, common.U2560)
require.EqualValues(t, err, vm.ErrExecutionReverted) require.EqualValues(t, err, vm.ErrExecutionReverted)
require.Greater(t, len(ret), 4, "Return data length should be greater than 4 bytes") matcher(t, ret)
unpacked, decodeErr := abi.UnpackRevert(ret)
require.NoError(t, decodeErr, "Failed to unpack revert reason")
require.Equal(t, expectedReason, unpacked, "Revert reason mismatch")
logs := evmState.Logs() logs := evmState.Logs()
require.Equal(t, 0, len(logs)) require.Equal(t, 0, len(logs))
} }
func AssertPreimageOracleReverts(t *testing.T, preimageKey [32]byte, preimageValue []byte, preimageOffset arch.Word, contracts *ContractMetadata, tracer *tracing.Hooks) { func AssertPreimageOracleReverts(t *testing.T, preimageKey [32]byte, preimageValue []byte, preimageOffset arch.Word, contracts *ContractMetadata, opts ...evmOption) {
evm := NewMIPSEVM(contracts) evm := NewMIPSEVM(contracts, opts...)
evm.SetTracer(tracer)
LogStepFailureAtCleanup(t, evm) LogStepFailureAtCleanup(t, evm)
evm.assertPreimageOracleReverts(t, preimageKey, preimageValue, preimageOffset) evm.assertPreimageOracleReverts(t, preimageKey, preimageValue, preimageOffset)
......
...@@ -238,6 +238,16 @@ func (s *SourceMapTracer) info(codeAddr common.Address, pc uint64) string { ...@@ -238,6 +238,16 @@ func (s *SourceMapTracer) info(codeAddr common.Address, pc uint64) string {
func (s *SourceMapTracer) OnOpCode(pc uint64, opcode byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { func (s *SourceMapTracer) OnOpCode(pc uint64, opcode byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
op := vm.OpCode(opcode) op := vm.OpCode(opcode)
var top []string
stk := scope.StackData()
for i := len(stk) - 1; i >= 0; i-- {
top = append(top, stk[i].Hex())
if len(top) == 4 {
break
}
}
stkInfo := fmt.Sprintf("[%s]", strings.Join(top, ", "))
if op.IsPush() { if op.IsPush() {
var val []byte var val []byte
sc, ok := scope.(*vm.ScopeContext) sc, ok := scope.(*vm.ScopeContext)
...@@ -248,10 +258,10 @@ func (s *SourceMapTracer) OnOpCode(pc uint64, opcode byte, gas, cost uint64, sco ...@@ -248,10 +258,10 @@ func (s *SourceMapTracer) OnOpCode(pc uint64, opcode byte, gas, cost uint64, sco
} else { } else {
val = []byte("N/A") val = []byte("N/A")
} }
fmt.Fprintf(s.out, "%-40s : pc %x opcode %s (%x)\n", s.info(scope.Address(), pc), pc, op.String(), val) fmt.Fprintf(s.out, "%-40s : pc %x opcode %s (%x) \t| stk[:%d] %s\n", s.info(scope.Address(), pc), pc, op.String(), val, len(top), stkInfo)
return return
} }
fmt.Fprintf(s.out, "%-40s : pc %x opcode %s\n", s.info(scope.Address(), pc), pc, op.String()) fmt.Fprintf(s.out, "%-40s : pc %x opcode %s \t\t| stk[:%d] %s\n", s.info(scope.Address(), pc), pc, op.String(), len(top), stkInfo)
} }
func (s *SourceMapTracer) OnFault(pc uint64, opcode byte, gas, cost uint64, scope tracing.OpContext, depth int, err error) { func (s *SourceMapTracer) OnFault(pc uint64, opcode byte, gas, cost uint64, scope tracing.OpContext, depth int, err error) {
......
...@@ -147,6 +147,10 @@ ...@@ -147,6 +147,10 @@
"initCodeHash": "0xaedf0d0b0e94a0c5e7d987331d2fdba84230f5704a6ca33677e70cde7051b17e", "initCodeHash": "0xaedf0d0b0e94a0c5e7d987331d2fdba84230f5704a6ca33677e70cde7051b17e",
"sourceCodeHash": "0x9fa2d1297ad1e93b4d3c5c0fed08bedcd8f746807589f0fd3369e79347c6a027" "sourceCodeHash": "0x9fa2d1297ad1e93b4d3c5c0fed08bedcd8f746807589f0fd3369e79347c6a027"
}, },
"src/cannon/MIPS64.sol": {
"initCodeHash": "0x47c2bdd1e6fbb4941caa20a2ba5c2a66de198a8c7b540a9d4a0d84dbe8d3bfea",
"sourceCodeHash": "0xdb7f8a92ed552a2720f5fe3c0a32e4069026f0b23933145493ead88403206814"
},
"src/cannon/PreimageOracle.sol": { "src/cannon/PreimageOracle.sol": {
"initCodeHash": "0x5d7e8ae64f802bd9d760e3d52c0a620bd02405dc2c8795818db9183792ffe81c", "initCodeHash": "0x5d7e8ae64f802bd9d760e3d52c0a620bd02405dc2c8795818db9183792ffe81c",
"sourceCodeHash": "0x979d8595d925c70a123e72c062fa58c9ef94777c2e93b6bc3231d6679e2e9055" "sourceCodeHash": "0x979d8595d925c70a123e72c062fa58c9ef94777c2e93b6bc3231d6679e2e9055"
......
[
{
"inputs": [
{
"internalType": "contract IPreimageOracle",
"name": "_oracle",
"type": "address"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"inputs": [],
"name": "oracle",
"outputs": [
{
"internalType": "contract IPreimageOracle",
"name": "oracle_",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes",
"name": "_stateData",
"type": "bytes"
},
{
"internalType": "bytes",
"name": "_proof",
"type": "bytes"
},
{
"internalType": "bytes32",
"name": "_localContext",
"type": "bytes32"
}
],
"name": "step",
"outputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "version",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "InvalidExitedValue",
"type": "error"
},
{
"inputs": [],
"name": "InvalidMemoryProof",
"type": "error"
},
{
"inputs": [],
"name": "InvalidPC",
"type": "error"
},
{
"inputs": [],
"name": "InvalidRMWInstruction",
"type": "error"
},
{
"inputs": [],
"name": "InvalidSecondMemoryProof",
"type": "error"
}
]
\ No newline at end of file
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { ISemver } from "src/universal/interfaces/ISemver.sol";
import { IPreimageOracle } from "./interfaces/IPreimageOracle.sol";
import { MIPS64Memory } from "src/cannon/libraries/MIPS64Memory.sol";
import { MIPS64Syscalls as sys } from "src/cannon/libraries/MIPS64Syscalls.sol";
import { MIPS64State as st } from "src/cannon/libraries/MIPS64State.sol";
import { MIPS64Instructions as ins } from "src/cannon/libraries/MIPS64Instructions.sol";
import { MIPS64Arch as arch } from "src/cannon/libraries/MIPS64Arch.sol";
import { VMStatuses } from "src/dispute/lib/Types.sol";
import {
InvalidMemoryProof, InvalidRMWInstruction, InvalidSecondMemoryProof
} from "src/cannon/libraries/CannonErrors.sol";
/// @title MIPS64
/// @notice The MIPS64 contract emulates a single MIPS instruction.
/// It differs from MIPS.sol in that it supports MIPS64 instructions and multi-tasking.
contract MIPS64 is ISemver {
/// @notice The thread context.
/// Total state size: 8 + 1 + 1 + 8 + 8 + 8 + 8 + 8 + 8 + 8 + 32 * 8 = 322 bytes
struct ThreadState {
// metadata
uint64 threadID; // unique id assigned from State.nextThreadID at creation
uint8 exitCode; // exit code reported when the thread exits
bool exited; // true once the thread has exited; exited threads get popped
// state
uint64 futexAddr; // address the thread is blocked on (sys.FUTEX_EMPTY_ADDR if not blocked)
uint64 futexVal; // expected memory value while waiting on futexAddr
uint64 futexTimeoutStep; // VM step after which the futex wait times out
uint64 pc; // current program counter
uint64 nextPC; // next pc; differs from pc+4 in a branch/jump delay slot
uint64 lo; // multiply/divide LO result register
uint64 hi; // multiply/divide HI result register
uint64[32] registers; // MIPS general-purpose registers
}
// Byte length of a packed ThreadState in the thread witness calldata.
uint32 internal constant PACKED_THREAD_STATE_SIZE = 322;
// Load-linked reservation states: none active, or an active 32-bit (ll/sc)
// or 64-bit (lld/scd) reservation. See handleRMWOps.
uint8 internal constant LL_STATUS_NONE = 0;
uint8 internal constant LL_STATUS_ACTIVE_32_BIT = 0x1;
uint8 internal constant LL_STATUS_ACTIVE_64_BIT = 0x2;
/// @notice Stores the VM state.
/// Total state size: 32 + 32 + 8 + 8 + 1 + 8 + 8 + 1 + 1 + 8 + 8 + 8 + 1 + 32 + 32 + 8 = 196 bytes
/// If nextPC != pc + 4, then the VM is executing a branch/jump delay slot.
struct State {
bytes32 memRoot; // merkle root of the VM memory
bytes32 preimageKey; // key of the preimage currently being read, if any
uint64 preimageOffset; // read offset into the current preimage
uint64 heap; // heap pointer used by mmap
uint8 llReservationStatus; // one of the LL_STATUS_* constants
uint64 llAddress; // address covered by the active load-linked reservation
uint64 llOwnerThread; // thread that owns the active reservation
uint8 exitCode; // VM-level exit code
bool exited; // true once the VM has halted; step() becomes a no-op
uint64 step; // total number of steps executed
uint64 stepsSinceLastContextSwitch; // preemption counter vs sys.SCHED_QUANTUM
uint64 wakeup; // futex wakeup traversal target (sys.FUTEX_EMPTY_ADDR when unset)
bool traverseRight; // which thread stack is currently being executed/traversed
bytes32 leftThreadStack; // hash-onion commitment to the left thread stack
bytes32 rightThreadStack; // hash-onion commitment to the right thread stack
uint64 nextThreadID; // id to assign to the next spawned thread
}
/// @notice The semantic version of the MIPS64 contract.
/// @custom:semver 1.0.0-beta.1
string public constant version = "1.0.0-beta.1";
/// @notice The preimage oracle contract.
IPreimageOracle internal immutable ORACLE;
// The offset of the start of proof calldata (_threadWitness.offset) in the step() function
uint256 internal constant THREAD_PROOF_OFFSET = 388;
// The offset of the start of proof calldata (_memProof.offset) in the step() function
// (thread proof = packed thread state + 32-byte inner thread-stack root)
uint256 internal constant MEM_PROOF_OFFSET = THREAD_PROOF_OFFSET + PACKED_THREAD_STATE_SIZE + 32;
// The empty thread root - keccak256(bytes32(0) ++ bytes32(0))
bytes32 internal constant EMPTY_THREAD_ROOT = hex"ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5";
// State memory offset allocated during step
// (step() asserts these match the compiler's actual struct allocations)
uint256 internal constant STATE_MEM_OFFSET = 0x80;
// ThreadState memory offset allocated during step
uint256 internal constant TC_MEM_OFFSET = 0x280;
/// @notice Sets the immutable preimage oracle used for preimage reads.
/// @param _oracle The address of the preimage oracle contract.
constructor(IPreimageOracle _oracle) {
ORACLE = _oracle;
}
/// @notice Getter for the pre-image oracle contract.
/// @return oracle_ The IPreimageOracle contract.
function oracle() external view returns (IPreimageOracle oracle_) {
oracle_ = ORACLE;
}
/// @notice Executes a single step of the multi-threaded vm.
/// Will revert if any required input state is missing.
/// @param _stateData The encoded state witness data.
/// @param _proof The encoded proof data: <<thread_context, inner_root>, <memory proof>.
/// Contains the thread context witness and the memory proof data for leaves within the MIPS VM's
/// memory.
/// The thread context witness is a packed tuple of the thread context and the immediate inner root of
/// the current thread stack.
/// @param _localContext The local key context for the preimage oracle. Optional, can be set as a constant
/// if the caller only requires one set of local keys.
function step(bytes calldata _stateData, bytes calldata _proof, bytes32 _localContext) public returns (bytes32) {
unchecked {
State memory state;
ThreadState memory thread;
uint32 exited;
// The assembly below hard-codes the compiler's memory layout; the checks
// revert if the compiler ever allocates the structs elsewhere.
assembly {
if iszero(eq(state, STATE_MEM_OFFSET)) {
// expected state mem offset check
revert(0, 0)
}
if iszero(eq(thread, TC_MEM_OFFSET)) {
// expected thread mem offset check
revert(0, 0)
}
if iszero(eq(mload(0x40), shl(5, 63))) {
// 4 + 16 state slots + 43 thread slots = 63 expected memory check
revert(0, 0)
}
if iszero(eq(_stateData.offset, 132)) {
// 32*4+4=132 expected state data offset
revert(0, 0)
}
if iszero(eq(_proof.offset, THREAD_PROOF_OFFSET)) {
// _stateData.offset = 132
// stateData.length = 196
// 32-byte align padding = 28
// _proof size prefix = 32
// expected thread proof offset equals the sum of the above is 388
revert(0, 0)
}
function putField(callOffset, memOffset, size) -> callOffsetOut, memOffsetOut {
// calldata is packed, thus starting left-aligned, shift-right to pad and right-align
let w := shr(shl(3, sub(32, size)), calldataload(callOffset))
mstore(memOffset, w)
callOffsetOut := add(callOffset, size)
memOffsetOut := add(memOffset, 32)
}
// Unpack state from calldata into memory
let c := _stateData.offset // calldata offset
let m := STATE_MEM_OFFSET // mem offset
c, m := putField(c, m, 32) // memRoot
c, m := putField(c, m, 32) // preimageKey
c, m := putField(c, m, 8) // preimageOffset
c, m := putField(c, m, 8) // heap
c, m := putField(c, m, 1) // llReservationStatus
c, m := putField(c, m, 8) // llAddress
c, m := putField(c, m, 8) // llOwnerThread
c, m := putField(c, m, 1) // exitCode
c, m := putField(c, m, 1) // exited
// capture the raw (unvalidated) exited byte so it can be range-checked below
exited := mload(sub(m, 32))
c, m := putField(c, m, 8) // step
c, m := putField(c, m, 8) // stepsSinceLastContextSwitch
c, m := putField(c, m, 8) // wakeup
c, m := putField(c, m, 1) // traverseRight
c, m := putField(c, m, 32) // leftThreadStack
c, m := putField(c, m, 32) // rightThreadStack
c, m := putField(c, m, 8) // nextThreadID
}
// exited must be a canonical boolean (0 or 1)
st.assertExitedIsValid(exited);
if (state.exited) {
// thread state is unchanged
return outputState();
}
if (
(state.leftThreadStack == EMPTY_THREAD_ROOT && !state.traverseRight)
|| (state.rightThreadStack == EMPTY_THREAD_ROOT && state.traverseRight)
) {
revert("MIPS64: active thread stack is empty");
}
state.step += 1;
setThreadStateFromCalldata(thread);
validateCalldataThreadWitness(state, thread);
// Search for the first thread blocked by the wakeup call, if wakeup is set
// Don't allow regular execution until we resolved if we have woken up any thread.
if (state.wakeup != sys.FUTEX_EMPTY_ADDR) {
if (state.wakeup == thread.futexAddr) {
// completed wake traversal
// resume execution on woken up thread
state.wakeup = sys.FUTEX_EMPTY_ADDR;
return outputState();
} else {
bool traversingRight = state.traverseRight;
bool changedDirections = preemptThread(state, thread);
if (traversingRight && changedDirections) {
// then we've completed wake traversal
// resume thread execution
state.wakeup = sys.FUTEX_EMPTY_ADDR;
}
return outputState();
}
}
if (thread.exited) {
popThread(state);
return outputState();
}
// check if thread is blocked on a futex
if (thread.futexAddr != sys.FUTEX_EMPTY_ADDR) {
// if set, then check futex
// check timeout first
if (state.step > thread.futexTimeoutStep) {
// timeout! Allow execution
return onWaitComplete(thread, true);
} else {
uint64 mem = MIPS64Memory.readMem(
state.memRoot,
thread.futexAddr & arch.ADDRESS_MASK,
MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 1)
);
if (thread.futexVal == mem) {
// still got expected value, continue sleeping, try next thread.
preemptThread(state, thread);
return outputState();
} else {
// wake thread up, the value at its address changed!
// Userspace can turn thread back to sleep if it was too sporadic.
return onWaitComplete(thread, false);
}
}
}
// Force a context switch when this thread has used up its scheduling quantum.
if (state.stepsSinceLastContextSwitch >= sys.SCHED_QUANTUM) {
preemptThread(state, thread);
return outputState();
}
state.stepsSinceLastContextSwitch += 1;
// instruction fetch
uint256 insnProofOffset = MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 0);
(uint32 insn, uint32 opcode, uint32 fun) =
ins.getInstructionDetails(thread.pc, state.memRoot, insnProofOffset);
// Handle syscall separately
// syscall (can read and write)
if (opcode == 0 && fun == 0xC) {
return handleSyscall(_localContext);
}
// Handle RMW (read-modify-write) ops
if (opcode == ins.OP_LOAD_LINKED || opcode == ins.OP_STORE_CONDITIONAL) {
return handleRMWOps(state, thread, insn, opcode);
}
if (opcode == ins.OP_LOAD_LINKED64 || opcode == ins.OP_STORE_CONDITIONAL64) {
return handleRMWOps(state, thread, insn, opcode);
}
// Exec the rest of the step logic
st.CpuScalars memory cpu = getCpuScalars(thread);
ins.CoreStepLogicParams memory coreStepArgs = ins.CoreStepLogicParams({
cpu: cpu,
registers: thread.registers,
memRoot: state.memRoot,
memProofOffset: MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 1),
insn: insn,
opcode: opcode,
fun: fun
});
bool memUpdated;
uint64 memAddr;
(state.memRoot, memUpdated, memAddr) = ins.execMipsCoreStepLogic(coreStepArgs);
setStateCpuScalars(thread, cpu);
updateCurrentThreadRoot();
if (memUpdated) {
// a store may have clobbered the word under an active ll/sc reservation
handleMemoryUpdate(state, memAddr);
}
return outputState();
}
}
/// @notice Reacts to a memory write: if the written word overlaps the active
///         load-linked reservation, the reservation is invalidated.
/// @param _state The VM state holding the LL reservation bookkeeping.
/// @param _memAddr The word-aligned address that was just written.
function handleMemoryUpdate(State memory _state, uint64 _memAddr) internal pure {
    uint64 reservedWord = arch.ADDRESS_MASK & _state.llAddress;
    if (_memAddr == reservedWord) {
        // The reserved word was modified; any pending store-conditional must now fail.
        clearLLMemoryReservation(_state);
    }
}
/// @notice Resets all load-linked reservation fields to their inactive values.
/// @param _state The VM state whose reservation bookkeeping is cleared.
function clearLLMemoryReservation(State memory _state) internal pure {
    _state.llReservationStatus = LL_STATUS_NONE;
    _state.llOwnerThread = 0;
    _state.llAddress = 0;
}
/// @notice Handles load-linked (ll/lld) and store-conditional (sc/scd) atomic
///         read-modify-write instructions.
/// @param _state The VM state holding the LL reservation bookkeeping.
/// @param _thread The currently executing thread.
/// @param _insn The raw 32-bit instruction.
/// @param _opcode The instruction opcode (one of the four LL/SC opcodes).
/// @return The hashed post-state of the VM.
function handleRMWOps(
    State memory _state,
    ThreadState memory _thread,
    uint32 _insn,
    uint32 _opcode
)
    internal
    returns (bytes32)
{
    unchecked {
        // Effective address: base register plus sign-extended 16-bit immediate.
        uint64 base = _thread.registers[(_insn >> 21) & 0x1F];
        uint32 rtReg = (_insn >> 16) & 0x1F;
        uint64 addr = base + ins.signExtendImmediate(_insn);
        // Determine some opcode-specific parameters
        uint8 targetStatus = LL_STATUS_ACTIVE_32_BIT;
        uint64 byteLength = 4;
        if (_opcode == ins.OP_LOAD_LINKED64 || _opcode == ins.OP_STORE_CONDITIONAL64) {
            // Use 64-bit params
            targetStatus = LL_STATUS_ACTIVE_64_BIT;
            byteLength = 8;
        }
        uint64 retVal = 0;
        uint64 threadId = _thread.threadID;
        if (_opcode == ins.OP_LOAD_LINKED || _opcode == ins.OP_LOAD_LINKED64) {
            // ll/lld: load the value and record a reservation owned by this thread.
            retVal = loadSubWord(_state, addr, byteLength, true);
            _state.llReservationStatus = targetStatus;
            _state.llAddress = addr;
            _state.llOwnerThread = threadId;
        } else if (_opcode == ins.OP_STORE_CONDITIONAL || _opcode == ins.OP_STORE_CONDITIONAL64) {
            // Check if our memory reservation is still intact
            // (same width, same owning thread, same address).
            if (
                _state.llReservationStatus == targetStatus && _state.llOwnerThread == threadId
                    && _state.llAddress == addr
            ) {
                // Complete atomic update: set memory and return 1 for success
                clearLLMemoryReservation(_state);
                uint64 val = _thread.registers[rtReg];
                storeSubWord(_state, addr, byteLength, val);
                retVal = 1;
            } else {
                // Atomic update failed, return 0 for failure
                retVal = 0;
            }
        } else {
            revert InvalidRMWInstruction();
        }
        // Write the result into rt and advance the PC, then commit the thread root.
        st.CpuScalars memory cpu = getCpuScalars(_thread);
        ins.handleRd(cpu, _thread.registers, rtReg, retVal, true);
        setStateCpuScalars(_thread, cpu);
        updateCurrentThreadRoot();
        return outputState();
    }
}
/// @notice Loads a subword of byteLength size contained from memory based on the low-order bits of vaddr
/// @param _state The VM state providing the memory merkle root.
/// @param _vaddr The virtual address of the subword.
/// @param _byteLength The size of the subword.
/// @param _signExtend Whether to sign extend the selected subword.
/// @return val_ The (possibly sign-extended) subword value.
function loadSubWord(
    State memory _state,
    uint64 _vaddr,
    uint64 _byteLength,
    bool _signExtend
)
    internal
    pure
    returns (uint64 val_)
{
    // Align down to the containing 8-byte word and read it via the memory proof.
    uint64 effAddr = _vaddr & arch.ADDRESS_MASK;
    uint256 memProofOffset = MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 1);
    uint64 mem = MIPS64Memory.readMem(_state.memRoot, effAddr, memProofOffset);
    // Extract the addressed subword from the full word.
    val_ = ins.selectSubWord(_vaddr, mem, _byteLength, _signExtend);
}
/// @notice Writes a subword of _byteLength size into memory, spliced into the
///         containing word at bit positions determined by the virtual address.
/// @param _state The VM state whose memory root is updated.
/// @param _vaddr The virtual address of the subword.
/// @param _byteLength The size of the subword.
/// @param _value The subword value to merge into the existing memory word.
function storeSubWord(State memory _state, uint64 _vaddr, uint64 _byteLength, uint64 _value) internal pure {
    uint256 proofOffset = MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 1);
    uint64 alignedAddr = _vaddr & arch.ADDRESS_MASK;
    // Read the full word, splice in the subword, then write the word back.
    uint64 currentWord = MIPS64Memory.readMem(_state.memRoot, alignedAddr, proofOffset);
    uint64 mergedWord = ins.updateSubWord(_vaddr, currentWord, _byteLength, _value);
    _state.memRoot = MIPS64Memory.writeMem(alignedAddr, proofOffset, mergedWord);
}
/// @notice Dispatches and executes the syscall requested by the current thread,
///         then commits the updated thread/state and returns the hashed post-state.
/// @param _localContext The local key context for pre-image oracle reads.
/// @return out_ The hashed post-state of the VM.
function handleSyscall(bytes32 _localContext) internal returns (bytes32 out_) {
    unchecked {
        // Load state from memory offsets to reduce stack pressure
        State memory state;
        ThreadState memory thread;
        assembly {
            state := STATE_MEM_OFFSET
            thread := TC_MEM_OFFSET
        }
        // Load the syscall numbers and args from the registers
        (uint64 syscall_no, uint64 a0, uint64 a1, uint64 a2, uint64 a3) = sys.getSyscallArgs(thread.registers);
        // Syscalls that are unimplemented but known return with v0=0 and v1=0
        uint64 v0 = 0;
        uint64 v1 = 0;
        if (syscall_no == sys.SYS_MMAP) {
            (v0, v1, state.heap) = sys.handleSysMmap(a0, a1, state.heap);
        } else if (syscall_no == sys.SYS_BRK) {
            // brk: Returns a fixed address for the program break at 0x40000000
            v0 = sys.PROGRAM_BREAK;
        } else if (syscall_no == sys.SYS_CLONE) {
            // Only the exact clone flag set is supported; anything else panics the VM.
            if (sys.VALID_SYS_CLONE_FLAGS != a0) {
                state.exited = true;
                state.exitCode = VMStatuses.PANIC.raw();
                return outputState();
            }
            v0 = state.nextThreadID;
            v1 = 0;
            // Spawn the child as a copy of the parent, starting after this syscall.
            ThreadState memory newThread;
            newThread.threadID = state.nextThreadID;
            newThread.exitCode = 0;
            newThread.exited = false;
            newThread.futexAddr = sys.FUTEX_EMPTY_ADDR;
            newThread.futexVal = 0;
            newThread.futexTimeoutStep = 0;
            newThread.pc = thread.nextPC;
            newThread.nextPC = thread.nextPC + 4;
            newThread.lo = thread.lo;
            newThread.hi = thread.hi;
            for (uint256 i; i < 32; i++) {
                newThread.registers[i] = thread.registers[i];
            }
            newThread.registers[29] = a1; // set stack pointer
            // the child will perceive a 0 value as returned value instead, and no error
            newThread.registers[2] = 0;
            newThread.registers[7] = 0;
            state.nextThreadID++;
            // Preempt this thread for the new one. But not before updating PCs
            st.CpuScalars memory cpu0 = getCpuScalars(thread);
            sys.handleSyscallUpdates(cpu0, thread.registers, v0, v1);
            setStateCpuScalars(thread, cpu0);
            updateCurrentThreadRoot();
            pushThread(state, newThread);
            return outputState();
        } else if (syscall_no == sys.SYS_EXIT_GROUP) {
            // exit group: Sets the Exited and ExitCode states to true and argument 0.
            state.exited = true;
            state.exitCode = uint8(a0);
            updateCurrentThreadRoot();
            return outputState();
        } else if (syscall_no == sys.SYS_READ) {
            sys.SysReadParams memory args = sys.SysReadParams({
                a0: a0,
                a1: a1,
                a2: a2,
                preimageKey: state.preimageKey,
                preimageOffset: state.preimageOffset,
                localContext: _localContext,
                oracle: ORACLE,
                proofOffset: MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 1),
                memRoot: state.memRoot
            });
            // Encapsulate execution to avoid stack-too-deep error
            (v0, v1) = execSysRead(state, args);
        } else if (syscall_no == sys.SYS_WRITE) {
            (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite({
                _a0: a0,
                _a1: a1,
                _a2: a2,
                _preimageKey: state.preimageKey,
                _preimageOffset: state.preimageOffset,
                _proofOffset: MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 1),
                _memRoot: state.memRoot
            });
        } else if (syscall_no == sys.SYS_FCNTL) {
            (v0, v1) = sys.handleSysFcntl(a0, a1);
        } else if (syscall_no == sys.SYS_GETTID) {
            v0 = thread.threadID;
            v1 = 0;
        } else if (syscall_no == sys.SYS_EXIT) {
            // Exit only the current thread; the whole VM exits only if it is the last one.
            thread.exited = true;
            thread.exitCode = uint8(a0);
            if (lastThreadRemaining(state)) {
                state.exited = true;
                state.exitCode = uint8(a0);
            }
            updateCurrentThreadRoot();
            return outputState();
        } else if (syscall_no == sys.SYS_FUTEX) {
            // args: a0 = addr, a1 = op, a2 = val, a3 = timeout
            uint64 effAddr = a0 & arch.ADDRESS_MASK;
            if (a1 == sys.FUTEX_WAIT_PRIVATE) {
                uint64 mem = MIPS64Memory.readMem(
                    state.memRoot, effAddr, MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 1)
                );
                if (mem != a2) {
                    // Value already changed: return EAGAIN instead of sleeping.
                    v0 = sys.SYS_ERROR_SIGNAL;
                    v1 = sys.EAGAIN;
                } else {
                    thread.futexAddr = effAddr;
                    thread.futexVal = a2;
                    thread.futexTimeoutStep = a3 == 0 ? sys.FUTEX_NO_TIMEOUT : state.step + sys.FUTEX_TIMEOUT_STEPS;
                    // Leave cpu scalars as-is. This instruction will be completed by `onWaitComplete`
                    updateCurrentThreadRoot();
                    return outputState();
                }
            } else if (a1 == sys.FUTEX_WAKE_PRIVATE) {
                // Trigger thread traversal starting from the left stack until we find one waiting on the wakeup
                // address
                state.wakeup = effAddr;
                // Don't indicate to the program that we've woken up a waiting thread, as there are no guarantees.
                // The woken up thread should indicate this in userspace.
                v0 = 0;
                v1 = 0;
                st.CpuScalars memory cpu0 = getCpuScalars(thread);
                sys.handleSyscallUpdates(cpu0, thread.registers, v0, v1);
                setStateCpuScalars(thread, cpu0);
                preemptThread(state, thread);
                state.traverseRight = state.leftThreadStack == EMPTY_THREAD_ROOT;
                return outputState();
            } else {
                v0 = sys.SYS_ERROR_SIGNAL;
                v1 = sys.EINVAL;
            }
        } else if (syscall_no == sys.SYS_SCHED_YIELD || syscall_no == sys.SYS_NANOSLEEP) {
            // Yield: no-op result, but hand the CPU to the next thread.
            v0 = 0;
            v1 = 0;
            st.CpuScalars memory cpu0 = getCpuScalars(thread);
            sys.handleSyscallUpdates(cpu0, thread.registers, v0, v1);
            setStateCpuScalars(thread, cpu0);
            preemptThread(state, thread);
            return outputState();
        } else if (syscall_no == sys.SYS_OPEN) {
            v0 = sys.SYS_ERROR_SIGNAL;
            v1 = sys.EBADF;
        } else if (syscall_no == sys.SYS_CLOCKGETTIME) {
            if (a0 == sys.CLOCK_GETTIME_REALTIME_FLAG || a0 == sys.CLOCK_GETTIME_MONOTONIC_FLAG) {
                v0 = 0;
                v1 = 0;
                uint64 secs = 0;
                uint64 nsecs = 0;
                // Realtime clock always reads zero; monotonic time derives from the step count.
                if (a0 == sys.CLOCK_GETTIME_MONOTONIC_FLAG) {
                    secs = uint64(state.step / sys.HZ);
                    nsecs = uint64((state.step % sys.HZ) * (1_000_000_000 / sys.HZ));
                }
                uint64 effAddr = a1 & arch.ADDRESS_MASK;
                // First verify the effAddr path
                if (
                    !MIPS64Memory.isValidProof(
                        state.memRoot, effAddr, MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 1)
                    )
                ) {
                    revert InvalidMemoryProof();
                }
                // Recompute the new root after updating effAddr
                state.memRoot =
                    MIPS64Memory.writeMem(effAddr, MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 1), secs);
                handleMemoryUpdate(state, effAddr);
                // Verify the second memory proof against the newly computed root
                if (
                    !MIPS64Memory.isValidProof(
                        state.memRoot, effAddr + 8, MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 2)
                    )
                ) {
                    revert InvalidSecondMemoryProof();
                }
                state.memRoot =
                    MIPS64Memory.writeMem(effAddr + 8, MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 2), nsecs);
                handleMemoryUpdate(state, effAddr + 8);
            } else {
                v0 = sys.SYS_ERROR_SIGNAL;
                v1 = sys.EINVAL;
            }
        } else if (syscall_no == sys.SYS_GETPID) {
            v0 = 0;
            v1 = 0;
        } else if (syscall_no == sys.SYS_MUNMAP) {
            // ignored
        } else if (syscall_no == sys.SYS_GETAFFINITY) {
            // ignored
        } else if (syscall_no == sys.SYS_MADVISE) {
            // ignored
        } else if (syscall_no == sys.SYS_RTSIGPROCMASK) {
            // ignored
        } else if (syscall_no == sys.SYS_SIGALTSTACK) {
            // ignored
        } else if (syscall_no == sys.SYS_RTSIGACTION) {
            // ignored
        } else if (syscall_no == sys.SYS_PRLIMIT64) {
            // ignored
        } else if (syscall_no == sys.SYS_CLOSE) {
            // ignored
        } else if (syscall_no == sys.SYS_PREAD64) {
            // ignored
        } else if (syscall_no == sys.SYS_FSTAT) {
            // ignored
        } else if (syscall_no == sys.SYS_OPENAT) {
            // ignored
        } else if (syscall_no == sys.SYS_READLINK) {
            // ignored
        } else if (syscall_no == sys.SYS_READLINKAT) {
            // ignored
        } else if (syscall_no == sys.SYS_IOCTL) {
            // ignored
        } else if (syscall_no == sys.SYS_EPOLLCREATE1) {
            // ignored
        } else if (syscall_no == sys.SYS_PIPE2) {
            // ignored
        } else if (syscall_no == sys.SYS_EPOLLCTL) {
            // ignored
        } else if (syscall_no == sys.SYS_EPOLLPWAIT) {
            // ignored
        } else if (syscall_no == sys.SYS_GETRANDOM) {
            // ignored
        } else if (syscall_no == sys.SYS_UNAME) {
            // ignored
        } else if (syscall_no == sys.SYS_GETUID) {
            // ignored
        } else if (syscall_no == sys.SYS_GETGID) {
            // ignored
        } else if (syscall_no == sys.SYS_MINCORE) {
            // ignored
        } else if (syscall_no == sys.SYS_TGKILL) {
            // ignored
        } else if (syscall_no == sys.SYS_SETITIMER) {
            // ignored
        } else if (syscall_no == sys.SYS_TIMERCREATE) {
            // ignored
        } else if (syscall_no == sys.SYS_TIMERSETTIME) {
            // ignored
        } else if (syscall_no == sys.SYS_TIMERDELETE) {
            // ignored
        } else if (syscall_no == sys.SYS_GETRLIMIT) {
            // ignored
        } else if (syscall_no == sys.SYS_LSEEK) {
            // ignored
        } else {
            revert("MIPS64: unimplemented syscall");
        }
        // Common exit path: write v0/v1 back, advance the PC, commit the thread root.
        st.CpuScalars memory cpu = getCpuScalars(thread);
        sys.handleSyscallUpdates(cpu, thread.registers, v0, v1);
        setStateCpuScalars(thread, cpu);
        updateCurrentThreadRoot();
        out_ = outputState();
    }
}
/// @notice Executes a read syscall via sys.handleSysRead, applying any resulting
///         memory-root and preimage-offset updates to the state.
/// @param _state The VM state to update.
/// @param _args The packed arguments for the read syscall.
/// @return v0_ The syscall return value.
/// @return v1_ The syscall error code.
function execSysRead(
    State memory _state,
    sys.SysReadParams memory _args
)
    internal
    view
    returns (uint64 v0_, uint64 v1_)
{
    uint64 updatedAddr;
    bool wroteMemory;
    (v0_, v1_, _state.preimageOffset, _state.memRoot, wroteMemory, updatedAddr) = sys.handleSysRead(_args);
    if (wroteMemory) {
        // The read may have clobbered a word under an active LL reservation.
        handleMemoryUpdate(_state, updatedAddr);
    }
}
/// @notice Computes the hash of the MIPS state.
/// @dev Serializes the packed State struct from its fixed memory offset into
///      scratch memory, hashes it, and stamps the VM status into the top byte.
///      The copy order and field sizes below must match the packed State layout.
/// @return out_ The hashed MIPS state.
function outputState() internal returns (bytes32 out_) {
    uint32 exited;
    assembly {
        // copies 'size' bytes, right-aligned in word at 'from', to 'to', incl. trailing data
        function copyMem(from, to, size) -> fromOut, toOut {
            mstore(to, mload(add(from, sub(32, size))))
            fromOut := add(from, 32)
            toOut := add(to, size)
        }
        // From points to the MIPS State
        let from := STATE_MEM_OFFSET
        // Copy to the free memory pointer
        let start := mload(0x40)
        let to := start
        // Copy state to free memory
        from, to := copyMem(from, to, 32) // memRoot
        from, to := copyMem(from, to, 32) // preimageKey
        from, to := copyMem(from, to, 8) // preimageOffset
        from, to := copyMem(from, to, 8) // heap
        from, to := copyMem(from, to, 1) // llReservationStatus
        from, to := copyMem(from, to, 8) // llAddress
        from, to := copyMem(from, to, 8) // llOwnerThread
        // Stash exitCode/exited before copyMem advances 'from' past them.
        let exitCode := mload(from)
        from, to := copyMem(from, to, 1) // exitCode
        exited := mload(from)
        from, to := copyMem(from, to, 1) // exited
        from, to := copyMem(from, to, 8) // step
        from, to := copyMem(from, to, 8) // stepsSinceLastContextSwitch
        from, to := copyMem(from, to, 8) // wakeup
        from, to := copyMem(from, to, 1) // traverseRight
        from, to := copyMem(from, to, 32) // leftThreadStack
        from, to := copyMem(from, to, 32) // rightThreadStack
        from, to := copyMem(from, to, 8) // nextThreadID
        // Clean up end of memory
        mstore(to, 0)
        // Log the resulting MIPS state, for debugging
        log0(start, sub(to, start))
        // Determine the VM status
        let status := 0
        switch exited
        case 1 {
            switch exitCode
            // VMStatusValid
            case 0 { status := 0 }
            // VMStatusInvalid
            case 1 { status := 1 }
            // VMStatusPanic
            default { status := 2 }
        }
        // VMStatusUnfinished
        default { status := 3 }
        // Compute the hash of the resulting MIPS state and set the status byte
        out_ := keccak256(start, sub(to, start))
        out_ := or(and(not(shl(248, 0xFF)), out_), shl(248, status))
    }
    st.assertExitedIsValid(exited);
}
/// @notice Recomputes the active thread stack root from the inner thread root in
///         calldata combined with the in-memory thread context, and stores it on
///         whichever stack is currently being traversed.
function updateCurrentThreadRoot() internal pure {
    State memory state;
    ThreadState memory thread;
    assembly {
        state := STATE_MEM_OFFSET
        thread := TC_MEM_OFFSET
    }
    bytes32 newRoot = computeThreadRoot(loadCalldataInnerThreadRoot(), thread);
    if (state.traverseRight) {
        state.rightThreadStack = newRoot;
    } else {
        state.leftThreadStack = newRoot;
    }
}
/// @notice Completes the FUTEX_WAIT syscall for a thread that was woken or timed out.
/// @param _thread The thread whose futex wait is being completed.
/// @param _isTimedOut True when the wait ended due to timeout rather than a wake.
/// @return out_ The hashed post-state of the VM.
function onWaitComplete(ThreadState memory _thread, bool _isTimedOut) internal returns (bytes32 out_) {
    // Note: no need to reset State.wakeup. If we're here, the wakeup field has already been reset
    // Drop the thread's futex bookkeeping now that the wait is over.
    _thread.futexAddr = sys.FUTEX_EMPTY_ADDR;
    _thread.futexTimeoutStep = 0;
    _thread.futexVal = 0;
    // A timeout surfaces as an error with ETIMEDOUT; a wake returns success.
    uint64 v0;
    uint64 v1;
    if (_isTimedOut) {
        v0 = sys.SYS_ERROR_SIGNAL;
        v1 = sys.ETIMEDOUT;
    } else {
        v0 = 0;
        v1 = 0;
    }
    // Complete the FUTEX_WAIT syscall and commit the thread root.
    st.CpuScalars memory cpu = getCpuScalars(_thread);
    sys.handleSyscallUpdates(cpu, _thread.registers, v0, v1);
    setStateCpuScalars(_thread, cpu);
    updateCurrentThreadRoot();
    out_ = outputState();
}
/// @notice Preempts the current thread for another and updates the VM state.
/// It reads the inner thread root from calldata to update the current thread stack root.
/// @param _state The VM state.
/// @param _thread The thread being preempted (moved onto the inactive stack).
/// @return changedDirections_ True when the active stack emptied and traversal flipped.
function preemptThread(
    State memory _state,
    ThreadState memory _thread
)
    internal
    pure
    returns (bool changedDirections_)
{
    // pop thread from the current stack and push to the other stack
    if (_state.traverseRight) {
        require(_state.rightThreadStack != EMPTY_THREAD_ROOT, "MIPS64: empty right thread stack");
        _state.rightThreadStack = loadCalldataInnerThreadRoot();
        _state.leftThreadStack = computeThreadRoot(_state.leftThreadStack, _thread);
    } else {
        require(_state.leftThreadStack != EMPTY_THREAD_ROOT, "MIPS64: empty left thread stack");
        _state.leftThreadStack = loadCalldataInnerThreadRoot();
        _state.rightThreadStack = computeThreadRoot(_state.rightThreadStack, _thread);
    }
    // If the active stack is now empty, reverse the traversal direction.
    bytes32 current = _state.traverseRight ? _state.rightThreadStack : _state.leftThreadStack;
    if (current == EMPTY_THREAD_ROOT) {
        _state.traverseRight = !_state.traverseRight;
        changedDirections_ = true;
    }
    // Preemption counts as a context switch.
    _state.stepsSinceLastContextSwitch = 0;
}
/// @notice Pushes a thread onto the stack currently being traversed.
/// @param _state The VM state.
/// @param _thread The thread to push.
function pushThread(State memory _state, ThreadState memory _thread) internal pure {
    bool right = _state.traverseRight;
    bytes32 target = right ? _state.rightThreadStack : _state.leftThreadStack;
    bytes32 updated = computeThreadRoot(target, _thread);
    if (right) {
        _state.rightThreadStack = updated;
    } else {
        _state.leftThreadStack = updated;
    }
    // Pushing a thread counts as a context switch.
    _state.stepsSinceLastContextSwitch = 0;
}
/// @notice Removes the current thread from the active stack, flipping the
///         traversal direction when that stack becomes empty.
/// @param _state The VM state.
function popThread(State memory _state) internal pure {
    // The witness inner root is what remains beneath the popped thread.
    bytes32 remaining = loadCalldataInnerThreadRoot();
    if (_state.traverseRight) {
        _state.rightThreadStack = remaining;
    } else {
        _state.leftThreadStack = remaining;
    }
    if (remaining == EMPTY_THREAD_ROOT) {
        _state.traverseRight = !_state.traverseRight;
    }
    // Popping a thread counts as a context switch.
    _state.stepsSinceLastContextSwitch = 0;
}
/// @notice Returns true if the number of threads is 1.
/// @param _state The VM state.
/// @return out_ True when the inactive stack is empty and nothing lies beneath
///         the current thread on the active stack.
function lastThreadRemaining(State memory _state) internal pure returns (bool out_) {
    // The current thread sits atop the active stack; the witness inner root
    // tells us whether anything lies beneath it.
    bool nothingBeneathCurrent = loadCalldataInnerThreadRoot() == EMPTY_THREAD_ROOT;
    bytes32 otherStack = _state.traverseRight ? _state.leftThreadStack : _state.rightThreadStack;
    out_ = nothingBeneathCurrent && otherStack == EMPTY_THREAD_ROOT;
}
/// @notice Computes the thread-stack hash onion: w_i = hash(w_0 ++ hash(thread)).
/// @param _currentRoot The inner (previous) stack root.
/// @param _thread The thread whose hash is layered on top.
/// @return out_ The new stack root.
function computeThreadRoot(bytes32 _currentRoot, ThreadState memory _thread) internal pure returns (bytes32 out_) {
    out_ = keccak256(abi.encodePacked(_currentRoot, outputThreadState(_thread)));
}
/// @notice Computes the hash of a ThreadState by serializing its packed scalar
///         fields and all 32 registers into scratch memory and hashing the result.
/// @dev The copy order and field sizes must match the packed ThreadState layout.
/// @param _thread The thread to hash.
/// @return out_ The keccak256 hash of the packed thread state.
function outputThreadState(ThreadState memory _thread) internal pure returns (bytes32 out_) {
    assembly {
        // copies 'size' bytes, right-aligned in word at 'from', to 'to', incl. trailing data
        function copyMem(from, to, size) -> fromOut, toOut {
            mstore(to, mload(add(from, sub(32, size))))
            fromOut := add(from, 32)
            toOut := add(to, size)
        }
        // From points to the ThreadState
        let from := _thread
        // Copy to the free memory pointer
        let start := mload(0x40)
        let to := start
        // Copy state to free memory
        from, to := copyMem(from, to, 8) // threadID
        from, to := copyMem(from, to, 1) // exitCode
        from, to := copyMem(from, to, 1) // exited
        from, to := copyMem(from, to, 8) // futexAddr
        from, to := copyMem(from, to, 8) // futexVal
        from, to := copyMem(from, to, 8) // futexTimeoutStep
        from, to := copyMem(from, to, 8) // pc
        from, to := copyMem(from, to, 8) // nextPC
        from, to := copyMem(from, to, 8) // lo
        from, to := copyMem(from, to, 8) // hi
        from := mload(from) // offset to registers
        // Copy registers
        for { let i := 0 } lt(i, 32) { i := add(i, 1) } { from, to := copyMem(from, to, 8) }
        // Clean up end of memory
        mstore(to, 0)
        // Compute the hash of the resulting ThreadState
        out_ := keccak256(start, sub(to, start))
    }
}
/// @notice Extracts the CPU scalar fields (pc, nextPC, lo, hi) from a thread context.
function getCpuScalars(ThreadState memory _tc) internal pure returns (st.CpuScalars memory cpu_) {
    cpu_.pc = _tc.pc;
    cpu_.nextPC = _tc.nextPC;
    cpu_.lo = _tc.lo;
    cpu_.hi = _tc.hi;
}
/// @notice Writes the CPU scalar fields (pc, nextPC, lo, hi) back into a thread context.
function setStateCpuScalars(ThreadState memory _tc, st.CpuScalars memory _cpu) internal pure {
    _tc.hi = _cpu.hi;
    _tc.lo = _cpu.lo;
    _tc.nextPC = _cpu.nextPC;
    _tc.pc = _cpu.pc;
}
/// @notice Validates the thread witness in calldata against the current thread:
///         the inner root from calldata hashed with the thread must reproduce
///         the root of the stack currently being traversed.
function validateCalldataThreadWitness(State memory _state, ThreadState memory _thread) internal pure {
    bytes32 activeStackRoot = _state.traverseRight ? _state.rightThreadStack : _state.leftThreadStack;
    bytes32 recomputed = computeThreadRoot(loadCalldataInnerThreadRoot(), _thread);
    require(activeStackRoot == recomputed, "MIPS64: invalid thread witness");
}
/// @notice Sets the thread context from calldata.
/// @dev Unpacks the packed thread witness at THREAD_PROOF_OFFSET in calldata into
///      the in-memory ThreadState struct, field by field, in packed-layout order.
/// @param _thread The thread context to populate.
function setThreadStateFromCalldata(ThreadState memory _thread) internal pure {
    uint256 s = 0;
    assembly {
        s := calldatasize()
    }
    // verify we have enough calldata
    require(
        s >= (THREAD_PROOF_OFFSET + PACKED_THREAD_STATE_SIZE), "MIPS64: insufficient calldata for thread witness"
    );
    unchecked {
        assembly {
            function putField(callOffset, memOffset, size) -> callOffsetOut, memOffsetOut {
                // calldata is packed, thus starting left-aligned, shift-right to pad and right-align
                let w := shr(shl(3, sub(32, size)), calldataload(callOffset))
                mstore(memOffset, w)
                callOffsetOut := add(callOffset, size)
                memOffsetOut := add(memOffset, 32)
            }
            let c := THREAD_PROOF_OFFSET
            let m := _thread
            c, m := putField(c, m, 8) // threadID
            c, m := putField(c, m, 1) // exitCode
            c, m := putField(c, m, 1) // exited
            c, m := putField(c, m, 8) // futexAddr
            c, m := putField(c, m, 8) // futexVal
            c, m := putField(c, m, 8) // futexTimeoutStep
            c, m := putField(c, m, 8) // pc
            c, m := putField(c, m, 8) // nextPC
            c, m := putField(c, m, 8) // lo
            c, m := putField(c, m, 8) // hi
            m := mload(m) // offset to registers
            // Unpack register calldata into memory
            for { let i := 0 } lt(i, 32) { i := add(i, 1) } { c, m := putField(c, m, 8) }
        }
    }
}
/// @notice Loads the inner root for the current thread hash onion from calldata.
/// @dev The root sits immediately after the packed thread state in the witness.
///      calldataload beyond calldatasize yields zero, so the load is performed
///      first and the require below rejects calldata too short to contain it.
/// @return innerThreadRoot_ The inner thread stack root read from calldata.
function loadCalldataInnerThreadRoot() internal pure returns (bytes32 innerThreadRoot_) {
    uint256 s = 0;
    assembly {
        s := calldatasize()
        innerThreadRoot_ := calldataload(add(THREAD_PROOF_OFFSET, PACKED_THREAD_STATE_SIZE))
    }
    // verify we have enough calldata
    require(
        s >= (THREAD_PROOF_OFFSET + (PACKED_THREAD_STATE_SIZE + 32)),
        "MIPS64: insufficient calldata for thread witness"
    );
}
}
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
/// @notice Architecture parameters for the 64-bit MIPS VM.
library MIPS64Arch {
    // Machine word width in bits.
    uint64 internal constant WORD_SIZE = 64;
    // Machine word width in bytes.
    uint64 internal constant WORD_SIZE_BYTES = 8;
    // Mask selecting the byte offset within an 8-byte word.
    uint64 internal constant EXT_MASK = 0x7;
    // Mask that aligns an address down to an 8-byte word boundary.
    uint64 internal constant ADDRESS_MASK = 0xFFFFFFFFFFFFFFF8;
}
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { MIPS64Memory } from "src/cannon/libraries/MIPS64Memory.sol";
import { MIPS64State as st } from "src/cannon/libraries/MIPS64State.sol";
import { MIPS64Arch as arch } from "src/cannon/libraries/MIPS64Arch.sol";
library MIPS64Instructions {
// MIPS opcodes for the load-linked / store-conditional (32- and 64-bit) instructions.
uint32 internal constant OP_LOAD_LINKED = 0x30;
uint32 internal constant OP_STORE_CONDITIONAL = 0x38;
uint32 internal constant OP_LOAD_LINKED64 = 0x34;
uint32 internal constant OP_STORE_CONDITIONAL64 = 0x3C;
// MIPS opcodes for the unaligned 64-bit loads ldl / ldr.
uint32 internal constant OP_LOAD_DOUBLE_LEFT = 0x1A;
uint32 internal constant OP_LOAD_DOUBLE_RIGHT = 0x1B;
// Index of the return-address register ($ra).
uint32 internal constant REG_RA = 31;
// Full-width masks for 64-bit and 32-bit values.
uint64 internal constant U64_MASK = 0xFFFFFFFFFFFFFFFF;
uint32 internal constant U32_MASK = 0xFFffFFff;
/// @notice Thrown when the program counter is not 4-byte aligned.
error InvalidPC();
struct CoreStepLogicParams {
    /// @param cpu The CPU scalar fields.
    st.CpuScalars cpu;
    /// @param registers The CPU registers.
    uint64[32] registers;
    /// @param memRoot The current merkle root of the memory.
    bytes32 memRoot;
    /// @param memProofOffset The offset in calldata specify where the memory merkle proof is located.
    uint256 memProofOffset;
    /// @param insn The current 32-bit instruction at the pc.
    uint32 insn;
    /// @param opcode The opcode value parsed from insn_.
    uint32 opcode;
    /// @param fun The function value parsed from insn_.
    uint32 fun;
}
/// @notice Fetches and decodes the 32-bit instruction at the given program counter.
/// @param _pc The program counter.
/// @param _memRoot The current memory root.
/// @param _insnProofOffset The calldata offset of the memory proof for the current instruction.
/// @return insn_ The current 32-bit instruction at the pc.
/// @return opcode_ The 6-bit opcode field (bits 31..26) of insn_.
/// @return fun_ The 6-bit function field (bits 5..0) of insn_.
function getInstructionDetails(
    uint64 _pc,
    bytes32 _memRoot,
    uint256 _insnProofOffset
)
    internal
    pure
    returns (uint32 insn_, uint32 opcode_, uint32 fun_)
{
    unchecked {
        // Instructions must be 4-byte aligned.
        if (_pc & 0x3 != 0) {
            revert InvalidPC();
        }
        // Read the enclosing 8-byte word and select the 4-byte instruction out of it.
        uint64 memWord = MIPS64Memory.readMem(_memRoot, _pc & arch.ADDRESS_MASK, _insnProofOffset);
        insn_ = uint32(selectSubWord(_pc, memWord, 4, false));
        opcode_ = insn_ >> 26;
        fun_ = insn_ & 0x3f;
        return (insn_, opcode_, fun_);
    }
}
/// @notice Execute core MIPS step logic.
/// @return newMemRoot_ The updated merkle root of memory after any modifications, may be unchanged.
/// @return memUpdated_ True if memory was modified.
/// @return memAddr_ Holds the memory address that was updated if memUpdated_ is true.
function execMipsCoreStepLogic(CoreStepLogicParams memory _args)
    internal
    pure
    returns (bytes32 newMemRoot_, bool memUpdated_, uint64 memAddr_)
{
    unchecked {
        newMemRoot_ = _args.memRoot;
        memUpdated_ = false;
        memAddr_ = 0;
        // j-type j/jal
        if (_args.opcode == 2 || _args.opcode == 3) {
            // Take top 4 bits of the next PC (its 256 MB region), and concatenate with the 26-bit offset
            uint64 target = (_args.cpu.nextPC & signExtend(0xF0000000, 32)) | uint64((_args.insn & 0x03FFFFFF) << 2);
            handleJump(_args.cpu, _args.registers, _args.opcode == 2 ? 0 : REG_RA, target);
            return (newMemRoot_, memUpdated_, memAddr_);
        }
        // register fetch
        uint64 rs = 0; // source register 1 value
        uint64 rt = 0; // source register 2 / temp value
        uint64 rtReg = uint64((_args.insn >> 16) & 0x1F);
        // R-type or I-type (stores rt)
        rs = _args.registers[(_args.insn >> 21) & 0x1F];
        uint64 rdReg = rtReg;
        // 64-bit opcodes lwu, ldl, ldr
        if (_args.opcode == 0x27 || _args.opcode == 0x1A || _args.opcode == 0x1B) {
            rt = _args.registers[rtReg];
            rdReg = rtReg;
        } else if (_args.opcode == 0 || _args.opcode == 0x1c) {
            // R-type (stores rd)
            rt = _args.registers[rtReg];
            rdReg = uint64((_args.insn >> 11) & 0x1F);
        } else if (_args.opcode < 0x20) {
            // rt is SignExtImm
            // don't sign extend for andi, ori, xori
            if (_args.opcode == 0xC || _args.opcode == 0xD || _args.opcode == 0xe) {
                // ZeroExtImm
                rt = uint64(_args.insn & 0xFFFF);
            } else {
                // SignExtImm
                rt = signExtendImmediate(_args.insn);
            }
        } else if (_args.opcode >= 0x28 || _args.opcode == 0x22 || _args.opcode == 0x26) {
            // store rt value with store
            rt = _args.registers[rtReg];
            // store actual rt with lwl and lwr
            rdReg = rtReg;
        }
        // branch instructions (beq/bne/blez/bgtz, plus the regimm group at opcode 1)
        if ((_args.opcode >= 4 && _args.opcode < 8) || _args.opcode == 1) {
            handleBranch({
                _cpu: _args.cpu,
                _registers: _args.registers,
                _opcode: _args.opcode,
                _insn: _args.insn,
                _rtReg: rtReg,
                _rs: rs
            });
            return (newMemRoot_, memUpdated_, memAddr_);
        }
        // U64_MASK sentinel means "no store pending".
        uint64 storeAddr = U64_MASK;
        // memory fetch (all I-type)
        // we do the load for stores also
        uint64 mem = 0;
        if (_args.opcode >= 0x20 || _args.opcode == OP_LOAD_DOUBLE_LEFT || _args.opcode == OP_LOAD_DOUBLE_RIGHT) {
            // M[R[rs]+SignExtImm]
            rs += signExtendImmediate(_args.insn);
            uint64 addr = rs & arch.ADDRESS_MASK;
            mem = MIPS64Memory.readMem(_args.memRoot, addr, _args.memProofOffset);
            if (_args.opcode >= 0x28) {
                // store for 32-bit
                // for 64-bit: ld (0x37) is the only non-store opcode >= 0x28
                if (_args.opcode != 0x37) {
                    // store
                    storeAddr = addr;
                    // store opcodes don't write back to a register
                    rdReg = 0;
                }
            }
        }
        // ALU
        // Note: swr outputs more than 8 bytes without the u64_mask
        uint64 val = executeMipsInstruction(_args.insn, _args.opcode, _args.fun, rs, rt, mem) & U64_MASK;
        uint64 funSel = 0x20;
        if (_args.opcode == 0 && _args.fun >= 8 && _args.fun < funSel) {
            if (_args.fun == 8 || _args.fun == 9) {
                // jr/jalr
                handleJump(_args.cpu, _args.registers, _args.fun == 8 ? 0 : rdReg, rs);
                return (newMemRoot_, memUpdated_, memAddr_);
            }
            if (_args.fun == 0xa) {
                // movz
                handleRd(_args.cpu, _args.registers, rdReg, rs, rt == 0);
                return (newMemRoot_, memUpdated_, memAddr_);
            }
            if (_args.fun == 0xb) {
                // movn
                handleRd(_args.cpu, _args.registers, rdReg, rs, rt != 0);
                return (newMemRoot_, memUpdated_, memAddr_);
            }
            // lo and hi registers
            // can write back
            if (_args.fun >= 0x10 && _args.fun < funSel) {
                handleHiLo({
                    _cpu: _args.cpu,
                    _registers: _args.registers,
                    _fun: _args.fun,
                    _rs: rs,
                    _rt: rt,
                    _storeReg: rdReg
                });
                return (newMemRoot_, memUpdated_, memAddr_);
            }
        }
        // write memory
        if (storeAddr != U64_MASK) {
            newMemRoot_ = MIPS64Memory.writeMem(storeAddr, _args.memProofOffset, val);
            memUpdated_ = true;
            memAddr_ = storeAddr;
        }
        // write back the value to destination register
        handleRd(_args.cpu, _args.registers, rdReg, val, true);
        return (newMemRoot_, memUpdated_, memAddr_);
    }
}
/// @notice Sign-extends the 16-bit immediate field of an instruction to 64 bits.
/// @param _insn The raw 32-bit instruction.
/// @return offset_ The sign-extended immediate value.
function signExtendImmediate(uint32 _insn) internal pure returns (uint64 offset_) {
    unchecked {
        offset_ = signExtend(_insn & 0xFFFF, 16);
    }
}
/// @notice Execute an instruction.
function executeMipsInstruction(
uint32 _insn,
uint32 _opcode,
uint32 _fun,
uint64 _rs,
uint64 _rt,
uint64 _mem
)
internal
pure
returns (uint64 out_)
{
unchecked {
if (_opcode == 0 || (_opcode >= 8 && _opcode < 0xF) || _opcode == 0x18 || _opcode == 0x19) {
assembly {
// transform ArithLogI to SPECIAL
switch _opcode
// addi
case 0x8 { _fun := 0x20 }
// addiu
case 0x9 { _fun := 0x21 }
// stli
case 0xA { _fun := 0x2A }
// sltiu
case 0xB { _fun := 0x2B }
// andi
case 0xC { _fun := 0x24 }
// ori
case 0xD { _fun := 0x25 }
// xori
case 0xE { _fun := 0x26 }
// daddi
case 0x18 { _fun := 0x2C }
// daddiu
case 0x19 { _fun := 0x2D }
}
// sll
if (_fun == 0x00) {
return signExtend((_rt & U32_MASK) << ((_insn >> 6) & 0x1F), 32);
}
// srl
else if (_fun == 0x02) {
return signExtend((_rt & U32_MASK) >> ((_insn >> 6) & 0x1F), 32);
}
// sra
else if (_fun == 0x03) {
uint32 shamt = (_insn >> 6) & 0x1F;
return signExtend((_rt & U32_MASK) >> shamt, 32 - shamt);
}
// sllv
else if (_fun == 0x04) {
return signExtend((_rt & U32_MASK) << (_rs & 0x1F), 32);
}
// srlv
else if (_fun == 0x6) {
return signExtend((_rt & U32_MASK) >> (_rs & 0x1F), 32);
}
// srav
else if (_fun == 0x07) {
// shamt here is different than the typical shamt which comes from the
// instruction itself, here it comes from the rs register
uint64 shamt = _rs & 0x1F;
return signExtend((_rt & U32_MASK) >> shamt, 32 - shamt);
}
// functs in range [0x8, 0x1b] are handled specially by other functions
// Explicitly enumerate each funct in range to reduce code diff against Go Vm
// jr
else if (_fun == 0x08) {
return _rs;
}
// jalr
else if (_fun == 0x09) {
return _rs;
}
// movz
else if (_fun == 0x0a) {
return _rs;
}
// movn
else if (_fun == 0x0b) {
return _rs;
}
// syscall
else if (_fun == 0x0c) {
return _rs;
}
// 0x0d - break not supported
// sync
else if (_fun == 0x0f) {
return _rs;
}
// mfhi
else if (_fun == 0x10) {
return _rs;
}
// mthi
else if (_fun == 0x11) {
return _rs;
}
// mflo
else if (_fun == 0x12) {
return _rs;
}
// mtlo
else if (_fun == 0x13) {
return _rs;
}
// dsllv
else if (_fun == 0x14) {
return _rt;
}
// dsrlv
else if (_fun == 0x16) {
return _rt;
}
// dsrav
else if (_fun == 0x17) {
return _rt;
}
// mult
else if (_fun == 0x18) {
return _rs;
}
// multu
else if (_fun == 0x19) {
return _rs;
}
// div
else if (_fun == 0x1a) {
return _rs;
}
// divu
else if (_fun == 0x1b) {
return _rs;
}
// dmult
else if (_fun == 0x1c) {
return _rs;
}
// dmultu
else if (_fun == 0x1d) {
return _rs;
}
// ddiv
else if (_fun == 0x1e) {
return _rs;
}
// ddivu
else if (_fun == 0x1f) {
return _rs;
}
// The rest includes transformed R-type arith imm instructions
// add
else if (_fun == 0x20) {
return signExtend(uint64(uint32(_rs) + uint32(_rt)), 32);
}
// addu
else if (_fun == 0x21) {
return signExtend(uint64(uint32(_rs) + uint32(_rt)), 32);
}
// sub
else if (_fun == 0x22) {
return signExtend(uint64(uint32(_rs) - uint32(_rt)), 32);
}
// subu
else if (_fun == 0x23) {
return signExtend(uint64(uint32(_rs) - uint32(_rt)), 32);
}
// and
else if (_fun == 0x24) {
return (_rs & _rt);
}
// or
else if (_fun == 0x25) {
return (_rs | _rt);
}
// xor
else if (_fun == 0x26) {
return (_rs ^ _rt);
}
// nor
else if (_fun == 0x27) {
return ~(_rs | _rt);
}
// slti
else if (_fun == 0x2a) {
return int64(_rs) < int64(_rt) ? 1 : 0;
}
// sltiu
else if (_fun == 0x2b) {
return _rs < _rt ? 1 : 0;
}
// dadd
else if (_fun == 0x2c) {
return (_rs + _rt);
}
// daddu
else if (_fun == 0x2d) {
return (_rs + _rt);
}
// dsub
else if (_fun == 0x2e) {
return (_rs - _rt);
}
// dsubu
else if (_fun == 0x2f) {
return (_rs - _rt);
}
// dsll
else if (_fun == 0x38) {
return _rt << ((_insn >> 6) & 0x1f);
}
// dsrl
else if (_fun == 0x3A) {
return _rt >> ((_insn >> 6) & 0x1f);
}
// dsra
else if (_fun == 0x3B) {
return uint64(int64(_rt) >> ((_insn >> 6) & 0x1f));
}
// dsll32
else if (_fun == 0x3c) {
return _rt << (((_insn >> 6) & 0x1f) + 32);
}
// dsrl32
else if (_fun == 0x3e) {
return _rt >> (((_insn >> 6) & 0x1f) + 32);
}
// dsra32
else if (_fun == 0x3f) {
return uint64(int64(_rt) >> (((_insn >> 6) & 0x1f) + 32));
} else {
revert("MIPS64: invalid instruction");
}
} else {
// SPECIAL2
if (_opcode == 0x1C) {
// mul
if (_fun == 0x2) {
return signExtend(uint32(int32(uint32(_rs)) * int32(uint32(_rt))), 32);
}
// clz, clo
else if (_fun == 0x20 || _fun == 0x21) {
if (_fun == 0x20) {
_rs = ~_rs;
}
uint32 i = 0;
while (_rs & 0x80000000 != 0) {
i++;
_rs <<= 1;
}
return i;
}
}
// lui
else if (_opcode == 0x0F) {
return signExtend(_rt << 16, 32);
}
// lb
else if (_opcode == 0x20) {
return selectSubWord(_rs, _mem, 1, true);
}
// lh
else if (_opcode == 0x21) {
return selectSubWord(_rs, _mem, 2, true);
}
// lwl
else if (_opcode == 0x22) {
uint32 w = uint32(selectSubWord(_rs, _mem, 4, false));
uint32 val = w << uint32((_rs & 3) * 8);
uint64 mask = uint64(U32_MASK << uint32((_rs & 3) * 8));
return signExtend(((_rt & ~mask) | uint64(val)) & U32_MASK, 32);
}
// lw
else if (_opcode == 0x23) {
return selectSubWord(_rs, _mem, 4, true);
}
// lbu
else if (_opcode == 0x24) {
return selectSubWord(_rs, _mem, 1, false);
}
// lhu
else if (_opcode == 0x25) {
return selectSubWord(_rs, _mem, 2, false);
}
// lwr
else if (_opcode == 0x26) {
uint32 w = uint32(selectSubWord(_rs, _mem, 4, false));
uint32 val = w >> (24 - (_rs & 3) * 8);
uint32 mask = U32_MASK >> (24 - (_rs & 3) * 8);
uint64 lwrResult = (uint32(_rt) & ~mask) | val;
if (_rs & 3 == 3) {
// loaded bit 31
return signExtend(uint64(lwrResult), 32);
} else {
// NOTE: cannon64 implementation specific: We leave the upper word untouched
uint64 rtMask = 0xFF_FF_FF_FF_00_00_00_00;
return ((_rt & rtMask) | uint64(lwrResult));
}
}
// sb
else if (_opcode == 0x28) {
return updateSubWord(_rs, _mem, 1, _rt);
}
// sh
else if (_opcode == 0x29) {
return updateSubWord(_rs, _mem, 2, _rt);
}
// swl
else if (_opcode == 0x2a) {
uint64 sr = (_rs & 3) << 3;
uint64 val = ((_rt & U32_MASK) >> sr) << (32 - ((_rs & 0x4) << 3));
uint64 mask = (uint64(U32_MASK) >> sr) << (32 - ((_rs & 0x4) << 3));
return (_mem & ~mask) | val;
}
// sw
else if (_opcode == 0x2b) {
return updateSubWord(_rs, _mem, 4, _rt);
}
// swr
else if (_opcode == 0x2e) {
uint32 w = uint32(selectSubWord(_rs, _mem, 4, false));
uint64 val = _rt << (24 - (_rs & 3) * 8);
uint64 mask = U32_MASK << uint32(24 - (_rs & 3) * 8);
uint64 swrResult = (w & ~mask) | uint32(val);
return updateSubWord(_rs, _mem, 4, swrResult);
}
// MIPS64
// ldl
else if (_opcode == 0x1a) {
uint64 sl = (_rs & 0x7) << 3;
uint64 val = _mem << sl;
uint64 mask = U64_MASK << sl;
return (val | (_rt & ~mask));
}
// ldr
else if (_opcode == 0x1b) {
uint64 sr = 56 - ((_rs & 0x7) << 3);
uint64 val = _mem >> sr;
uint64 mask = U64_MASK << (64 - sr);
return (val | (_rt & mask));
}
// lwu
else if (_opcode == 0x27) {
return ((_mem >> (32 - ((_rs & 0x4) << 3))) & U32_MASK);
}
// sdl
else if (_opcode == 0x2c) {
uint64 sr = (_rs & 0x7) << 3;
uint64 val = _rt >> sr;
uint64 mask = U64_MASK >> sr;
return (val | (_mem & ~mask));
}
// sdr
else if (_opcode == 0x2d) {
uint64 sl = 56 - ((_rs & 0x7) << 3);
uint64 val = _rt << sl;
uint64 mask = U64_MASK << sl;
return (val | (_mem & ~mask));
}
// ld
else if (_opcode == 0x37) {
return _mem;
}
// sd
else if (_opcode == 0x3F) {
return _rt;
} else {
revert("MIPS64: invalid instruction");
}
}
revert("MIPS64: invalid instruction");
}
}
/// @notice Sign-extends `_dat` from bit position `_idx` to the full word width.
///         The low `_idx` bits are preserved; all bits above them are filled with
///         the sign bit (bit `_idx - 1`).
/// @param _dat The value to extend. Callers are expected to pass a value whose bits at
///             and above `_idx` are clear, so the sign test reduces to bit `_idx - 1`.
/// @param _idx The bit width of the value being extended.
/// @return out_ The sign-extended 64-bit word.
function signExtend(uint64 _dat, uint64 _idx) internal pure returns (uint64 out_) {
    unchecked {
        // Mask selecting the low `_idx` bits that are kept verbatim.
        uint256 lowMask = (1 << _idx) - 1;
        // All word bits above position `_idx - 1`, used to fill when negative.
        uint256 highBits = ((1 << (arch.WORD_SIZE - _idx)) - 1) << _idx;
        if ((_dat >> (_idx - 1)) == 0) {
            out_ = uint64(_dat & lowMask);
        } else {
            out_ = uint64((_dat & lowMask) | highBits);
        }
    }
}
/// @notice Handles a branch instruction, updating the MIPS state PC where needed.
/// @param _cpu Holds the state of cpu scalars pc, nextPC, hi, lo.
/// @param _registers Holds the current state of the cpu registers.
/// @param _opcode The opcode of the branch instruction.
/// @param _insn The instruction to be executed (provides the regimm rt field and the 16-bit offset).
/// @param _rtReg The index of the rt register, read as the comparison operand for beq/bne.
/// @param _rs The value of the rs register, tested to decide whether the branch is taken.
function handleBranch(
    st.CpuScalars memory _cpu,
    uint64[32] memory _registers,
    uint32 _opcode,
    uint32 _insn,
    uint64 _rtReg,
    uint64 _rs
)
    internal
    pure
{
    unchecked {
        bool shouldBranch = false;

        // A branch may not itself be executed from a branch/jump delay slot.
        if (_cpu.nextPC != _cpu.pc + 4) {
            revert("MIPS64: branch in delay slot");
        }

        // beq/bne: Branch on equal / not equal
        if (_opcode == 4 || _opcode == 5) {
            uint64 rt = _registers[_rtReg];
            shouldBranch = (_rs == rt && _opcode == 4) || (_rs != rt && _opcode == 5);
        }
        // blez: Branches if instruction is less than or equal to zero
        else if (_opcode == 6) {
            shouldBranch = int64(_rs) <= 0;
        }
        // bgtz: Branches if instruction is greater than zero
        else if (_opcode == 7) {
            shouldBranch = int64(_rs) > 0;
        }
        // bltz/bgez: Branch on less than zero / greater than or equal to zero
        else if (_opcode == 1) {
            // regimm: the instruction's rt field selects the branch variant.
            uint32 rtv = ((_insn >> 16) & 0x1F);
            // bltz
            if (rtv == 0) {
                shouldBranch = int64(_rs) < 0;
            }
            // bgez
            if (rtv == 1) {
                shouldBranch = int64(_rs) >= 0;
            }
            // bgezal (i.e. bal mnemonic)
            if (rtv == 0x11) {
                shouldBranch = int64(_rs) >= 0;
                _registers[REG_RA] = _cpu.pc + 8; // always set regardless of branch taken
            }
            // NOTE(review): any other regimm variant falls through with shouldBranch == false
            // and simply advances the PC, as in the non-taken case.
        }

        // Update the state's previous PC
        uint64 prevPC = _cpu.pc;

        // Execute the delay slot first
        _cpu.pc = _cpu.nextPC;

        // If we should branch, update the PC to the branch target
        // Otherwise, proceed to the next instruction
        if (shouldBranch) {
            // Target = address of the delay slot + sign-extended, word-scaled 16-bit offset.
            _cpu.nextPC = prevPC + 4 + (signExtend(_insn & 0xFFFF, 16) << 2);
        } else {
            _cpu.nextPC = _cpu.nextPC + 4;
        }
    }
}
/// @notice Handles HI and LO register instructions. It also additionally handles doubleword variable shift
/// operations
/// @param _cpu Holds the state of cpu scalars pc, nextPC, hi, lo.
/// @param _registers Holds the current state of the cpu registers.
/// @param _fun The function code of the instruction.
/// @param _rs The value of the RS register.
/// @param _rt The value of the RT register.
/// @param _storeReg The register to store the result in.
function handleHiLo(
    st.CpuScalars memory _cpu,
    uint64[32] memory _registers,
    uint32 _fun,
    uint64 _rs,
    uint64 _rt,
    uint64 _storeReg
)
    internal
    pure
{
    unchecked {
        // Result destined for `_storeReg`; only the move/shift variants below set it.
        uint64 val = 0;

        // mfhi: Move the contents of the HI register into the destination
        if (_fun == 0x10) {
            val = _cpu.hi;
        }
        // mthi: Move the contents of the source into the HI register
        else if (_fun == 0x11) {
            _cpu.hi = _rs;
        }
        // mflo: Move the contents of the LO register into the destination
        else if (_fun == 0x12) {
            val = _cpu.lo;
        }
        // mtlo: Move the contents of the source into the LO register
        else if (_fun == 0x13) {
            _cpu.lo = _rs;
        }
        // mult: Multiplies `rs` by `rt` and stores the result in HI and LO registers
        else if (_fun == 0x18) {
            // 32-bit signed multiply. The 64-bit product is split: HI gets the upper
            // 32 bits, LO the lower 32, each sign-extended to the 64-bit register width.
            uint64 acc = uint64(int64(int32(uint32(_rs))) * int64(int32(uint32(_rt))));
            _cpu.hi = signExtend(uint64(acc >> 32), 32);
            _cpu.lo = signExtend(uint64(uint32(acc)), 32);
        }
        // multu: Unsigned multiplies `rs` by `rt` and stores the result in HI and LO registers
        else if (_fun == 0x19) {
            uint64 acc = uint64(uint32(_rs)) * uint64(uint32(_rt));
            _cpu.hi = signExtend(uint64(acc >> 32), 32);
            _cpu.lo = signExtend(uint64(uint32(acc)), 32);
        }
        // div: Divides `rs` by `rt`.
        // Stores the quotient in LO
        // And the remainder in HI
        else if (_fun == 0x1a) {
            if (uint32(_rt) == 0) {
                revert("MIPS64: division by zero");
            }
            // 32-bit signed division; results are sign-extended into the 64-bit registers.
            _cpu.hi = signExtend(uint32(int32(uint32(_rs)) % int32(uint32(_rt))), 32);
            _cpu.lo = signExtend(uint32(int32(uint32(_rs)) / int32(uint32(_rt))), 32);
        }
        // divu: Unsigned divides `rs` by `rt`.
        // Stores the quotient in LO
        // And the remainder in HI
        else if (_fun == 0x1b) {
            if (uint32(_rt) == 0) {
                revert("MIPS64: division by zero");
            }
            _cpu.hi = signExtend(uint64(uint32(_rs) % uint32(_rt)), 32);
            _cpu.lo = signExtend(uint64(uint32(_rs) / uint32(_rt)), 32);
        }
        // dsllv: doubleword shift left logical variable (shift amount from low 6 bits of rs)
        else if (_fun == 0x14) {
            val = _rt << (_rs & 0x3F);
        }
        // dsrlv: doubleword shift right logical variable
        else if (_fun == 0x16) {
            val = _rt >> (_rs & 0x3F);
        }
        // dsrav: doubleword shift right arithmetic variable
        else if (_fun == 0x17) {
            val = uint64(int64(_rt) >> (_rs & 0x3F));
        }
        // dmult: 64x64 -> 128-bit signed multiply; HI gets the upper half, LO the lower.
        else if (_fun == 0x1c) {
            int128 res = int128(int64(_rs)) * int128(int64(_rt));
            _cpu.hi = uint64(int64(res >> 64));
            _cpu.lo = uint64(uint128(res) & U64_MASK);
        }
        // dmultu: 64x64 -> 128-bit unsigned multiply; HI gets the upper half, LO the lower.
        else if (_fun == 0x1d) {
            uint128 res = uint128(_rs) * uint128(_rt);
            _cpu.hi = uint64(res >> 64);
            _cpu.lo = uint64(res);
        }
        // ddiv: 64-bit signed division; quotient in LO, remainder in HI.
        else if (_fun == 0x1e) {
            if (_rt == 0) {
                revert("MIPS64: division by zero");
            }
            _cpu.hi = uint64(int64(_rs) % int64(_rt));
            _cpu.lo = uint64(int64(_rs) / int64(_rt));
        }
        // ddivu: 64-bit unsigned division; quotient in LO, remainder in HI.
        else if (_fun == 0x1f) {
            if (_rt == 0) {
                revert("MIPS64: division by zero");
            }
            _cpu.hi = _rs % _rt;
            _cpu.lo = _rs / _rt;
        }

        // Store the result in the destination register, if applicable
        if (_storeReg != 0) {
            _registers[_storeReg] = val;
        }

        // Update the PC
        _cpu.pc = _cpu.nextPC;
        _cpu.nextPC = _cpu.nextPC + 4;
    }
}
/// @notice Executes a jump instruction: steps through the delay slot, redirects control to
///         `_dest`, and (for linking jumps) records the return address.
/// @param _cpu Holds the state of cpu scalars pc, nextPC, hi, lo.
/// @param _registers Holds the current state of the cpu registers.
/// @param _linkReg The register receiving the return address, or 0 for a non-linking jump.
/// @param _dest The destination to jump to.
function handleJump(
    st.CpuScalars memory _cpu,
    uint64[32] memory _registers,
    uint64 _linkReg,
    uint64 _dest
)
    internal
    pure
{
    unchecked {
        // A jump may not itself be executed from a branch/jump delay slot.
        if (_cpu.nextPC != _cpu.pc + 4) {
            revert("MIPS64: jump in delay slot");
        }

        // The return address points just past the delay slot instruction.
        uint64 returnAddr = _cpu.pc + 8;

        // Step into the delay slot, then hand control to the jump target.
        _cpu.pc = _cpu.nextPC;
        _cpu.nextPC = _dest;

        // Linking jumps (jal/jalr) record where execution resumes afterwards.
        if (_linkReg != 0) {
            _registers[_linkReg] = returnAddr;
        }
    }
}
/// @notice Stores a value into a destination register and advances the PC.
/// @param _cpu Holds the state of cpu scalars pc, nextPC, hi, lo.
/// @param _registers Holds the current state of the cpu registers.
/// @param _storeReg The register to store the value into; must index one of the 32 GPRs.
/// @param _val The value to store.
/// @param _conditional When false (movz/movn whose condition failed), the write is suppressed.
function handleRd(
    st.CpuScalars memory _cpu,
    uint64[32] memory _registers,
    uint64 _storeReg,
    uint64 _val,
    bool _conditional
)
    internal
    pure
{
    unchecked {
        // The destination must name one of the 32 general-purpose registers.
        require(_storeReg < 32, "MIPS64: valid register");

        // Register zero is hardwired to 0 and is never written; conditional
        // moves may also elect not to write.
        bool doWrite = _conditional && _storeReg != 0;
        if (doWrite) {
            _registers[_storeReg] = _val;
        }

        // Advance to the next sequential instruction.
        _cpu.pc = _cpu.nextPC;
        _cpu.nextPC = _cpu.nextPC + 4;
    }
}
/// @notice Extracts a `_byteLength`-sized value from the full memory word `_memWord`,
///         positioned by the low-order bits of `_vaddr`.
/// @param _vaddr The virtual address of the subword.
/// @param _memWord The full word to select a subword from.
/// @param _byteLength The size of the subword in bytes.
/// @param _signExtend Whether to sign extend the selected subword.
/// @return retval_ The extracted (and optionally sign-extended) subword.
function selectSubWord(
    uint64 _vaddr,
    uint64 _memWord,
    uint64 _byteLength,
    bool _signExtend
)
    internal
    pure
    returns (uint64 retval_)
{
    // Locate the subword lane within the memory word.
    (uint64 dataMask, uint64 bitOffset, uint64 bitLength) = calculateSubWordMaskAndOffset(_vaddr, _byteLength);
    // Shift the lane down and strip everything outside it.
    uint64 raw = (_memWord >> bitOffset) & dataMask;
    retval_ = _signExtend ? signExtend(raw, bitLength) : raw;
    return retval_;
}
/// @notice Returns `_memWord` with the `_byteLength`-sized lane addressed by `_vaddr`
///         replaced by `_value` (truncated to the lane width).
/// @param _vaddr The virtual address of the subword.
/// @param _memWord The full word to update.
/// @param _byteLength The size of the subword in bytes.
/// @param _value The subword that updates _memWord.
/// @return word_ The updated memory word.
function updateSubWord(
    uint64 _vaddr,
    uint64 _memWord,
    uint64 _byteLength,
    uint64 _value
)
    internal
    pure
    returns (uint64 word_)
{
    (uint64 dataMask, uint64 bitOffset,) = calculateSubWordMaskAndOffset(_vaddr, _byteLength);
    // Keep every bit of the original word outside the target lane.
    uint64 preserved = _memWord & ~(dataMask << bitOffset);
    // Truncate the new value to the lane width and slot it into position.
    uint64 inserted = (_value & dataMask) << bitOffset;
    return preserved | inserted;
}
/// @notice Computes the mask, bit offset, and bit length used to extract or update a
///         `_byteLength`-sized subword within an aligned 64-bit memory word.
/// @param _vaddr The virtual address; only its low-order (sub-word) bits are consulted.
/// @param _byteLength The subword size in bytes — assumed to be a power of two no larger
///                    than the 8-byte word (callers in this file pass 1, 2, or 4) — TODO confirm.
/// @return dataMask_ A right-aligned mask of `_byteLength * 8` bits.
/// @return bitOffset_ Distance in bits from the least-significant end of the word to the subword.
/// @return bitLength_ The subword width in bits.
function calculateSubWordMaskAndOffset(
    uint64 _vaddr,
    uint64 _byteLength
)
    internal
    pure
    returns (uint64 dataMask_, uint64 bitOffset_, uint64 bitLength_)
{
    uint64 bitLength = _byteLength << 3;
    uint64 dataMask = ~uint64(0) >> (arch.WORD_SIZE - bitLength);

    // Figure out sub-word index based on the low-order bits in vaddr
    // Truncate the address to the in-word bits (EXT_MASK) and align it down to a
    // multiple of the subword size.
    uint64 byteIndexMask = _vaddr & arch.EXT_MASK & ~(_byteLength - 1);
    uint64 maxByteShift = arch.WORD_SIZE_BYTES - _byteLength;
    // Since byteIndexMask's set bits are all drawn from _vaddr, this AND yields
    // byteIndexMask itself, i.e. the aligned byte index (mirrors the Go VM).
    uint64 byteIndex = _vaddr & byteIndexMask;
    // Big-endian layout: byte index 0 sits at the most-significant end of the word.
    uint64 bitOffset = (maxByteShift - byteIndex) << 3;
    return (dataMask, bitOffset, bitLength);
}
}
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { InvalidExitedValue } from "src/cannon/libraries/CannonErrors.sol";
library MIPS64State {
    /// @notice Scalar CPU state tracked alongside the register file.
    struct CpuScalars {
        uint64 pc;
        uint64 nextPC;
        uint64 lo;
        uint64 hi;
    }

    /// @notice Reverts with `InvalidExitedValue` unless `_exited` is a canonical
    ///         boolean encoding (0 or 1).
    /// @param _exited The exited field to validate.
    function assertExitedIsValid(uint32 _exited) internal pure {
        // Anything other than 0/1 indicates a corrupt state encoding.
        if (_exited != 0 && _exited != 1) {
            revert InvalidExitedValue();
        }
    }
}
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { MIPS64Memory } from "src/cannon/libraries/MIPS64Memory.sol";
import { MIPS64State as st } from "src/cannon/libraries/MIPS64State.sol";
import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol";
import { PreimageKeyLib } from "src/cannon/PreimageKeyLib.sol";
import { MIPS64Arch as arch } from "src/cannon/libraries/MIPS64Arch.sol";
library MIPS64Syscalls {
    /// @notice Argument bundle for `handleSysRead`; passed as a struct to reduce stack pressure.
    struct SysReadParams {
        /// @param _a0 The file descriptor.
        uint64 a0;
        /// @param _a1 The memory location where data should be read to.
        uint64 a1;
        /// @param _a2 The number of bytes to read from the file
        uint64 a2;
        /// @param _preimageKey The key of the preimage to read.
        bytes32 preimageKey;
        /// @param _preimageOffset The offset of the preimage to read.
        uint64 preimageOffset;
        /// @param _localContext The local context for the preimage key.
        bytes32 localContext;
        /// @param _oracle The address of the preimage oracle.
        IPreimageOracle oracle;
        /// @param _proofOffset The offset of the memory proof in calldata.
        uint256 proofOffset;
        /// @param _memRoot The current memory root.
        bytes32 memRoot;
    }

    uint64 internal constant U64_MASK = 0xFFffFFffFFffFFff;
    uint64 internal constant PAGE_ADDR_MASK = 4095;
    uint64 internal constant PAGE_SIZE = 4096;

    // Linux MIPS64 (n64 ABI) syscall numbers recognized by the VM
    uint32 internal constant SYS_MMAP = 5009;
    uint32 internal constant SYS_BRK = 5012;
    uint32 internal constant SYS_CLONE = 5055;
    uint32 internal constant SYS_EXIT_GROUP = 5205;
    uint32 internal constant SYS_READ = 5000;
    uint32 internal constant SYS_WRITE = 5001;
    uint32 internal constant SYS_FCNTL = 5070;
    uint32 internal constant SYS_EXIT = 5058;
    uint32 internal constant SYS_SCHED_YIELD = 5023;
    uint32 internal constant SYS_GETTID = 5178;
    uint32 internal constant SYS_FUTEX = 5194;
    uint32 internal constant SYS_OPEN = 5002;
    uint32 internal constant SYS_NANOSLEEP = 5034;
    uint32 internal constant SYS_CLOCKGETTIME = 5222;
    uint32 internal constant SYS_GETPID = 5038;
    // no-op syscalls
    uint32 internal constant SYS_MUNMAP = 5011;
    uint32 internal constant SYS_GETAFFINITY = 5196;
    uint32 internal constant SYS_MADVISE = 5027;
    uint32 internal constant SYS_RTSIGPROCMASK = 5014;
    uint32 internal constant SYS_SIGALTSTACK = 5129;
    uint32 internal constant SYS_RTSIGACTION = 5013;
    uint32 internal constant SYS_PRLIMIT64 = 5297;
    uint32 internal constant SYS_CLOSE = 5003;
    uint32 internal constant SYS_PREAD64 = 5016;
    uint32 internal constant SYS_FSTAT = 5005;
    //uint32 internal constant SYS_FSTAT64 = 0xFFFFFFFF; // UndefinedSysNr - not supported by MIPS64
    uint32 internal constant SYS_OPENAT = 5247;
    uint32 internal constant SYS_READLINK = 5087;
    uint32 internal constant SYS_READLINKAT = 5257;
    uint32 internal constant SYS_IOCTL = 5015;
    uint32 internal constant SYS_EPOLLCREATE1 = 5285;
    uint32 internal constant SYS_PIPE2 = 5287;
    uint32 internal constant SYS_EPOLLCTL = 5208;
    uint32 internal constant SYS_EPOLLPWAIT = 5272;
    uint32 internal constant SYS_GETRANDOM = 5313;
    uint32 internal constant SYS_UNAME = 5061;
    //uint32 internal constant SYS_STAT64 = 0xFFFFFFFF; // UndefinedSysNr - not supported by MIPS64
    uint32 internal constant SYS_GETUID = 5100;
    uint32 internal constant SYS_GETGID = 5102;
    //uint32 internal constant SYS_LLSEEK = 0xFFFFFFFF; // UndefinedSysNr - not supported by MIPS64
    uint32 internal constant SYS_MINCORE = 5026;
    uint32 internal constant SYS_TGKILL = 5225;
    uint32 internal constant SYS_GETRLIMIT = 5095;
    uint32 internal constant SYS_LSEEK = 5008;
    // profiling-related syscalls - ignored
    uint32 internal constant SYS_SETITIMER = 5036;
    uint32 internal constant SYS_TIMERCREATE = 5216;
    uint32 internal constant SYS_TIMERSETTIME = 5217;
    uint32 internal constant SYS_TIMERDELETE = 5220;

    // File descriptors wired to the VM's I/O channels
    uint32 internal constant FD_STDIN = 0;
    uint32 internal constant FD_STDOUT = 1;
    uint32 internal constant FD_STDERR = 2;
    uint32 internal constant FD_HINT_READ = 3;
    uint32 internal constant FD_HINT_WRITE = 4;
    uint32 internal constant FD_PREIMAGE_READ = 5;
    uint32 internal constant FD_PREIMAGE_WRITE = 6;

    // Syscall error return (-1 as a 64-bit word) and Linux errno values
    uint64 internal constant SYS_ERROR_SIGNAL = U64_MASK;
    uint64 internal constant EBADF = 0x9;
    uint64 internal constant EINVAL = 0x16;
    uint64 internal constant EAGAIN = 0xb;
    uint64 internal constant ETIMEDOUT = 0x91;

    // Futex operations and timeout behavior
    uint64 internal constant FUTEX_WAIT_PRIVATE = 128;
    uint64 internal constant FUTEX_WAKE_PRIVATE = 129;
    uint64 internal constant FUTEX_TIMEOUT_STEPS = 10000;
    uint64 internal constant FUTEX_NO_TIMEOUT = type(uint64).max;
    uint64 internal constant FUTEX_EMPTY_ADDR = U64_MASK;

    // Thread scheduling quantum and emulated clock rate
    uint64 internal constant SCHED_QUANTUM = 100_000;
    uint64 internal constant HZ = 10_000_000;
    uint64 internal constant CLOCK_GETTIME_REALTIME_FLAG = 0;
    uint64 internal constant CLOCK_GETTIME_MONOTONIC_FLAG = 1;

    /// @notice Start of the data segment.
    uint64 internal constant PROGRAM_BREAK = 0x00_00_40_00_00_00_00_00;
    uint64 internal constant HEAP_END = 0x00_00_60_00_00_00_00_00;

    // SYS_CLONE flags
    uint64 internal constant CLONE_VM = 0x100;
    uint64 internal constant CLONE_FS = 0x200;
    uint64 internal constant CLONE_FILES = 0x400;
    uint64 internal constant CLONE_SIGHAND = 0x800;
    uint64 internal constant CLONE_PTRACE = 0x2000;
    uint64 internal constant CLONE_VFORK = 0x4000;
    uint64 internal constant CLONE_PARENT = 0x8000;
    uint64 internal constant CLONE_THREAD = 0x10000;
    uint64 internal constant CLONE_NEWNS = 0x20000;
    uint64 internal constant CLONE_SYSVSEM = 0x40000;
    uint64 internal constant CLONE_SETTLS = 0x80000;
    uint64 internal constant CLONE_PARENTSETTID = 0x100000;
    uint64 internal constant CLONE_CHILDCLEARTID = 0x200000;
    uint64 internal constant CLONE_UNTRACED = 0x800000;
    uint64 internal constant CLONE_CHILDSETTID = 0x1000000;
    uint64 internal constant CLONE_STOPPED = 0x2000000;
    uint64 internal constant CLONE_NEWUTS = 0x4000000;
    uint64 internal constant CLONE_NEWIPC = 0x8000000;
    // The only clone-flag combination the VM accepts (plain thread creation)
    uint64 internal constant VALID_SYS_CLONE_FLAGS =
        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM | CLONE_THREAD;

    // FYI: https://en.wikibooks.org/wiki/MIPS_Assembly/Register_File
    // https://refspecs.linuxfoundation.org/elf/mipsabi.pdf
    uint32 internal constant REG_V0 = 2;
    uint32 internal constant REG_A0 = 4;
    uint32 internal constant REG_A1 = 5;
    uint32 internal constant REG_A2 = 6;
    uint32 internal constant REG_A3 = 7;

    // FYI: https://web.archive.org/web/20231223163047/https://www.linux-mips.org/wiki/Syscall
    uint32 internal constant REG_SYSCALL_NUM = REG_V0;
    uint32 internal constant REG_SYSCALL_ERRNO = REG_A3;
    uint32 internal constant REG_SYSCALL_RET1 = REG_V0;
    uint32 internal constant REG_SYSCALL_PARAM1 = REG_A0;
    uint32 internal constant REG_SYSCALL_PARAM2 = REG_A1;
    uint32 internal constant REG_SYSCALL_PARAM3 = REG_A2;
    uint32 internal constant REG_SYSCALL_PARAM4 = REG_A3;

    // Constants copied from MIPS64Arch for use in Yul
    uint64 internal constant WORD_SIZE_BYTES = 8;
    uint64 internal constant EXT_MASK = 0x7;

    /// @notice Extract syscall num and arguments from registers.
    /// @param _registers The cpu registers.
    /// @return sysCallNum_ The syscall number.
    /// @return a0_ The first argument available to the syscall operation.
    /// @return a1_ The second argument available to the syscall operation.
    /// @return a2_ The third argument available to the syscall operation.
    /// @return a3_ The fourth argument available to the syscall operation.
    function getSyscallArgs(uint64[32] memory _registers)
        internal
        pure
        returns (uint64 sysCallNum_, uint64 a0_, uint64 a1_, uint64 a2_, uint64 a3_)
    {
        unchecked {
            sysCallNum_ = _registers[REG_SYSCALL_NUM];

            a0_ = _registers[REG_SYSCALL_PARAM1];
            a1_ = _registers[REG_SYSCALL_PARAM2];
            a2_ = _registers[REG_SYSCALL_PARAM3];
            a3_ = _registers[REG_SYSCALL_PARAM4];

            return (sysCallNum_, a0_, a1_, a2_, a3_);
        }
    }

    /// @notice Like a Linux mmap syscall. Allocates a page from the heap.
    /// @param _a0 The address for the new mapping
    /// @param _a1 The size of the new mapping
    /// @param _heap The current value of the heap pointer
    /// @return v0_ The address of the new mapping
    /// @return v1_ Unused error code (0)
    /// @return newHeap_ The new value for the heap, may be unchanged
    function handleSysMmap(
        uint64 _a0,
        uint64 _a1,
        uint64 _heap
    )
        internal
        pure
        returns (uint64 v0_, uint64 v1_, uint64 newHeap_)
    {
        unchecked {
            v1_ = uint64(0);
            newHeap_ = _heap;

            uint64 sz = _a1;
            if (sz & PAGE_ADDR_MASK != 0) {
                // adjust size to align with page size
                sz += PAGE_SIZE - (sz & PAGE_ADDR_MASK);
            }
            if (_a0 == 0) {
                // Anonymous mapping: serve it from the bump-allocated heap.
                v0_ = _heap;
                newHeap_ += sz;
                // Fail if new heap exceeds memory limit, newHeap overflows to low memory, or sz overflows
                if (newHeap_ > HEAP_END || newHeap_ < _heap || sz < _a1) {
                    v0_ = SYS_ERROR_SIGNAL;
                    v1_ = EINVAL;
                    return (v0_, v1_, _heap);
                }
            } else {
                // Fixed-address request: pretend it succeeded without touching the heap.
                v0_ = _a0;
            }

            return (v0_, v1_, newHeap_);
        }
    }

    /// @notice Like a Linux read syscall. Splits unaligned reads into aligned reads.
    /// Args are provided as a struct to reduce stack pressure.
    /// @return v0_ The number of bytes read, -1 on error.
    /// @return v1_ The error code, 0 if there is no error.
    /// @return newPreimageOffset_ The new value for the preimage offset.
    /// @return newMemRoot_ The new memory root.
    function handleSysRead(SysReadParams memory _args)
        internal
        view
        returns (
            uint64 v0_,
            uint64 v1_,
            uint64 newPreimageOffset_,
            bytes32 newMemRoot_,
            bool memUpdated_,
            uint64 memAddr_
        )
    {
        unchecked {
            v0_ = uint64(0);
            v1_ = uint64(0);
            newMemRoot_ = _args.memRoot;
            newPreimageOffset_ = _args.preimageOffset;
            memUpdated_ = false;
            memAddr_ = 0;

            // args: _a0 = fd, _a1 = addr, _a2 = count
            // returns: v0_ = read, v1_ = err code
            if (_args.a0 == FD_STDIN) {
                // Leave v0_ and v1_ zero: read nothing, no error
            }
            // pre-image oracle read
            else if (_args.a0 == FD_PREIMAGE_READ) {
                // mask the addr to word-align it (8-byte words on this 64-bit VM)
                uint64 effAddr = _args.a1 & arch.ADDRESS_MASK;
                // verify proof is correct, and get the existing memory.
                uint64 mem = MIPS64Memory.readMem(_args.memRoot, effAddr, _args.proofOffset);
                // If the preimage key is a local key, localize it in the context of the caller.
                if (uint8(_args.preimageKey[0]) == 1) {
                    _args.preimageKey = PreimageKeyLib.localize(_args.preimageKey, _args.localContext);
                }
                (bytes32 dat, uint256 datLen) = _args.oracle.readPreimage(_args.preimageKey, _args.preimageOffset);

                // Transform data for writing to memory
                // We use assembly for more precise ops, and no var count limit
                uint64 a1 = _args.a1;
                uint64 a2 = _args.a2;
                assembly {
                    let alignment := and(a1, EXT_MASK) // the read might not start at an aligned address
                    let space := sub(WORD_SIZE_BYTES, alignment) // remaining space in memory word
                    if lt(space, datLen) { datLen := space } // if less space than data, shorten data
                    if lt(a2, datLen) { datLen := a2 } // if requested to read less, read less
                    dat := shr(sub(256, mul(datLen, 8)), dat) // right-align data
                    // position data to insert into memory word
                    dat := shl(mul(sub(sub(WORD_SIZE_BYTES, datLen), alignment), 8), dat)
                    // mask all bytes after start
                    let mask := sub(shl(mul(sub(WORD_SIZE_BYTES, alignment), 8), 1), 1)
                    // mask of all bytes
                    let suffixMask := sub(shl(mul(sub(sub(WORD_SIZE_BYTES, alignment), datLen), 8), 1), 1)
                    // starting from end, maybe none
                    mask := and(mask, not(suffixMask)) // reduce mask to just cover the data we insert
                    mem := or(and(mem, not(mask)), dat) // clear masked part of original memory, and insert data
                }

                // Write memory back
                newMemRoot_ = MIPS64Memory.writeMem(effAddr, _args.proofOffset, mem);
                memUpdated_ = true;
                memAddr_ = effAddr;
                newPreimageOffset_ += uint64(datLen);
                v0_ = uint64(datLen);
            }
            // hint response
            else if (_args.a0 == FD_HINT_READ) {
                // Don't read into memory, just say we read it all
                // The result is ignored anyway
                v0_ = _args.a2;
            } else {
                v0_ = U64_MASK;
                v1_ = EBADF;
            }

            return (v0_, v1_, newPreimageOffset_, newMemRoot_, memUpdated_, memAddr_);
        }
    }

    /// @notice Like a Linux write syscall. Splits unaligned writes into aligned writes.
    /// @param _a0 The file descriptor.
    /// @param _a1 The memory address to read from.
    /// @param _a2 The number of bytes to read.
    /// @param _preimageKey The current preimageKey.
    /// @param _preimageOffset The current preimageOffset.
    /// @param _proofOffset The offset of the memory proof in calldata.
    /// @param _memRoot The current memory root.
    /// @return v0_ The number of bytes written, or -1 on error.
    /// @return v1_ The error code, or 0 if empty.
    /// @return newPreimageKey_ The new preimageKey.
    /// @return newPreimageOffset_ The new preimageOffset.
    function handleSysWrite(
        uint64 _a0,
        uint64 _a1,
        uint64 _a2,
        bytes32 _preimageKey,
        uint64 _preimageOffset,
        uint256 _proofOffset,
        bytes32 _memRoot
    )
        internal
        pure
        returns (uint64 v0_, uint64 v1_, bytes32 newPreimageKey_, uint64 newPreimageOffset_)
    {
        unchecked {
            // args: _a0 = fd, _a1 = addr, _a2 = count
            // returns: v0_ = written, v1_ = err code
            v0_ = uint64(0);
            v1_ = uint64(0);
            newPreimageKey_ = _preimageKey;
            newPreimageOffset_ = _preimageOffset;

            if (_a0 == FD_STDOUT || _a0 == FD_STDERR || _a0 == FD_HINT_WRITE) {
                v0_ = _a2; // tell program we have written everything
            }
            // pre-image oracle
            else if (_a0 == FD_PREIMAGE_WRITE) {
                // mask the addr to word-align it (8-byte words on this 64-bit VM)
                uint64 mem = MIPS64Memory.readMem(_memRoot, _a1 & arch.ADDRESS_MASK, _proofOffset);
                bytes32 key = _preimageKey;

                // Construct pre-image key from memory
                // We use assembly for more precise ops, and no var count limit
                assembly {
                    let alignment := and(_a1, EXT_MASK) // the read might not start at an aligned address
                    let space := sub(WORD_SIZE_BYTES, alignment) // remaining space in memory word
                    if lt(space, _a2) { _a2 := space } // if less space than data, shorten data
                    key := shl(mul(_a2, 8), key) // shift key, make space for new info
                    let mask := sub(shl(mul(_a2, 8), 1), 1) // mask for extracting value from memory
                    mem := and(shr(mul(sub(space, _a2), 8), mem), mask) // align value to right, mask it
                    key := or(key, mem) // insert into key
                }

                // Write pre-image key to oracle
                newPreimageKey_ = key;
                newPreimageOffset_ = 0; // reset offset, to read new pre-image data from the start
                v0_ = _a2;
            } else {
                v0_ = U64_MASK;
                v1_ = EBADF;
            }

            return (v0_, v1_, newPreimageKey_, newPreimageOffset_);
        }
    }

    /// @notice Like Linux fcntl (file control) syscall, but only supports minimal file-descriptor control commands, to
    /// retrieve the file-descriptor R/W flags.
    /// @param _a0 The file descriptor.
    /// @param _a1 The control command.
    /// @return v0_ The file status flag (only supported commands are F_GETFD and F_GETFL), or -1 on error.
    /// @return v1_ An error number, or 0 if there is no error.
    function handleSysFcntl(uint64 _a0, uint64 _a1) internal pure returns (uint64 v0_, uint64 v1_) {
        unchecked {
            v0_ = uint64(0);
            v1_ = uint64(0);

            // args: _a0 = fd, _a1 = cmd
            if (_a1 == 1) {
                // F_GETFD: get file descriptor flags
                if (
                    _a0 == FD_STDIN || _a0 == FD_STDOUT || _a0 == FD_STDERR || _a0 == FD_PREIMAGE_READ
                        || _a0 == FD_HINT_READ || _a0 == FD_PREIMAGE_WRITE || _a0 == FD_HINT_WRITE
                ) {
                    v0_ = 0; // No flags set
                } else {
                    v0_ = U64_MASK;
                    v1_ = EBADF;
                }
            } else if (_a1 == 3) {
                // F_GETFL: get file status flags
                if (_a0 == FD_STDIN || _a0 == FD_PREIMAGE_READ || _a0 == FD_HINT_READ) {
                    v0_ = 0; // O_RDONLY
                } else if (_a0 == FD_STDOUT || _a0 == FD_STDERR || _a0 == FD_PREIMAGE_WRITE || _a0 == FD_HINT_WRITE) {
                    v0_ = 1; // O_WRONLY
                } else {
                    v0_ = U64_MASK;
                    v1_ = EBADF;
                }
            } else {
                v0_ = U64_MASK;
                v1_ = EINVAL; // cmd not recognized by this kernel
            }

            return (v0_, v1_);
        }
    }

    /// @notice Writes a syscall's results back into the register file and advances the PC.
    /// @param _cpu Holds the state of cpu scalars pc, nextPC, hi, lo.
    /// @param _registers Holds the current state of the cpu registers.
    /// @param _v0 The syscall return value, written to the v0 register.
    /// @param _v1 The syscall error number, written to the a3 register.
    function handleSyscallUpdates(
        st.CpuScalars memory _cpu,
        uint64[32] memory _registers,
        uint64 _v0,
        uint64 _v1
    )
        internal
        pure
    {
        unchecked {
            // Write the results back to the state registers
            _registers[REG_SYSCALL_RET1] = _v0;
            _registers[REG_SYSCALL_ERRNO] = _v1;

            // Update the PC and nextPC
            _cpu.pc = _cpu.nextPC;
            _cpu.nextPC = _cpu.nextPC + 4;
        }
    }
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment