Commit cc2715c3 authored by mbaxter's avatar mbaxter Committed by GitHub

cannon: Add more sync tests (#12949)

* cannon: Port go map tests

* cannon: Port pool_test.go

* cannon: Port a few more mutex tests

* cannon: Port waitgroup_test.go

* cannon: Port oncefunc_test.go (in progress)

* cannon: Port atomic_test.go (in progress)

* cannon: Port value_test.go (in progress)

* cannon: Fix atomic tests by using a test mock

* cannon: Fix test fail behavior

* cannon: Move test util to a shared module

* cannon: Use common testutil throughout

* cannon: Fix failing tests

* cannon: Add sanity check test program for test runner utils

* cannon: Add more util tests, fix step counts

* cannon: Rename test util method

* cannon: Fix panic, error handling in testutil

* cannon: Dedupe test running code

* cannon: Simplify testutil interface

* cannon: Mark mt tests as slow

* cannon: Cut debugging code from test

* cannon: Validate gc complete output

* cannon: Synchronize access to bool values
parent c29b2094
...@@ -169,7 +169,7 @@ func SignExtendImmediate(insn uint32) Word { ...@@ -169,7 +169,7 @@ func SignExtendImmediate(insn uint32) Word {
func assertMips64(insn uint32) { func assertMips64(insn uint32) {
if arch.IsMips32 { if arch.IsMips32 {
panic(fmt.Sprintf("invalid instruction: %x", insn)) panic(fmt.Sprintf("invalid instruction: 0x%08x", insn))
} }
} }
...@@ -327,7 +327,7 @@ func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem ...@@ -327,7 +327,7 @@ func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem
assertMips64(insn) assertMips64(insn)
return Word(int64(rt) >> (((insn >> 6) & 0x1f) + 32)) return Word(int64(rt) >> (((insn >> 6) & 0x1f) + 32))
default: default:
panic(fmt.Sprintf("invalid instruction: %x", insn)) panic(fmt.Sprintf("invalid instruction: 0x%08x", insn))
} }
} else { } else {
switch opcode { switch opcode {
......
...@@ -34,20 +34,82 @@ func TestInstrumentedState_Claim(t *testing.T) { ...@@ -34,20 +34,82 @@ func TestInstrumentedState_Claim(t *testing.T) {
testutil.RunVMTest_Claim(t, CreateInitialState, vmFactory, false) testutil.RunVMTest_Claim(t, CreateInitialState, vmFactory, false)
} }
// TestInstrumentedState_UtilsCheck runs guest programs that are designed to fail,
// verifying that the shared test-runner utilities surface failures via a non-zero
// exit code and the expected failure message (rather than silently passing).
func TestInstrumentedState_UtilsCheck(t *testing.T) {
// Sanity check that test running utilities will return a non-zero exit code on failure
t.Parallel()
cases := []struct {
name string
expectedOutput string
}{
{name: "utilscheck", expectedOutput: "Test failed: ShouldFail"},
{name: "utilscheck2", expectedOutput: "Test failed: ShouldFail (subtest 2)"},
{name: "utilscheck3", expectedOutput: "Test panicked: ShouldFail (panic test)"},
{name: "utilscheck4", expectedOutput: "Test panicked: ShouldFail"},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
state, meta := testutil.LoadELFProgram(t, testutil.ProgramPath(c.name), CreateInitialState, false)
oracle := testutil.StaticOracle(t, []byte{})
var stdOutBuf, stdErrBuf bytes.Buffer
us := NewInstrumentedState(state, oracle, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger(), meta)
// Step the VM until the guest exits or the step budget is exhausted.
for i := 0; i < 1_000_000; i++ {
if us.GetState().GetExited() {
break
}
_, err := us.Step(false)
require.NoError(t, err)
}
t.Logf("Completed in %d steps", state.Step)
require.True(t, state.Exited, "must complete program")
// The guest intentionally fails, so exit code 1 is the expected outcome.
require.Equal(t, uint8(1), state.ExitCode, "exit with 1")
require.Contains(t, stdOutBuf.String(), c.expectedOutput)
require.NotContains(t, stdOutBuf.String(), "Passed test that should have failed")
require.Equal(t, "", stdErrBuf.String(), "should not print any errors")
})
}
}
func TestInstrumentedState_MultithreadedProgram(t *testing.T) { func TestInstrumentedState_MultithreadedProgram(t *testing.T) {
if os.Getenv("SKIP_SLOW_TESTS") == "true" {
t.Skip("Skipping slow test because SKIP_SLOW_TESTS is enabled")
}
t.Parallel() t.Parallel()
cases := []struct { cases := []struct {
name string name string
expectedOutput []string expectedOutput []string
programName string programName string
steps int
}{ }{
{ {
name: "wg and chan test", name: "general concurrency test",
expectedOutput: []string{ expectedOutput: []string{
"waitgroup result: 42", "waitgroup result: 42",
"channels result: 1234", "channels result: 1234",
"GC complete!",
},
programName: "mt-general",
steps: 5_000_000,
},
{
name: "atomic test",
expectedOutput: []string{
"Atomic tests passed",
},
programName: "mt-atomic",
steps: 350_000_000,
},
{
name: "waitgroup test",
expectedOutput: []string{
"WaitGroup tests passed",
}, },
programName: "mt-wg", programName: "mt-wg",
steps: 15_000_000,
}, },
{ {
name: "mutex test", name: "mutex test",
...@@ -55,6 +117,7 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) { ...@@ -55,6 +117,7 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) {
"Mutex test passed", "Mutex test passed",
}, },
programName: "mt-mutex", programName: "mt-mutex",
steps: 5_000_000,
}, },
{ {
name: "cond test", name: "cond test",
...@@ -62,6 +125,7 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) { ...@@ -62,6 +125,7 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) {
"Cond test passed", "Cond test passed",
}, },
programName: "mt-cond", programName: "mt-cond",
steps: 5_000_000,
}, },
{ {
name: "rwmutex test", name: "rwmutex test",
...@@ -69,6 +133,7 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) { ...@@ -69,6 +133,7 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) {
"RWMutex test passed", "RWMutex test passed",
}, },
programName: "mt-rwmutex", programName: "mt-rwmutex",
steps: 5_000_000,
}, },
{ {
name: "once test", name: "once test",
...@@ -76,6 +141,15 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) { ...@@ -76,6 +141,15 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) {
"Once test passed", "Once test passed",
}, },
programName: "mt-once", programName: "mt-once",
steps: 5_000_000,
},
{
name: "oncefunc test",
expectedOutput: []string{
"OnceFunc tests passed",
},
programName: "mt-oncefunc",
steps: 15_000_000,
}, },
{ {
name: "map test", name: "map test",
...@@ -83,6 +157,7 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) { ...@@ -83,6 +157,7 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) {
"Map test passed", "Map test passed",
}, },
programName: "mt-map", programName: "mt-map",
steps: 100_000_000,
}, },
{ {
name: "pool test", name: "pool test",
...@@ -90,6 +165,15 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) { ...@@ -90,6 +165,15 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) {
"Pool test passed", "Pool test passed",
}, },
programName: "mt-pool", programName: "mt-pool",
steps: 50_000_000,
},
{
name: "value test",
expectedOutput: []string{
"Value tests passed",
},
programName: "mt-value",
steps: 3_000_000,
}, },
} }
...@@ -97,12 +181,14 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) { ...@@ -97,12 +181,14 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) {
test := test test := test
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
t.Parallel() t.Parallel()
state, _ := testutil.LoadELFProgram(t, testutil.ProgramPath(test.programName), CreateInitialState, false)
state, meta := testutil.LoadELFProgram(t, testutil.ProgramPath(test.programName), CreateInitialState, false)
oracle := testutil.StaticOracle(t, []byte{}) oracle := testutil.StaticOracle(t, []byte{})
var stdOutBuf, stdErrBuf bytes.Buffer var stdOutBuf, stdErrBuf bytes.Buffer
us := NewInstrumentedState(state, oracle, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger(), nil) us := NewInstrumentedState(state, oracle, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger(), meta)
for i := 0; i < 5_000_000; i++ {
for i := 0; i < test.steps; i++ {
if us.GetState().GetExited() { if us.GetState().GetExited() {
break break
} }
......
// This file is based on code written by The Go Authors.
// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/atomic/atomic_test.go
//
// --- Original License Notice ---
//
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
import (
"fmt"
"reflect"
"runtime"
"runtime/debug"
"strings"
. "sync/atomic"
"testing"
"unsafe"
"utils/testutil"
)
// Tests of correct behavior, without contention.
// (Does the function work as advertised?)
//
// Test that the Add functions add correctly.
// Test that the CompareAndSwap functions actually
// do the comparison and the swap correctly.
//
// The loop over power-of-two values is meant to
// ensure that the operations apply to the full word size.
// The struct fields x.before and x.after check that the
// operations do not extend past the full word size.
// Sentinel values stored in the before/after fields that bracket each test
// target; if an atomic op writes past its word, these canaries change.
const (
magic32 = 0xdedbeef
magic64 = 0xdeddeadbeefbeef
)
// TestSwapInt32 checks SwapInt32 over power-of-two values (covering the full
// word width); before/after canaries detect writes past the word.
func TestSwapInt32(t *testutil.TestRunner) {
var x struct {
before int32
i int32
after int32
}
x.before = magic32
x.after = magic32
var j int32
for delta := int32(1); delta+delta > delta; delta += delta {
k := SwapInt32(&x.i, delta)
if x.i != delta || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
}
j = delta
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestSwapInt32Method is the atomic.Int32 method-based variant of TestSwapInt32.
func TestSwapInt32Method(t *testutil.TestRunner) {
var x struct {
before int32
i Int32
after int32
}
x.before = magic32
x.after = magic32
var j int32
for delta := int32(1); delta+delta > delta; delta += delta {
k := x.i.Swap(delta)
if x.i.Load() != delta || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
}
j = delta
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestSwapUint32 checks SwapUint32 over power-of-two values; canaries guard
// against out-of-word writes.
func TestSwapUint32(t *testutil.TestRunner) {
var x struct {
before uint32
i uint32
after uint32
}
x.before = magic32
x.after = magic32
var j uint32
for delta := uint32(1); delta+delta > delta; delta += delta {
k := SwapUint32(&x.i, delta)
if x.i != delta || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
}
j = delta
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestSwapUint32Method is the atomic.Uint32 method-based variant of TestSwapUint32.
func TestSwapUint32Method(t *testutil.TestRunner) {
var x struct {
before uint32
i Uint32
after uint32
}
x.before = magic32
x.after = magic32
var j uint32
for delta := uint32(1); delta+delta > delta; delta += delta {
k := x.i.Swap(delta)
if x.i.Load() != delta || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
}
j = delta
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestSwapInt64 checks SwapInt64 over power-of-two values; canaries guard
// against out-of-word writes.
func TestSwapInt64(t *testutil.TestRunner) {
var x struct {
before int64
i int64
after int64
}
// Shadow the untyped constant with a typed value for the comparisons below.
magic64 := int64(magic64)
x.before = magic64
x.after = magic64
var j int64
for delta := int64(1); delta+delta > delta; delta += delta {
k := SwapInt64(&x.i, delta)
if x.i != delta || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
}
j = delta
}
if x.before != magic64 || x.after != magic64 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
}
}
// TestSwapInt64Method is the atomic.Int64 method-based variant of TestSwapInt64.
func TestSwapInt64Method(t *testutil.TestRunner) {
var x struct {
before int64
i Int64
after int64
}
magic64 := int64(magic64)
x.before = magic64
x.after = magic64
var j int64
for delta := int64(1); delta+delta > delta; delta += delta {
k := x.i.Swap(delta)
if x.i.Load() != delta || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
}
j = delta
}
if x.before != magic64 || x.after != magic64 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
}
}
// TestSwapUint64 checks SwapUint64 over power-of-two values; canaries guard
// against out-of-word writes.
func TestSwapUint64(t *testutil.TestRunner) {
var x struct {
before uint64
i uint64
after uint64
}
magic64 := uint64(magic64)
x.before = magic64
x.after = magic64
var j uint64
for delta := uint64(1); delta+delta > delta; delta += delta {
k := SwapUint64(&x.i, delta)
if x.i != delta || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
}
j = delta
}
if x.before != magic64 || x.after != magic64 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
}
}
// TestSwapUint64Method is the atomic.Uint64 method-based variant of TestSwapUint64.
func TestSwapUint64Method(t *testutil.TestRunner) {
var x struct {
before uint64
i Uint64
after uint64
}
magic64 := uint64(magic64)
x.before = magic64
x.after = magic64
var j uint64
for delta := uint64(1); delta+delta > delta; delta += delta {
k := x.i.Swap(delta)
if x.i.Load() != delta || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
}
j = delta
}
if x.before != magic64 || x.after != magic64 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
}
}
// TestSwapUintptr checks SwapUintptr over power-of-two values; canaries guard
// against out-of-word writes.
func TestSwapUintptr(t *testutil.TestRunner) {
var x struct {
before uintptr
i uintptr
after uintptr
}
// Go via uint64 so the magic value truncates cleanly on 32-bit uintptr.
var m uint64 = magic64
magicptr := uintptr(m)
x.before = magicptr
x.after = magicptr
var j uintptr
for delta := uintptr(1); delta+delta > delta; delta += delta {
k := SwapUintptr(&x.i, delta)
if x.i != delta || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
}
j = delta
}
if x.before != magicptr || x.after != magicptr {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
}
}
// TestSwapUintptrMethod is the atomic.Uintptr method-based variant of TestSwapUintptr.
func TestSwapUintptrMethod(t *testutil.TestRunner) {
var x struct {
before uintptr
i Uintptr
after uintptr
}
var m uint64 = magic64
magicptr := uintptr(m)
x.before = magicptr
x.after = magicptr
var j uintptr
for delta := uintptr(1); delta+delta > delta; delta += delta {
k := x.i.Swap(delta)
if x.i.Load() != delta || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
}
j = delta
}
if x.before != magicptr || x.after != magicptr {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
}
}
var global [1024]byte
// testPointers returns a sampling of pointer values used to exercise the
// pointer Swap/CompareAndSwap tests: pointers into the global array at
// power-of-two offsets, one heap pointer, and nil.
func testPointers() []unsafe.Pointer {
var pointers []unsafe.Pointer
// globals
for i := 0; i < 10; i++ {
pointers = append(pointers, unsafe.Pointer(&global[1<<i-1]))
}
// heap
pointers = append(pointers, unsafe.Pointer(new(byte)))
// nil
pointers = append(pointers, nil)
return pointers
}
var short bool = true
// TestSwapPointer checks SwapPointer across global, heap, and nil pointers;
// canaries guard against out-of-word writes.
func TestSwapPointer(t *testutil.TestRunner) {
var x struct {
before uintptr
i unsafe.Pointer
after uintptr
}
var m uint64 = magic64
magicptr := uintptr(m)
x.before = magicptr
x.after = magicptr
var j unsafe.Pointer
for _, p := range testPointers() {
k := SwapPointer(&x.i, p)
if x.i != p || k != j {
t.Fatalf("p=%p i=%p j=%p k=%p", p, x.i, j, k)
}
j = p
}
if x.before != magicptr || x.after != magicptr {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
}
}
// TestSwapPointerMethod is the generic atomic.Pointer[byte] variant of TestSwapPointer.
func TestSwapPointerMethod(t *testutil.TestRunner) {
var x struct {
before uintptr
i Pointer[byte]
after uintptr
}
var m uint64 = magic64
magicptr := uintptr(m)
x.before = magicptr
x.after = magicptr
var j *byte
for _, p := range testPointers() {
// Narrow the unsafe.Pointer to the typed pointer the generic API expects.
p := (*byte)(p)
k := x.i.Swap(p)
if x.i.Load() != p || k != j {
t.Fatalf("p=%p i=%p j=%p k=%p", p, x.i.Load(), j, k)
}
j = p
}
if x.before != magicptr || x.after != magicptr {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
}
}
// TestAddInt32 checks that AddInt32 adds correctly over power-of-two deltas,
// comparing against an independently accumulated sum.
func TestAddInt32(t *testutil.TestRunner) {
var x struct {
before int32
i int32
after int32
}
x.before = magic32
x.after = magic32
var j int32
for delta := int32(1); delta+delta > delta; delta += delta {
k := AddInt32(&x.i, delta)
j += delta
if x.i != j || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
}
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestAddInt32Method is the atomic.Int32 method-based variant of TestAddInt32.
func TestAddInt32Method(t *testutil.TestRunner) {
var x struct {
before int32
i Int32
after int32
}
x.before = magic32
x.after = magic32
var j int32
for delta := int32(1); delta+delta > delta; delta += delta {
k := x.i.Add(delta)
j += delta
if x.i.Load() != j || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
}
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestAddUint32 checks that AddUint32 adds correctly over power-of-two deltas.
func TestAddUint32(t *testutil.TestRunner) {
var x struct {
before uint32
i uint32
after uint32
}
x.before = magic32
x.after = magic32
var j uint32
for delta := uint32(1); delta+delta > delta; delta += delta {
k := AddUint32(&x.i, delta)
j += delta
if x.i != j || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
}
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestAddUint32Method is the atomic.Uint32 method-based variant of TestAddUint32.
func TestAddUint32Method(t *testutil.TestRunner) {
var x struct {
before uint32
i Uint32
after uint32
}
x.before = magic32
x.after = magic32
var j uint32
for delta := uint32(1); delta+delta > delta; delta += delta {
k := x.i.Add(delta)
j += delta
if x.i.Load() != j || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
}
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestAddInt64 checks that AddInt64 adds correctly over power-of-two deltas.
func TestAddInt64(t *testutil.TestRunner) {
var x struct {
before int64
i int64
after int64
}
magic64 := int64(magic64)
x.before = magic64
x.after = magic64
var j int64
for delta := int64(1); delta+delta > delta; delta += delta {
k := AddInt64(&x.i, delta)
j += delta
if x.i != j || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
}
}
if x.before != magic64 || x.after != magic64 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
}
}
// TestAddInt64Method is the atomic.Int64 method-based variant of TestAddInt64.
func TestAddInt64Method(t *testutil.TestRunner) {
var x struct {
before int64
i Int64
after int64
}
magic64 := int64(magic64)
x.before = magic64
x.after = magic64
var j int64
for delta := int64(1); delta+delta > delta; delta += delta {
k := x.i.Add(delta)
j += delta
if x.i.Load() != j || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
}
}
if x.before != magic64 || x.after != magic64 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
}
}
// TestAddUint64 checks that AddUint64 adds correctly over power-of-two deltas.
func TestAddUint64(t *testutil.TestRunner) {
var x struct {
before uint64
i uint64
after uint64
}
magic64 := uint64(magic64)
x.before = magic64
x.after = magic64
var j uint64
for delta := uint64(1); delta+delta > delta; delta += delta {
k := AddUint64(&x.i, delta)
j += delta
if x.i != j || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
}
}
if x.before != magic64 || x.after != magic64 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
}
}
// TestAddUint64Method is the atomic.Uint64 method-based variant of TestAddUint64.
func TestAddUint64Method(t *testutil.TestRunner) {
var x struct {
before uint64
i Uint64
after uint64
}
magic64 := uint64(magic64)
x.before = magic64
x.after = magic64
var j uint64
for delta := uint64(1); delta+delta > delta; delta += delta {
k := x.i.Add(delta)
j += delta
if x.i.Load() != j || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
}
}
if x.before != magic64 || x.after != magic64 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
}
}
// TestAddUintptr checks that AddUintptr adds correctly over power-of-two deltas.
func TestAddUintptr(t *testutil.TestRunner) {
var x struct {
before uintptr
i uintptr
after uintptr
}
var m uint64 = magic64
magicptr := uintptr(m)
x.before = magicptr
x.after = magicptr
var j uintptr
for delta := uintptr(1); delta+delta > delta; delta += delta {
k := AddUintptr(&x.i, delta)
j += delta
if x.i != j || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
}
}
if x.before != magicptr || x.after != magicptr {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
}
}
// TestAddUintptrMethod is the atomic.Uintptr method-based variant of TestAddUintptr.
func TestAddUintptrMethod(t *testutil.TestRunner) {
var x struct {
before uintptr
i Uintptr
after uintptr
}
var m uint64 = magic64
magicptr := uintptr(m)
x.before = magicptr
x.after = magicptr
var j uintptr
for delta := uintptr(1); delta+delta > delta; delta += delta {
k := x.i.Add(delta)
j += delta
if x.i.Load() != j || k != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
}
}
if x.before != magicptr || x.after != magicptr {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
}
}
// TestCompareAndSwapInt32 checks CompareAndSwapInt32: a matching compare must
// swap, a stale compare must not, across power-of-two values.
func TestCompareAndSwapInt32(t *testutil.TestRunner) {
var x struct {
before int32
i int32
after int32
}
x.before = magic32
x.after = magic32
for val := int32(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapInt32(&x.i, val, val+1) {
t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapInt32(&x.i, val, val+2) {
t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestCompareAndSwapInt32Method is the atomic.Int32 method-based variant of
// TestCompareAndSwapInt32.
func TestCompareAndSwapInt32Method(t *testutil.TestRunner) {
var x struct {
before int32
i Int32
after int32
}
x.before = magic32
x.after = magic32
for val := int32(1); val+val > val; val += val {
x.i.Store(val)
if !x.i.CompareAndSwap(val, val+1) {
t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i.Load() != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
}
x.i.Store(val + 1)
if x.i.CompareAndSwap(val, val+2) {
t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i.Load() != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
}
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestCompareAndSwapUint32 checks CompareAndSwapUint32's success and failure paths.
func TestCompareAndSwapUint32(t *testutil.TestRunner) {
var x struct {
before uint32
i uint32
after uint32
}
x.before = magic32
x.after = magic32
for val := uint32(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapUint32(&x.i, val, val+1) {
t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapUint32(&x.i, val, val+2) {
t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestCompareAndSwapUint32Method is the atomic.Uint32 method-based variant of
// TestCompareAndSwapUint32.
func TestCompareAndSwapUint32Method(t *testutil.TestRunner) {
var x struct {
before uint32
i Uint32
after uint32
}
x.before = magic32
x.after = magic32
for val := uint32(1); val+val > val; val += val {
x.i.Store(val)
if !x.i.CompareAndSwap(val, val+1) {
t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i.Load() != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
}
x.i.Store(val + 1)
if x.i.CompareAndSwap(val, val+2) {
t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i.Load() != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
}
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestCompareAndSwapInt64 checks CompareAndSwapInt64's success and failure paths.
func TestCompareAndSwapInt64(t *testutil.TestRunner) {
var x struct {
before int64
i int64
after int64
}
magic64 := int64(magic64)
x.before = magic64
x.after = magic64
for val := int64(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapInt64(&x.i, val, val+1) {
t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapInt64(&x.i, val, val+2) {
t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magic64 || x.after != magic64 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
}
}
// TestCompareAndSwapInt64Method is the atomic.Int64 method-based variant of
// TestCompareAndSwapInt64.
func TestCompareAndSwapInt64Method(t *testutil.TestRunner) {
var x struct {
before int64
i Int64
after int64
}
magic64 := int64(magic64)
x.before = magic64
x.after = magic64
for val := int64(1); val+val > val; val += val {
x.i.Store(val)
if !x.i.CompareAndSwap(val, val+1) {
t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i.Load() != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
}
x.i.Store(val + 1)
if x.i.CompareAndSwap(val, val+2) {
t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i.Load() != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
}
}
if x.before != magic64 || x.after != magic64 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
}
}
// testCompareAndSwapUint64 is a shared helper that exercises a uint64 CAS
// implementation (passed as cas) through its success and failure paths.
// It takes testing.TB so it works with both *testing.T and the testutil runner.
func testCompareAndSwapUint64(t testing.TB, cas func(*uint64, uint64, uint64) bool) {
var x struct {
before uint64
i uint64
after uint64
}
magic64 := uint64(magic64)
x.before = magic64
x.after = magic64
for val := uint64(1); val+val > val; val += val {
x.i = val
if !cas(&x.i, val, val+1) {
t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if cas(&x.i, val, val+2) {
t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magic64 || x.after != magic64 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
}
}
// TestCompareAndSwapUint64 checks the free-function CompareAndSwapUint64 via
// the shared helper.
func TestCompareAndSwapUint64(t *testutil.TestRunner) {
testCompareAndSwapUint64(t, CompareAndSwapUint64)
}
// TestCompareAndSwapUint64Method is the atomic.Uint64 method-based variant of
// TestCompareAndSwapUint64.
func TestCompareAndSwapUint64Method(t *testutil.TestRunner) {
var x struct {
before uint64
i Uint64
after uint64
}
magic64 := uint64(magic64)
x.before = magic64
x.after = magic64
for val := uint64(1); val+val > val; val += val {
x.i.Store(val)
if !x.i.CompareAndSwap(val, val+1) {
t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i.Load() != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
}
x.i.Store(val + 1)
if x.i.CompareAndSwap(val, val+2) {
t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i.Load() != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
}
}
if x.before != magic64 || x.after != magic64 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
}
}
// TestCompareAndSwapUintptr checks CompareAndSwapUintptr's success and failure paths.
func TestCompareAndSwapUintptr(t *testutil.TestRunner) {
var x struct {
before uintptr
i uintptr
after uintptr
}
var m uint64 = magic64
magicptr := uintptr(m)
x.before = magicptr
x.after = magicptr
for val := uintptr(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapUintptr(&x.i, val, val+1) {
t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapUintptr(&x.i, val, val+2) {
t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magicptr || x.after != magicptr {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
}
}
// TestCompareAndSwapUintptrMethod is the atomic.Uintptr method-based variant of
// TestCompareAndSwapUintptr: a matching compare must swap, a stale one must not.
// The before/after fields are canaries guarding against out-of-word writes.
func TestCompareAndSwapUintptrMethod(t *testutil.TestRunner) {
var x struct {
before uintptr
i Uintptr
after uintptr
}
var m uint64 = magic64
magicptr := uintptr(m)
x.before = magicptr
x.after = magicptr
for val := uintptr(1); val+val > val; val += val {
x.i.Store(val)
if !x.i.CompareAndSwap(val, val+1) {
t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i.Load() != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
}
x.i.Store(val + 1)
if x.i.CompareAndSwap(val, val+2) {
t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i.Load() != val+1 {
t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
}
}
if x.before != magicptr || x.after != magicptr {
// magicptr is already a uintptr; the redundant uintptr(magicptr)
// conversions were dropped for consistency with the sibling tests
// (output is identical under %#x).
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
}
}
// TestCompareAndSwapPointer checks CompareAndSwapPointer's success and failure
// paths across global, heap, and nil pointers.
func TestCompareAndSwapPointer(t *testutil.TestRunner) {
var x struct {
before uintptr
i unsafe.Pointer
after uintptr
}
var m uint64 = magic64
magicptr := uintptr(m)
x.before = magicptr
x.after = magicptr
q := unsafe.Pointer(new(byte))
for _, p := range testPointers() {
x.i = p
if !CompareAndSwapPointer(&x.i, p, q) {
t.Fatalf("should have swapped %p %p", p, q)
}
if x.i != q {
t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i, q)
}
// x.i now holds q, so comparing against the stale p must fail.
if CompareAndSwapPointer(&x.i, p, nil) {
t.Fatalf("should not have swapped %p nil", p)
}
if x.i != q {
t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i, q)
}
}
if x.before != magicptr || x.after != magicptr {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
}
}
// TestCompareAndSwapPointerMethod is the generic atomic.Pointer[byte] variant
// of TestCompareAndSwapPointer.
func TestCompareAndSwapPointerMethod(t *testutil.TestRunner) {
var x struct {
before uintptr
i Pointer[byte]
after uintptr
}
var m uint64 = magic64
magicptr := uintptr(m)
x.before = magicptr
x.after = magicptr
q := new(byte)
for _, p := range testPointers() {
p := (*byte)(p)
x.i.Store(p)
if !x.i.CompareAndSwap(p, q) {
t.Fatalf("should have swapped %p %p", p, q)
}
if x.i.Load() != q {
t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i.Load(), q)
}
if x.i.CompareAndSwap(p, nil) {
t.Fatalf("should not have swapped %p nil", p)
}
if x.i.Load() != q {
t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i.Load(), q)
}
}
if x.before != magicptr || x.after != magicptr {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
}
}
// TestLoadInt32 checks that LoadInt32 observes each stored value as the
// target is advanced through power-of-two increments.
func TestLoadInt32(t *testutil.TestRunner) {
var x struct {
before int32
i int32
after int32
}
x.before = magic32
x.after = magic32
for delta := int32(1); delta+delta > delta; delta += delta {
k := LoadInt32(&x.i)
if k != x.i {
t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
}
x.i += delta
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestLoadInt32Method is the atomic.Int32 method-based variant of TestLoadInt32,
// tracking the expected value in want since the field has no direct access.
func TestLoadInt32Method(t *testutil.TestRunner) {
var x struct {
before int32
i Int32
after int32
}
x.before = magic32
x.after = magic32
want := int32(0)
for delta := int32(1); delta+delta > delta; delta += delta {
k := x.i.Load()
if k != want {
t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
}
x.i.Store(k + delta)
want = k + delta
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestLoadUint32 checks that LoadUint32 observes each stored value.
func TestLoadUint32(t *testutil.TestRunner) {
var x struct {
before uint32
i uint32
after uint32
}
x.before = magic32
x.after = magic32
for delta := uint32(1); delta+delta > delta; delta += delta {
k := LoadUint32(&x.i)
if k != x.i {
t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
}
x.i += delta
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
func TestLoadUint32Method(t *testutil.TestRunner) {
var x struct {
before uint32
i Uint32
after uint32
}
x.before = magic32
x.after = magic32
want := uint32(0)
for delta := uint32(1); delta+delta > delta; delta += delta {
k := x.i.Load()
if k != want {
t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
}
x.i.Store(k + delta)
want = k + delta
}
if x.before != magic32 || x.after != magic32 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
}
}
// TestLoadInt64 is the 64-bit variant of TestLoadInt32.
func TestLoadInt64(t *testutil.TestRunner) {
	var x struct {
		before int64
		i int64
		after int64
	}
	// Shadow the package-level magic64 with a typed copy for comparisons.
	magic64 := int64(magic64)
	x.before = magic64
	x.after = magic64
	for delta := int64(1); delta+delta > delta; delta += delta {
		k := LoadInt64(&x.i)
		if k != x.i {
			t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
		}
		x.i += delta
	}
	if x.before != magic64 || x.after != magic64 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
	}
}
// TestLoadInt64Method is the Int64.Load variant of TestLoadInt64.
func TestLoadInt64Method(t *testutil.TestRunner) {
	var x struct {
		before int64
		i Int64
		after int64
	}
	magic64 := int64(magic64)
	x.before = magic64
	x.after = magic64
	want := int64(0)
	for delta := int64(1); delta+delta > delta; delta += delta {
		k := x.i.Load()
		if k != want {
			t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
		}
		x.i.Store(k + delta)
		want = k + delta
	}
	if x.before != magic64 || x.after != magic64 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
	}
}
// TestLoadUint64 is the uint64 variant of TestLoadInt64.
func TestLoadUint64(t *testutil.TestRunner) {
	var x struct {
		before uint64
		i uint64
		after uint64
	}
	magic64 := uint64(magic64)
	x.before = magic64
	x.after = magic64
	for delta := uint64(1); delta+delta > delta; delta += delta {
		k := LoadUint64(&x.i)
		if k != x.i {
			t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
		}
		x.i += delta
	}
	if x.before != magic64 || x.after != magic64 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
	}
}
// TestLoadUint64Method is the Uint64.Load variant of TestLoadUint64.
func TestLoadUint64Method(t *testutil.TestRunner) {
	var x struct {
		before uint64
		i Uint64
		after uint64
	}
	magic64 := uint64(magic64)
	x.before = magic64
	x.after = magic64
	want := uint64(0)
	for delta := uint64(1); delta+delta > delta; delta += delta {
		k := x.i.Load()
		if k != want {
			t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
		}
		x.i.Store(k + delta)
		want = k + delta
	}
	if x.before != magic64 || x.after != magic64 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
	}
}
// TestLoadUintptr is the uintptr variant of TestLoadInt32.
func TestLoadUintptr(t *testutil.TestRunner) {
	var x struct {
		before uintptr
		i uintptr
		after uintptr
	}
	var m uint64 = magic64
	// Truncates to 32 bits on 32-bit platforms; still a valid guard value.
	magicptr := uintptr(m)
	x.before = magicptr
	x.after = magicptr
	for delta := uintptr(1); delta+delta > delta; delta += delta {
		k := LoadUintptr(&x.i)
		if k != x.i {
			t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
		}
		x.i += delta
	}
	if x.before != magicptr || x.after != magicptr {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
	}
}
// TestLoadUintptrMethod is the Uintptr.Load variant of TestLoadUintptr.
func TestLoadUintptrMethod(t *testutil.TestRunner) {
	var x struct {
		before uintptr
		i Uintptr
		after uintptr
	}
	var m uint64 = magic64
	magicptr := uintptr(m)
	x.before = magicptr
	x.after = magicptr
	want := uintptr(0)
	for delta := uintptr(1); delta+delta > delta; delta += delta {
		k := x.i.Load()
		if k != want {
			t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
		}
		x.i.Store(k + delta)
		want = k + delta
	}
	if x.before != magicptr || x.after != magicptr {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
	}
}
// TestLoadPointer checks LoadPointer round-trips every test pointer.
func TestLoadPointer(t *testutil.TestRunner) {
	var x struct {
		before uintptr
		i unsafe.Pointer
		after uintptr
	}
	var m uint64 = magic64
	magicptr := uintptr(m)
	x.before = magicptr
	x.after = magicptr
	for _, p := range testPointers() {
		x.i = p
		k := LoadPointer(&x.i)
		if k != p {
			t.Fatalf("p=%x k=%x", p, k)
		}
	}
	if x.before != magicptr || x.after != magicptr {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
	}
}
// TestLoadPointerMethod is the Pointer[byte].Load variant of TestLoadPointer.
func TestLoadPointerMethod(t *testutil.TestRunner) {
	var x struct {
		before uintptr
		i Pointer[byte]
		after uintptr
	}
	var m uint64 = magic64
	magicptr := uintptr(m)
	x.before = magicptr
	x.after = magicptr
	for _, p := range testPointers() {
		p := (*byte)(p)
		x.i.Store(p)
		k := x.i.Load()
		if k != p {
			t.Fatalf("p=%x k=%x", p, k)
		}
	}
	if x.before != magicptr || x.after != magicptr {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
	}
}
// TestStoreInt32 checks that StoreInt32 writes exactly the requested value;
// magic guard words before/after the field detect stray writes.
func TestStoreInt32(t *testutil.TestRunner) {
	var x struct {
		before int32
		i int32
		after int32
	}
	x.before = magic32
	x.after = magic32
	v := int32(0)
	// delta doubles until overflow, storing values with every bit pattern.
	for delta := int32(1); delta+delta > delta; delta += delta {
		StoreInt32(&x.i, v)
		if x.i != v {
			t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
		}
		v += delta
	}
	if x.before != magic32 || x.after != magic32 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
	}
}
// TestStoreInt32Method is the Int32.Store variant of TestStoreInt32.
func TestStoreInt32Method(t *testutil.TestRunner) {
	var x struct {
		before int32
		i Int32
		after int32
	}
	x.before = magic32
	x.after = magic32
	v := int32(0)
	for delta := int32(1); delta+delta > delta; delta += delta {
		x.i.Store(v)
		if x.i.Load() != v {
			t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
		}
		v += delta
	}
	if x.before != magic32 || x.after != magic32 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
	}
}
// TestStoreUint32 is the uint32 variant of TestStoreInt32.
func TestStoreUint32(t *testutil.TestRunner) {
	var x struct {
		before uint32
		i uint32
		after uint32
	}
	x.before = magic32
	x.after = magic32
	v := uint32(0)
	for delta := uint32(1); delta+delta > delta; delta += delta {
		StoreUint32(&x.i, v)
		if x.i != v {
			t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
		}
		v += delta
	}
	if x.before != magic32 || x.after != magic32 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
	}
}
// TestStoreUint32Method is the Uint32.Store variant of TestStoreUint32.
func TestStoreUint32Method(t *testutil.TestRunner) {
	var x struct {
		before uint32
		i Uint32
		after uint32
	}
	x.before = magic32
	x.after = magic32
	v := uint32(0)
	for delta := uint32(1); delta+delta > delta; delta += delta {
		x.i.Store(v)
		if x.i.Load() != v {
			t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
		}
		v += delta
	}
	if x.before != magic32 || x.after != magic32 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
	}
}
// TestStoreInt64 is the 64-bit variant of TestStoreInt32.
func TestStoreInt64(t *testutil.TestRunner) {
	var x struct {
		before int64
		i int64
		after int64
	}
	// Shadow the package-level magic64 with a typed copy for comparisons.
	magic64 := int64(magic64)
	x.before = magic64
	x.after = magic64
	v := int64(0)
	for delta := int64(1); delta+delta > delta; delta += delta {
		StoreInt64(&x.i, v)
		if x.i != v {
			t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
		}
		v += delta
	}
	if x.before != magic64 || x.after != magic64 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
	}
}
// TestStoreInt64Method is the Int64.Store variant of TestStoreInt64.
func TestStoreInt64Method(t *testutil.TestRunner) {
	var x struct {
		before int64
		i Int64
		after int64
	}
	magic64 := int64(magic64)
	x.before = magic64
	x.after = magic64
	v := int64(0)
	for delta := int64(1); delta+delta > delta; delta += delta {
		x.i.Store(v)
		if x.i.Load() != v {
			t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
		}
		v += delta
	}
	if x.before != magic64 || x.after != magic64 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
	}
}
// TestStoreUint64 is the uint64 variant of TestStoreInt64.
func TestStoreUint64(t *testutil.TestRunner) {
	var x struct {
		before uint64
		i uint64
		after uint64
	}
	magic64 := uint64(magic64)
	x.before = magic64
	x.after = magic64
	v := uint64(0)
	for delta := uint64(1); delta+delta > delta; delta += delta {
		StoreUint64(&x.i, v)
		if x.i != v {
			t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
		}
		v += delta
	}
	if x.before != magic64 || x.after != magic64 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
	}
}
// TestStoreUint64Method is the Uint64.Store variant of TestStoreUint64.
func TestStoreUint64Method(t *testutil.TestRunner) {
	var x struct {
		before uint64
		i Uint64
		after uint64
	}
	magic64 := uint64(magic64)
	x.before = magic64
	x.after = magic64
	v := uint64(0)
	for delta := uint64(1); delta+delta > delta; delta += delta {
		x.i.Store(v)
		if x.i.Load() != v {
			t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
		}
		v += delta
	}
	if x.before != magic64 || x.after != magic64 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
	}
}
// TestStoreUintptr is the uintptr variant of TestStoreInt32.
func TestStoreUintptr(t *testutil.TestRunner) {
	var x struct {
		before uintptr
		i uintptr
		after uintptr
	}
	var m uint64 = magic64
	// Truncates to 32 bits on 32-bit platforms; still a valid guard value.
	magicptr := uintptr(m)
	x.before = magicptr
	x.after = magicptr
	v := uintptr(0)
	for delta := uintptr(1); delta+delta > delta; delta += delta {
		StoreUintptr(&x.i, v)
		if x.i != v {
			t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
		}
		v += delta
	}
	if x.before != magicptr || x.after != magicptr {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
	}
}
// TestStoreUintptrMethod is the Uintptr.Store variant of TestStoreUintptr.
func TestStoreUintptrMethod(t *testutil.TestRunner) {
	var x struct {
		before uintptr
		i Uintptr
		after uintptr
	}
	var m uint64 = magic64
	magicptr := uintptr(m)
	x.before = magicptr
	x.after = magicptr
	v := uintptr(0)
	for delta := uintptr(1); delta+delta > delta; delta += delta {
		x.i.Store(v)
		if x.i.Load() != v {
			t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
		}
		v += delta
	}
	if x.before != magicptr || x.after != magicptr {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
	}
}
// TestStorePointer checks StorePointer round-trips every test pointer.
func TestStorePointer(t *testutil.TestRunner) {
	var x struct {
		before uintptr
		i unsafe.Pointer
		after uintptr
	}
	var m uint64 = magic64
	magicptr := uintptr(m)
	x.before = magicptr
	x.after = magicptr
	for _, p := range testPointers() {
		StorePointer(&x.i, p)
		if x.i != p {
			t.Fatalf("x.i=%p p=%p", x.i, p)
		}
	}
	if x.before != magicptr || x.after != magicptr {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
	}
}
// TestStorePointerMethod is the Pointer[byte].Store variant of TestStorePointer.
func TestStorePointerMethod(t *testutil.TestRunner) {
	var x struct {
		before uintptr
		i Pointer[byte]
		after uintptr
	}
	var m uint64 = magic64
	magicptr := uintptr(m)
	x.before = magicptr
	x.after = magicptr
	for _, p := range testPointers() {
		p := (*byte)(p)
		x.i.Store(p)
		if x.i.Load() != p {
			t.Fatalf("x.i=%p p=%p", x.i.Load(), p)
		}
	}
	if x.before != magicptr || x.after != magicptr {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
	}
}
// Tests of correct behavior, with contention.
// (Is the function atomic?)
//
// For each function, we write a "hammer" function that repeatedly
// uses the atomic operation to add 1 to a value. After running
// multiple hammers in parallel, check that we end with the correct
// total.
// Swap can't add 1, so it uses a different scheme.
// The functions repeatedly generate a pseudo-random number such that
// low bits are equal to high bits, swap, check that the old value
// has low and high bits equal.
// hammer32 maps a name to each 32-bit hammer, covering both the
// free-function API and the typed-method ("...Method") API.
var hammer32 = map[string]func(*uint32, int){
	"SwapInt32": hammerSwapInt32,
	"SwapUint32": hammerSwapUint32,
	"SwapUintptr": hammerSwapUintptr32,
	"AddInt32": hammerAddInt32,
	"AddUint32": hammerAddUint32,
	"AddUintptr": hammerAddUintptr32,
	"CompareAndSwapInt32": hammerCompareAndSwapInt32,
	"CompareAndSwapUint32": hammerCompareAndSwapUint32,
	"CompareAndSwapUintptr": hammerCompareAndSwapUintptr32,
	"SwapInt32Method": hammerSwapInt32Method,
	"SwapUint32Method": hammerSwapUint32Method,
	"SwapUintptrMethod": hammerSwapUintptr32Method,
	"AddInt32Method": hammerAddInt32Method,
	"AddUint32Method": hammerAddUint32Method,
	"AddUintptrMethod": hammerAddUintptr32Method,
	"CompareAndSwapInt32Method": hammerCompareAndSwapInt32Method,
	"CompareAndSwapUint32Method": hammerCompareAndSwapUint32Method,
	"CompareAndSwapUintptrMethod": hammerCompareAndSwapUintptr32Method,
}
// init removes the uintptr hammers from hammer32 on 64-bit systems,
// where a uintptr does not fit in the shared 32-bit word.
func init() {
	var v uint64 = 1 << 50
	if uintptr(v) != 0 {
		// 64-bit system; clear uintptr tests
		delete(hammer32, "SwapUintptr")
		delete(hammer32, "AddUintptr")
		delete(hammer32, "CompareAndSwapUintptr")
		delete(hammer32, "SwapUintptrMethod")
		delete(hammer32, "AddUintptrMethod")
		delete(hammer32, "CompareAndSwapUintptrMethod")
	}
}
// hammerSwapInt32 repeatedly swaps in values whose low and high 16-bit
// halves are equal and panics if a torn (half-written) value is ever
// observed as the old value.
func hammerSwapInt32(uaddr *uint32, count int) {
	addr := (*int32)(unsafe.Pointer(uaddr))
	// seed derives from a stack address purely to vary values per goroutine.
	seed := int(uintptr(unsafe.Pointer(&count)))
	for i := 0; i < count; i++ {
		new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
		old := uint32(SwapInt32(addr, int32(new)))
		if old>>16 != old<<16>>16 {
			panic(fmt.Sprintf("SwapInt32 is not atomic: %v", old))
		}
	}
}
// hammerSwapInt32Method is the Int32.Swap variant of hammerSwapInt32.
func hammerSwapInt32Method(uaddr *uint32, count int) {
	addr := (*Int32)(unsafe.Pointer(uaddr))
	seed := int(uintptr(unsafe.Pointer(&count)))
	for i := 0; i < count; i++ {
		new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
		old := uint32(addr.Swap(int32(new)))
		if old>>16 != old<<16>>16 {
			panic(fmt.Sprintf("SwapInt32 is not atomic: %v", old))
		}
	}
}
// hammerSwapUint32 is the uint32 variant of hammerSwapInt32.
func hammerSwapUint32(addr *uint32, count int) {
	seed := int(uintptr(unsafe.Pointer(&count)))
	for i := 0; i < count; i++ {
		new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
		old := SwapUint32(addr, new)
		if old>>16 != old<<16>>16 {
			panic(fmt.Sprintf("SwapUint32 is not atomic: %v", old))
		}
	}
}
// hammerSwapUint32Method is the Uint32.Swap variant of hammerSwapUint32.
func hammerSwapUint32Method(uaddr *uint32, count int) {
	addr := (*Uint32)(unsafe.Pointer(uaddr))
	seed := int(uintptr(unsafe.Pointer(&count)))
	for i := 0; i < count; i++ {
		new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
		old := addr.Swap(new)
		if old>>16 != old<<16>>16 {
			panic(fmt.Sprintf("SwapUint32 is not atomic: %v", old))
		}
	}
}
// hammerSwapUintptr32 is the uintptr variant of hammerSwapInt32.
func hammerSwapUintptr32(uaddr *uint32, count int) {
	// only safe when uintptr is 32-bit.
	// not called on 64-bit systems.
	addr := (*uintptr)(unsafe.Pointer(uaddr))
	seed := int(uintptr(unsafe.Pointer(&count)))
	for i := 0; i < count; i++ {
		new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16
		old := SwapUintptr(addr, new)
		if old>>16 != old<<16>>16 {
			panic(fmt.Sprintf("SwapUintptr is not atomic: %#08x", old))
		}
	}
}
// hammerSwapUintptr32Method is the Uintptr.Swap variant of hammerSwapUintptr32.
func hammerSwapUintptr32Method(uaddr *uint32, count int) {
	// only safe when uintptr is 32-bit.
	// not called on 64-bit systems.
	addr := (*Uintptr)(unsafe.Pointer(uaddr))
	seed := int(uintptr(unsafe.Pointer(&count)))
	for i := 0; i < count; i++ {
		new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16
		old := addr.Swap(new)
		if old>>16 != old<<16>>16 {
			panic(fmt.Sprintf("Uintptr.Swap is not atomic: %#08x", old))
		}
	}
}
// hammerAddInt32 increments *uaddr count times with AddInt32, viewing the
// shared word as an int32.
func hammerAddInt32(uaddr *uint32, count int) {
	signed := (*int32)(unsafe.Pointer(uaddr))
	for n := 0; n < count; n++ {
		AddInt32(signed, 1)
	}
}
// hammerAddInt32Method is the Int32.Add variant of hammerAddInt32.
func hammerAddInt32Method(uaddr *uint32, count int) {
	addr := (*Int32)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		addr.Add(1)
	}
}
// hammerAddUint32 increments *addr count times with AddUint32.
func hammerAddUint32(addr *uint32, count int) {
	for done := 0; done < count; done++ {
		AddUint32(addr, 1)
	}
}
// hammerAddUint32Method is the Uint32.Add variant of hammerAddUint32.
func hammerAddUint32Method(uaddr *uint32, count int) {
	addr := (*Uint32)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		addr.Add(1)
	}
}
// hammerAddUintptr32 is the uintptr variant of hammerAddUint32.
func hammerAddUintptr32(uaddr *uint32, count int) {
	// only safe when uintptr is 32-bit.
	// not called on 64-bit systems.
	addr := (*uintptr)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		AddUintptr(addr, 1)
	}
}
// hammerAddUintptr32Method is the Uintptr.Add variant of hammerAddUintptr32.
func hammerAddUintptr32Method(uaddr *uint32, count int) {
	// only safe when uintptr is 32-bit.
	// not called on 64-bit systems.
	addr := (*Uintptr)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		addr.Add(1)
	}
}
// hammerCompareAndSwapInt32 performs count atomic increments of *uaddr via
// a load/CAS retry loop, viewing the shared word as an int32.
func hammerCompareAndSwapInt32(uaddr *uint32, count int) {
	addr := (*int32)(unsafe.Pointer(uaddr))
	for done := 0; done < count; done++ {
		// Retry until our CAS wins the race.
		for swapped := false; !swapped; {
			old := LoadInt32(addr)
			swapped = CompareAndSwapInt32(addr, old, old+1)
		}
	}
}
// hammerCompareAndSwapInt32Method is the Int32.CompareAndSwap variant of
// hammerCompareAndSwapInt32.
func hammerCompareAndSwapInt32Method(uaddr *uint32, count int) {
	addr := (*Int32)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		for {
			v := addr.Load()
			if addr.CompareAndSwap(v, v+1) {
				break
			}
		}
	}
}
// hammerCompareAndSwapUint32 increments *addr count times via load/CAS retry.
func hammerCompareAndSwapUint32(addr *uint32, count int) {
	for i := 0; i < count; i++ {
		for {
			v := LoadUint32(addr)
			if CompareAndSwapUint32(addr, v, v+1) {
				break
			}
		}
	}
}
// hammerCompareAndSwapUint32Method is the Uint32.CompareAndSwap variant.
func hammerCompareAndSwapUint32Method(uaddr *uint32, count int) {
	addr := (*Uint32)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		for {
			v := addr.Load()
			if addr.CompareAndSwap(v, v+1) {
				break
			}
		}
	}
}
// hammerCompareAndSwapUintptr32 is the uintptr variant of the CAS hammer.
func hammerCompareAndSwapUintptr32(uaddr *uint32, count int) {
	// only safe when uintptr is 32-bit.
	// not called on 64-bit systems.
	addr := (*uintptr)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		for {
			v := LoadUintptr(addr)
			if CompareAndSwapUintptr(addr, v, v+1) {
				break
			}
		}
	}
}
// hammerCompareAndSwapUintptr32Method is the Uintptr.CompareAndSwap variant.
func hammerCompareAndSwapUintptr32Method(uaddr *uint32, count int) {
	// only safe when uintptr is 32-bit.
	// not called on 64-bit systems.
	addr := (*Uintptr)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		for {
			v := addr.Load()
			if addr.CompareAndSwap(v, v+1) {
				break
			}
		}
	}
}
// TestHammer32 runs every hammer32 function from p goroutines against one
// shared word and checks the final total (Add/CAS hammers each contribute
// exactly n increments; Swap hammers self-check via panic, which the
// deferred recover converts into a test error).
func TestHammer32(t *testutil.TestRunner) {
	const p = 4
	n := 100000
	if short {
		n = 1000
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
	for name, testf := range hammer32 {
		c := make(chan int)
		var val uint32
		for i := 0; i < p; i++ {
			go func() {
				defer func() {
					// Convert a hammer panic into a test failure; always
					// signal completion so the drain loop below finishes.
					if err := recover(); err != nil {
						t.Error(err.(string))
					}
					c <- 1
				}()
				testf(&val, n)
			}()
		}
		for i := 0; i < p; i++ {
			<-c
		}
		// Swap hammers store arbitrary patterns, so only the counting
		// hammers have a predictable final value.
		if !strings.HasPrefix(name, "Swap") && val != uint32(n)*p {
			t.Fatalf("%s: val=%d want %d", name, val, n*p)
		}
	}
}
// hammer64 maps a name to each 64-bit hammer, covering both the
// free-function API and the typed-method ("...Method") API.
var hammer64 = map[string]func(*uint64, int){
	"SwapInt64": hammerSwapInt64,
	"SwapUint64": hammerSwapUint64,
	"SwapUintptr": hammerSwapUintptr64,
	"AddInt64": hammerAddInt64,
	"AddUint64": hammerAddUint64,
	"AddUintptr": hammerAddUintptr64,
	"CompareAndSwapInt64": hammerCompareAndSwapInt64,
	"CompareAndSwapUint64": hammerCompareAndSwapUint64,
	"CompareAndSwapUintptr": hammerCompareAndSwapUintptr64,
	"SwapInt64Method": hammerSwapInt64Method,
	"SwapUint64Method": hammerSwapUint64Method,
	"SwapUintptrMethod": hammerSwapUintptr64Method,
	"AddInt64Method": hammerAddInt64Method,
	"AddUint64Method": hammerAddUint64Method,
	"AddUintptrMethod": hammerAddUintptr64Method,
	"CompareAndSwapInt64Method": hammerCompareAndSwapInt64Method,
	"CompareAndSwapUint64Method": hammerCompareAndSwapUint64Method,
	"CompareAndSwapUintptrMethod": hammerCompareAndSwapUintptr64Method,
}
// init removes the uintptr hammers from hammer64 on 32-bit systems,
// where a uintptr cannot span the shared 64-bit word.
func init() {
	var v uint64 = 1 << 50
	if uintptr(v) == 0 {
		// 32-bit system; clear uintptr tests
		delete(hammer64, "SwapUintptr")
		delete(hammer64, "SwapUintptrMethod")
		delete(hammer64, "AddUintptr")
		delete(hammer64, "AddUintptrMethod")
		delete(hammer64, "CompareAndSwapUintptr")
		delete(hammer64, "CompareAndSwapUintptrMethod")
	}
}
// hammerSwapInt64 repeatedly swaps in values whose low and high 32-bit
// halves are equal and panics if a torn value is ever observed.
func hammerSwapInt64(uaddr *uint64, count int) {
	addr := (*int64)(unsafe.Pointer(uaddr))
	// seed derives from a stack address purely to vary values per goroutine.
	seed := int(uintptr(unsafe.Pointer(&count)))
	for i := 0; i < count; i++ {
		new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
		old := uint64(SwapInt64(addr, int64(new)))
		if old>>32 != old<<32>>32 {
			panic(fmt.Sprintf("SwapInt64 is not atomic: %v", old))
		}
	}
}
// hammerSwapInt64Method is the Int64.Swap variant of hammerSwapInt64.
func hammerSwapInt64Method(uaddr *uint64, count int) {
	addr := (*Int64)(unsafe.Pointer(uaddr))
	seed := int(uintptr(unsafe.Pointer(&count)))
	for i := 0; i < count; i++ {
		new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
		old := uint64(addr.Swap(int64(new)))
		if old>>32 != old<<32>>32 {
			panic(fmt.Sprintf("SwapInt64 is not atomic: %v", old))
		}
	}
}
// hammerSwapUint64 is the uint64 variant of hammerSwapInt64.
func hammerSwapUint64(addr *uint64, count int) {
	seed := int(uintptr(unsafe.Pointer(&count)))
	for i := 0; i < count; i++ {
		new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
		old := SwapUint64(addr, new)
		if old>>32 != old<<32>>32 {
			panic(fmt.Sprintf("SwapUint64 is not atomic: %v", old))
		}
	}
}
// hammerSwapUint64Method is the Uint64.Swap variant of hammerSwapUint64.
func hammerSwapUint64Method(uaddr *uint64, count int) {
	addr := (*Uint64)(unsafe.Pointer(uaddr))
	seed := int(uintptr(unsafe.Pointer(&count)))
	for i := 0; i < count; i++ {
		new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
		old := addr.Swap(new)
		if old>>32 != old<<32>>32 {
			panic(fmt.Sprintf("SwapUint64 is not atomic: %v", old))
		}
	}
}
// arch32 reports whether this is a 32-bit platform (uintptr is 4 bytes).
const arch32 = unsafe.Sizeof(uintptr(0)) == 4
// hammerSwapUintptr64 is the uintptr variant of hammerSwapInt64.
func hammerSwapUintptr64(uaddr *uint64, count int) {
	// only safe when uintptr is 64-bit.
	// not called on 32-bit systems.
	if !arch32 {
		addr := (*uintptr)(unsafe.Pointer(uaddr))
		seed := int(uintptr(unsafe.Pointer(&count)))
		for i := 0; i < count; i++ {
			new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32
			old := SwapUintptr(addr, new)
			if old>>32 != old<<32>>32 {
				panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old))
			}
		}
	}
}
// hammerSwapUintptr64Method is the Uintptr.Swap variant of hammerSwapUintptr64.
func hammerSwapUintptr64Method(uaddr *uint64, count int) {
	// only safe when uintptr is 64-bit.
	// not called on 32-bit systems.
	if !arch32 {
		addr := (*Uintptr)(unsafe.Pointer(uaddr))
		seed := int(uintptr(unsafe.Pointer(&count)))
		for i := 0; i < count; i++ {
			new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32
			old := addr.Swap(new)
			if old>>32 != old<<32>>32 {
				panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old))
			}
		}
	}
}
// hammerAddInt64 increments *uaddr count times with AddInt64.
func hammerAddInt64(uaddr *uint64, count int) {
	addr := (*int64)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		AddInt64(addr, 1)
	}
}
// hammerAddInt64Method is the Int64.Add variant of hammerAddInt64.
func hammerAddInt64Method(uaddr *uint64, count int) {
	addr := (*Int64)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		addr.Add(1)
	}
}
// hammerAddUint64 increments *addr count times with AddUint64.
func hammerAddUint64(addr *uint64, count int) {
	for i := 0; i < count; i++ {
		AddUint64(addr, 1)
	}
}
// hammerAddUint64Method is the Uint64.Add variant of hammerAddUint64.
func hammerAddUint64Method(uaddr *uint64, count int) {
	addr := (*Uint64)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		addr.Add(1)
	}
}
// hammerAddUintptr64 is the uintptr variant of hammerAddUint64.
func hammerAddUintptr64(uaddr *uint64, count int) {
	// only safe when uintptr is 64-bit.
	// not called on 32-bit systems.
	addr := (*uintptr)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		AddUintptr(addr, 1)
	}
}
// hammerAddUintptr64Method is the Uintptr.Add variant of hammerAddUintptr64.
func hammerAddUintptr64Method(uaddr *uint64, count int) {
	// only safe when uintptr is 64-bit.
	// not called on 32-bit systems.
	addr := (*Uintptr)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		addr.Add(1)
	}
}
// hammerCompareAndSwapInt64 increments *uaddr count times via load/CAS retry.
func hammerCompareAndSwapInt64(uaddr *uint64, count int) {
	addr := (*int64)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		for {
			v := LoadInt64(addr)
			if CompareAndSwapInt64(addr, v, v+1) {
				break
			}
		}
	}
}
// hammerCompareAndSwapInt64Method is the Int64.CompareAndSwap variant.
func hammerCompareAndSwapInt64Method(uaddr *uint64, count int) {
	addr := (*Int64)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		for {
			v := addr.Load()
			if addr.CompareAndSwap(v, v+1) {
				break
			}
		}
	}
}
// hammerCompareAndSwapUint64 is the uint64 variant of the 64-bit CAS hammer.
func hammerCompareAndSwapUint64(addr *uint64, count int) {
	for i := 0; i < count; i++ {
		for {
			v := LoadUint64(addr)
			if CompareAndSwapUint64(addr, v, v+1) {
				break
			}
		}
	}
}
// hammerCompareAndSwapUint64Method is the Uint64.CompareAndSwap variant.
func hammerCompareAndSwapUint64Method(uaddr *uint64, count int) {
	addr := (*Uint64)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		for {
			v := addr.Load()
			if addr.CompareAndSwap(v, v+1) {
				break
			}
		}
	}
}
// hammerCompareAndSwapUintptr64 is the uintptr variant of the CAS hammer.
func hammerCompareAndSwapUintptr64(uaddr *uint64, count int) {
	// only safe when uintptr is 64-bit.
	// not called on 32-bit systems.
	addr := (*uintptr)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		for {
			v := LoadUintptr(addr)
			if CompareAndSwapUintptr(addr, v, v+1) {
				break
			}
		}
	}
}
// hammerCompareAndSwapUintptr64Method is the Uintptr.CompareAndSwap variant.
func hammerCompareAndSwapUintptr64Method(uaddr *uint64, count int) {
	// only safe when uintptr is 64-bit.
	// not called on 32-bit systems.
	addr := (*Uintptr)(unsafe.Pointer(uaddr))
	for i := 0; i < count; i++ {
		for {
			v := addr.Load()
			if addr.CompareAndSwap(v, v+1) {
				break
			}
		}
	}
}
// TestHammer64 runs every hammer64 function from p goroutines against one
// shared word and checks the final total; Swap hammers self-check via panic,
// which the deferred recover converts into a test error.
func TestHammer64(t *testutil.TestRunner) {
	const p = 4
	n := 100000
	if short {
		n = 1000
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
	for name, testf := range hammer64 {
		c := make(chan int)
		var val uint64
		for i := 0; i < p; i++ {
			go func() {
				defer func() {
					// Convert a hammer panic into a test failure; always
					// signal completion so the drain loop below finishes.
					if err := recover(); err != nil {
						t.Error(err.(string))
					}
					c <- 1
				}()
				testf(&val, n)
			}()
		}
		for i := 0; i < p; i++ {
			<-c
		}
		// Swap hammers store arbitrary patterns, so only the counting
		// hammers have a predictable final value.
		if !strings.HasPrefix(name, "Swap") && val != uint64(n)*p {
			t.Fatalf("%s: val=%d want %d", name, val, n*p)
		}
	}
}
func hammerStoreLoadInt32(t testing.TB, paddr unsafe.Pointer) {
addr := (*int32)(paddr)
v := LoadInt32(addr)
vlo := v & ((1 << 16) - 1)
vhi := v >> 16
if vlo != vhi {
t.Fatalf("Int32: %#x != %#x", vlo, vhi)
}
new := v + 1 + 1<<16
if vlo == 1e4 {
new = 0
}
StoreInt32(addr, new)
}
func hammerStoreLoadInt32Method(t testing.TB, paddr unsafe.Pointer) {
addr := (*int32)(paddr)
v := LoadInt32(addr)
vlo := v & ((1 << 16) - 1)
vhi := v >> 16
if vlo != vhi {
t.Fatalf("Int32: %#x != %#x", vlo, vhi)
}
new := v + 1 + 1<<16
if vlo == 1e4 {
new = 0
}
StoreInt32(addr, new)
}
// hammerStoreLoadUint32 is the uint32 variant of hammerStoreLoadInt32.
func hammerStoreLoadUint32(t testing.TB, paddr unsafe.Pointer) {
	addr := (*uint32)(paddr)
	v := LoadUint32(addr)
	vlo := v & ((1 << 16) - 1)
	vhi := v >> 16
	if vlo != vhi {
		t.Fatalf("Uint32: %#x != %#x", vlo, vhi)
	}
	new := v + 1 + 1<<16
	if vlo == 1e4 {
		// Wrap before either 16-bit half can overflow.
		new = 0
	}
	StoreUint32(addr, new)
}
// hammerStoreLoadUint32Method is the Uint32.Load/Store variant.
func hammerStoreLoadUint32Method(t testing.TB, paddr unsafe.Pointer) {
	addr := (*Uint32)(paddr)
	v := addr.Load()
	vlo := v & ((1 << 16) - 1)
	vhi := v >> 16
	if vlo != vhi {
		t.Fatalf("Uint32: %#x != %#x", vlo, vhi)
	}
	new := v + 1 + 1<<16
	if vlo == 1e4 {
		new = 0
	}
	addr.Store(new)
}
// hammerStoreLoadInt64 checks LoadInt64/StoreInt64 atomicity by keeping the
// two 32-bit halves equal; no wrap is needed since the halves cannot
// realistically overflow within a test run.
func hammerStoreLoadInt64(t testing.TB, paddr unsafe.Pointer) {
	addr := (*int64)(paddr)
	v := LoadInt64(addr)
	vlo := v & ((1 << 32) - 1)
	vhi := v >> 32
	if vlo != vhi {
		t.Fatalf("Int64: %#x != %#x", vlo, vhi)
	}
	new := v + 1 + 1<<32
	StoreInt64(addr, new)
}
// hammerStoreLoadInt64Method is the Int64.Load/Store variant.
func hammerStoreLoadInt64Method(t testing.TB, paddr unsafe.Pointer) {
	addr := (*Int64)(paddr)
	v := addr.Load()
	vlo := v & ((1 << 32) - 1)
	vhi := v >> 32
	if vlo != vhi {
		t.Fatalf("Int64: %#x != %#x", vlo, vhi)
	}
	new := v + 1 + 1<<32
	addr.Store(new)
}
// hammerStoreLoadUint64 is the uint64 variant of hammerStoreLoadInt64.
func hammerStoreLoadUint64(t testing.TB, paddr unsafe.Pointer) {
	addr := (*uint64)(paddr)
	v := LoadUint64(addr)
	vlo := v & ((1 << 32) - 1)
	vhi := v >> 32
	if vlo != vhi {
		t.Fatalf("Uint64: %#x != %#x", vlo, vhi)
	}
	new := v + 1 + 1<<32
	StoreUint64(addr, new)
}
// hammerStoreLoadUint64Method is the Uint64.Load/Store variant.
func hammerStoreLoadUint64Method(t testing.TB, paddr unsafe.Pointer) {
	addr := (*Uint64)(paddr)
	v := addr.Load()
	vlo := v & ((1 << 32) - 1)
	vhi := v >> 32
	if vlo != vhi {
		t.Fatalf("Uint64: %#x != %#x", vlo, vhi)
	}
	new := v + 1 + 1<<32
	addr.Store(new)
}
// hammerStoreLoadUintptr checks LoadUintptr/StoreUintptr atomicity, splitting
// the word into 16-bit halves on 32-bit platforms and 32-bit halves otherwise.
func hammerStoreLoadUintptr(t testing.TB, paddr unsafe.Pointer) {
	addr := (*uintptr)(paddr)
	v := LoadUintptr(addr)
	new := v
	if arch32 {
		vlo := v & ((1 << 16) - 1)
		vhi := v >> 16
		if vlo != vhi {
			t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
		}
		new = v + 1 + 1<<16
		if vlo == 1e4 {
			// Wrap before either 16-bit half can overflow.
			new = 0
		}
	} else {
		vlo := v & ((1 << 32) - 1)
		vhi := v >> 32
		if vlo != vhi {
			t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
		}
		// Computed via uint64 so this also compiles for 32-bit builds,
		// where the untyped constant 1<<32 would not fit in uintptr.
		inc := uint64(1 + 1<<32)
		new = v + uintptr(inc)
	}
	StoreUintptr(addr, new)
}
//go:nocheckptr
// hammerStoreLoadUintptrMethod is the Uintptr.Load/Store variant of
// hammerStoreLoadUintptr.
func hammerStoreLoadUintptrMethod(t testing.TB, paddr unsafe.Pointer) {
	addr := (*Uintptr)(paddr)
	v := addr.Load()
	new := v
	if arch32 {
		vlo := v & ((1 << 16) - 1)
		vhi := v >> 16
		if vlo != vhi {
			t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
		}
		new = v + 1 + 1<<16
		if vlo == 1e4 {
			new = 0
		}
	} else {
		vlo := v & ((1 << 32) - 1)
		vhi := v >> 32
		if vlo != vhi {
			t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
		}
		inc := uint64(1 + 1<<32)
		new = v + uintptr(inc)
	}
	addr.Store(new)
}
// This code is just testing that LoadPointer/StorePointer operate
// atomically; it's not actually calculating pointers.
//
//go:nocheckptr
func hammerStoreLoadPointer(t testing.TB, paddr unsafe.Pointer) {
	addr := (*unsafe.Pointer)(paddr)
	v := uintptr(LoadPointer(addr))
	new := v
	if arch32 {
		vlo := v & ((1 << 16) - 1)
		vhi := v >> 16
		if vlo != vhi {
			t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
		}
		new = v + 1 + 1<<16
		if vlo == 1e4 {
			// Wrap before either 16-bit half can overflow.
			new = 0
		}
	} else {
		vlo := v & ((1 << 32) - 1)
		vhi := v >> 32
		if vlo != vhi {
			t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
		}
		// Computed via uint64 so this also compiles for 32-bit builds.
		inc := uint64(1 + 1<<32)
		new = v + uintptr(inc)
	}
	StorePointer(addr, unsafe.Pointer(new))
}
// This code is just testing that LoadPointer/StorePointer operate
// atomically; it's not actually calculating pointers.
//
//go:nocheckptr
func hammerStoreLoadPointerMethod(t testing.TB, paddr unsafe.Pointer) {
	addr := (*Pointer[byte])(paddr)
	v := uintptr(unsafe.Pointer(addr.Load()))
	new := v
	if arch32 {
		vlo := v & ((1 << 16) - 1)
		vhi := v >> 16
		if vlo != vhi {
			t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
		}
		new = v + 1 + 1<<16
		if vlo == 1e4 {
			new = 0
		}
	} else {
		vlo := v & ((1 << 32) - 1)
		vhi := v >> 32
		if vlo != vhi {
			t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
		}
		inc := uint64(1 + 1<<32)
		new = v + uintptr(inc)
	}
	addr.Store((*byte)(unsafe.Pointer(new)))
}
// TestHammerStoreLoad runs each store/load hammer from several goroutines
// against one shared 64-bit word; each hammer self-checks that it never
// observes a torn (half-written) value.
func TestHammerStoreLoad(t *testutil.TestRunner) {
	tests := []func(testing.TB, unsafe.Pointer){
		hammerStoreLoadInt32, hammerStoreLoadUint32,
		hammerStoreLoadUintptr, hammerStoreLoadPointer,
		hammerStoreLoadInt32Method, hammerStoreLoadUint32Method,
		hammerStoreLoadUintptrMethod, hammerStoreLoadPointerMethod,
		hammerStoreLoadInt64, hammerStoreLoadUint64,
		hammerStoreLoadInt64Method, hammerStoreLoadUint64Method,
	}
	n := int(1e6)
	if short {
		n = int(1e4)
	}
	const procs = 8
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(procs))
	// Disable the GC because hammerStoreLoadPointer invokes
	// write barriers on values that aren't real pointers.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// Ensure any in-progress GC is finished.
	runtime.GC()
	for _, tt := range tests {
		c := make(chan int)
		var val uint64
		for p := 0; p < procs; p++ {
			go func() {
				for i := 0; i < n; i++ {
					tt(t, unsafe.Pointer(&val))
				}
				c <- 1
			}()
		}
		for p := 0; p < procs; p++ {
			<-c
		}
	}
}
// TestStoreLoadSeqCst32 verifies sequential consistency of 32-bit
// store/load: two goroutines each publish their iteration number, read the
// peer's value, and exchange what they saw via the ack slots; if both ever
// read the peer's stale value for the same round, the memory model was
// violated. Requires more than one CPU to be meaningful.
func TestStoreLoadSeqCst32(t *testutil.TestRunner) {
	if runtime.NumCPU() == 1 {
		t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := int32(1e3)
	if short {
		N = int32(1e2)
	}
	c := make(chan bool, 2)
	X := [2]int32{}
	// Three ack slots per goroutine, rotated by round (i%3), so a slot is
	// never reused before the peer has consumed it; -1 means "empty".
	ack := [2][3]int32{{-1, -1, -1}, {-1, -1, -1}}
	for p := 0; p < 2; p++ {
		go func(me int) {
			he := 1 - me
			for i := int32(1); i < N; i++ {
				StoreInt32(&X[me], i)
				my := LoadInt32(&X[he])
				StoreInt32(&ack[me][i%3], my)
				// Spin (with occasional yields) until the peer acks.
				for w := 1; LoadInt32(&ack[he][i%3]) == -1; w++ {
					if w%1000 == 0 {
						runtime.Gosched()
					}
				}
				his := LoadInt32(&ack[he][i%3])
				if (my != i && my != i-1) || (his != i && his != i-1) {
					t.Errorf("invalid values: %d/%d (%d)", my, his, i)
					break
				}
				if my != i && his != i {
					t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
					break
				}
				StoreInt32(&ack[me][(i-1)%3], -1)
			}
			c <- true
		}(p)
	}
	<-c
	<-c
}
// TestStoreLoadSeqCst64 is the 64-bit variant of TestStoreLoadSeqCst32.
func TestStoreLoadSeqCst64(t *testutil.TestRunner) {
	if runtime.NumCPU() == 1 {
		t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := int64(1e3)
	if short {
		N = int64(1e2)
	}
	c := make(chan bool, 2)
	X := [2]int64{}
	// Three ack slots per goroutine, rotated by round; -1 means "empty".
	ack := [2][3]int64{{-1, -1, -1}, {-1, -1, -1}}
	for p := 0; p < 2; p++ {
		go func(me int) {
			he := 1 - me
			for i := int64(1); i < N; i++ {
				StoreInt64(&X[me], i)
				my := LoadInt64(&X[he])
				StoreInt64(&ack[me][i%3], my)
				// Spin (with occasional yields) until the peer acks.
				for w := 1; LoadInt64(&ack[he][i%3]) == -1; w++ {
					if w%1000 == 0 {
						runtime.Gosched()
					}
				}
				his := LoadInt64(&ack[he][i%3])
				if (my != i && my != i-1) || (his != i && his != i-1) {
					t.Errorf("invalid values: %d/%d (%d)", my, his, i)
					break
				}
				if my != i && his != i {
					t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
					break
				}
				StoreInt64(&ack[me][(i-1)%3], -1)
			}
			c <- true
		}(p)
	}
	<-c
	<-c
}
// TestStoreLoadRelAcq32 verifies release/acquire semantics of 32-bit
// store/load: a plain write to data1/data2 followed by an atomic store of
// signal must be fully visible to the goroutine that atomically loads the
// same signal value. The pad fields separate the words onto different
// cache lines.
func TestStoreLoadRelAcq32(t *testutil.TestRunner) {
	if runtime.NumCPU() == 1 {
		t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := int32(1e3)
	if short {
		N = int32(1e2)
	}
	c := make(chan bool, 2)
	type Data struct {
		signal int32
		pad1 [128]int8
		data1 int32
		pad2 [128]int8
		data2 float32
	}
	var X Data
	for p := int32(0); p < 2; p++ {
		go func(p int32) {
			for i := int32(1); i < N; i++ {
				// The two goroutines alternate producer/consumer roles
				// each round based on parity.
				if (i+p)%2 == 0 {
					X.data1 = i
					X.data2 = float32(i)
					StoreInt32(&X.signal, i)
				} else {
					for w := 1; LoadInt32(&X.signal) != i; w++ {
						if w%1000 == 0 {
							runtime.Gosched()
						}
					}
					d1 := X.data1
					d2 := X.data2
					if d1 != i || d2 != float32(i) {
						t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i)
						break
					}
				}
			}
			c <- true
		}(p)
	}
	<-c
	<-c
}
// TestStoreLoadRelAcq64 is the 64-bit variant of TestStoreLoadRelAcq32.
func TestStoreLoadRelAcq64(t *testutil.TestRunner) {
	if runtime.NumCPU() == 1 {
		t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := int64(1e3)
	if short {
		N = int64(1e2)
	}
	c := make(chan bool, 2)
	type Data struct {
		signal int64
		pad1 [128]int8
		data1 int64
		pad2 [128]int8
		data2 float64
	}
	var X Data
	for p := int64(0); p < 2; p++ {
		go func(p int64) {
			for i := int64(1); i < N; i++ {
				// The two goroutines alternate producer/consumer roles
				// each round based on parity.
				if (i+p)%2 == 0 {
					X.data1 = i
					X.data2 = float64(i)
					StoreInt64(&X.signal, i)
				} else {
					for w := 1; LoadInt64(&X.signal) != i; w++ {
						if w%1000 == 0 {
							runtime.Gosched()
						}
					}
					d1 := X.data1
					d2 := X.data2
					if d1 != i || d2 != float64(i) {
						t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i)
						break
					}
				}
			}
			c <- true
		}(p)
	}
	<-c
	<-c
}
// shouldPanic runs f and fails the test unless f panics with the runtime's
// unaligned 64-bit atomic message. A GC is forced in the deferred handler
// before inspecting the panic value.
func shouldPanic(t testing.TB, name string, f func()) {
	defer func() {
		// Check that all GC maps are sane.
		runtime.GC()

		const want = "unaligned 64-bit atomic operation"
		got := recover()
		if got == nil {
			t.Errorf("%s did not panic", name)
			return
		}
		if s, _ := got.(string); s != want {
			t.Errorf("%s: wanted panic %q, got %q", name, want, got)
		}
	}()
	f()
}
// TestUnaligned64 checks that, on 32-bit systems, every 64-bit atomic
// operation on a misaligned address panics instead of failing silently.
func TestUnaligned64(t *testutil.TestRunner) {
	// Unaligned 64-bit atomics on 32-bit systems are
	// a continual source of pain. Test that on 32-bit systems they crash
	// instead of failing silently.
	if !arch32 {
		t.Skip("test only runs on 32-bit systems")
	}

	words := make([]uint32, 4)
	p := (*uint64)(unsafe.Pointer(&words[1])) // deliberately misaligned 64-bit address

	cases := []struct {
		name string
		op   func()
	}{
		{"LoadUint64", func() { LoadUint64(p) }},
		{"LoadUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Load() }},
		{"StoreUint64", func() { StoreUint64(p, 1) }},
		{"StoreUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Store(1) }},
		{"CompareAndSwapUint64", func() { CompareAndSwapUint64(p, 1, 2) }},
		{"CompareAndSwapUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).CompareAndSwap(1, 2) }},
		{"AddUint64", func() { AddUint64(p, 3) }},
		{"AddUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Add(3) }},
	}
	for _, c := range cases {
		shouldPanic(t, c.name, c.op)
	}
}
// TestAutoAligned64 verifies that the atomic.Int64/atomic.Uint64 wrapper
// types are placed at an 8-byte-aligned offset even when preceded by a
// 4-byte field, both via reflect field offsets and via the actual address.
func TestAutoAligned64(t *testutil.TestRunner) {
	var signed struct {
		_ uint32
		i Int64
	}
	if o := reflect.TypeOf(&signed).Elem().Field(1).Offset; o != 8 {
		t.Fatalf("Int64 offset = %d, want 8", o)
	}
	if p := reflect.ValueOf(&signed).Elem().Field(1).Addr().Pointer(); p&7 != 0 {
		t.Fatalf("Int64 pointer = %#x, want 8-aligned", p)
	}

	var unsigned struct {
		_ uint32
		i Uint64
	}
	if o := reflect.TypeOf(&unsigned).Elem().Field(1).Offset; o != 8 {
		t.Fatalf("Uint64 offset = %d, want 8", o)
	}
	if p := reflect.ValueOf(&unsigned).Elem().Field(1).Addr().Pointer(); p&7 != 0 {
		// Fixed copy-paste error: this branch checks the Uint64 field, so the
		// failure message must say Uint64 (it previously said Int64).
		t.Fatalf("Uint64 pointer = %#x, want 8-aligned", p)
	}
}
// TestNilDeref exercises every atomic operation (function and method forms)
// with a nil pointer. Each call is run under recover, so the expected nil
// dereference panics do not kill the test; a GC inside the deferred handler
// checks that the runtime survives each panic in a sane state.
func TestNilDeref(t *testutil.TestRunner) {
	funcs := [...]func(){
		func() { CompareAndSwapInt32(nil, 0, 0) },
		func() { (*Int32)(nil).CompareAndSwap(0, 0) },
		func() { CompareAndSwapInt64(nil, 0, 0) },
		func() { (*Int64)(nil).CompareAndSwap(0, 0) },
		func() { CompareAndSwapUint32(nil, 0, 0) },
		func() { (*Uint32)(nil).CompareAndSwap(0, 0) },
		func() { CompareAndSwapUint64(nil, 0, 0) },
		func() { (*Uint64)(nil).CompareAndSwap(0, 0) },
		func() { CompareAndSwapUintptr(nil, 0, 0) },
		func() { (*Uintptr)(nil).CompareAndSwap(0, 0) },
		func() { CompareAndSwapPointer(nil, nil, nil) },
		func() { (*Pointer[byte])(nil).CompareAndSwap(nil, nil) },
		func() { SwapInt32(nil, 0) },
		func() { (*Int32)(nil).Swap(0) },
		func() { SwapUint32(nil, 0) },
		func() { (*Uint32)(nil).Swap(0) },
		func() { SwapInt64(nil, 0) },
		func() { (*Int64)(nil).Swap(0) },
		func() { SwapUint64(nil, 0) },
		func() { (*Uint64)(nil).Swap(0) },
		func() { SwapUintptr(nil, 0) },
		func() { (*Uintptr)(nil).Swap(0) },
		func() { SwapPointer(nil, nil) },
		func() { (*Pointer[byte])(nil).Swap(nil) },
		func() { AddInt32(nil, 0) },
		func() { (*Int32)(nil).Add(0) },
		func() { AddUint32(nil, 0) },
		func() { (*Uint32)(nil).Add(0) },
		func() { AddInt64(nil, 0) },
		func() { (*Int64)(nil).Add(0) },
		func() { AddUint64(nil, 0) },
		func() { (*Uint64)(nil).Add(0) },
		func() { AddUintptr(nil, 0) },
		func() { (*Uintptr)(nil).Add(0) },
		func() { LoadInt32(nil) },
		func() { (*Int32)(nil).Load() },
		func() { LoadInt64(nil) },
		func() { (*Int64)(nil).Load() },
		func() { LoadUint32(nil) },
		func() { (*Uint32)(nil).Load() },
		func() { LoadUint64(nil) },
		func() { (*Uint64)(nil).Load() },
		func() { LoadUintptr(nil) },
		func() { (*Uintptr)(nil).Load() },
		func() { LoadPointer(nil) },
		func() { (*Pointer[byte])(nil).Load() },
		func() { StoreInt32(nil, 0) },
		func() { (*Int32)(nil).Store(0) },
		func() { StoreInt64(nil, 0) },
		func() { (*Int64)(nil).Store(0) },
		func() { StoreUint32(nil, 0) },
		func() { (*Uint32)(nil).Store(0) },
		func() { StoreUint64(nil, 0) },
		func() { (*Uint64)(nil).Store(0) },
		func() { StoreUintptr(nil, 0) },
		func() { (*Uintptr)(nil).Store(0) },
		func() { StorePointer(nil, nil) },
		func() { (*Pointer[byte])(nil).Store(nil) },
	}
	for _, f := range funcs {
		func() {
			defer func() {
				// Check that all GC maps are sane after the panic.
				runtime.GC()
				recover() // each f is expected to panic on the nil pointer
			}()
			f()
		}()
	}
}
// Test that this compiles.
// When atomic.Pointer used _ [0]T, it did not.
// List is a self-referential node kept purely as a compile-time regression
// check; nothing instantiates it.
type List struct {
	Next Pointer[List]
}
module atomic
go 1.22
toolchain go1.22.0
require utils v0.0.0
replace utils => ../../utils
package main
import (
"fmt"
"utils/testutil"
)
// main drives every ported sync/atomic test through the shared testutil
// harness, in the same order as upstream, then prints a success marker.
func main() {
	tests := []struct {
		name string
		fn   func(*testutil.TestRunner)
	}{
		{"TestSwapInt32", TestSwapInt32},
		{"TestSwapInt32Method", TestSwapInt32Method},
		{"TestSwapUint32", TestSwapUint32},
		{"TestSwapUint32Method", TestSwapUint32Method},
		{"TestSwapInt64", TestSwapInt64},
		{"TestSwapInt64Method", TestSwapInt64Method},
		{"TestSwapUint64", TestSwapUint64},
		{"TestSwapUint64Method", TestSwapUint64Method},
		{"TestSwapUintptr", TestSwapUintptr},
		{"TestSwapUintptrMethod", TestSwapUintptrMethod},
		{"TestSwapPointer", TestSwapPointer},
		{"TestSwapPointerMethod", TestSwapPointerMethod},
		{"TestAddInt32", TestAddInt32},
		{"TestAddInt32Method", TestAddInt32Method},
		{"TestAddUint32", TestAddUint32},
		{"TestAddUint32Method", TestAddUint32Method},
		{"TestAddInt64", TestAddInt64},
		{"TestAddInt64Method", TestAddInt64Method},
		{"TestAddUint64", TestAddUint64},
		{"TestAddUint64Method", TestAddUint64Method},
		{"TestAddUintptr", TestAddUintptr},
		{"TestAddUintptrMethod", TestAddUintptrMethod},
		{"TestCompareAndSwapInt32", TestCompareAndSwapInt32},
		{"TestCompareAndSwapInt32Method", TestCompareAndSwapInt32Method},
		{"TestCompareAndSwapUint32", TestCompareAndSwapUint32},
		{"TestCompareAndSwapUint32Method", TestCompareAndSwapUint32Method},
		{"TestCompareAndSwapInt64", TestCompareAndSwapInt64},
		{"TestCompareAndSwapInt64Method", TestCompareAndSwapInt64Method},
		{"TestCompareAndSwapUint64", TestCompareAndSwapUint64},
		{"TestCompareAndSwapUint64Method", TestCompareAndSwapUint64Method},
		{"TestCompareAndSwapUintptr", TestCompareAndSwapUintptr},
		{"TestCompareAndSwapUintptrMethod", TestCompareAndSwapUintptrMethod},
		{"TestCompareAndSwapPointer", TestCompareAndSwapPointer},
		{"TestCompareAndSwapPointerMethod", TestCompareAndSwapPointerMethod},
		{"TestLoadInt32", TestLoadInt32},
		{"TestLoadInt32Method", TestLoadInt32Method},
		{"TestLoadUint32", TestLoadUint32},
		{"TestLoadUint32Method", TestLoadUint32Method},
		{"TestLoadInt64", TestLoadInt64},
		{"TestLoadInt64Method", TestLoadInt64Method},
		{"TestLoadUint64", TestLoadUint64},
		{"TestLoadUint64Method", TestLoadUint64Method},
		{"TestLoadUintptr", TestLoadUintptr},
		{"TestLoadUintptrMethod", TestLoadUintptrMethod},
		{"TestLoadPointer", TestLoadPointer},
		{"TestLoadPointerMethod", TestLoadPointerMethod},
		{"TestStoreInt32", TestStoreInt32},
		{"TestStoreInt32Method", TestStoreInt32Method},
		{"TestStoreUint32", TestStoreUint32},
		{"TestStoreUint32Method", TestStoreUint32Method},
		{"TestStoreInt64", TestStoreInt64},
		{"TestStoreInt64Method", TestStoreInt64Method},
		{"TestStoreUint64", TestStoreUint64},
		{"TestStoreUint64Method", TestStoreUint64Method},
		{"TestStoreUintptr", TestStoreUintptr},
		{"TestStoreUintptrMethod", TestStoreUintptrMethod},
		{"TestStorePointer", TestStorePointer},
		{"TestStorePointerMethod", TestStorePointerMethod},
		{"TestHammer32", TestHammer32},
		{"TestHammer64", TestHammer64},
		{"TestAutoAligned64", TestAutoAligned64},
		{"TestNilDeref", TestNilDeref},
		{"TestStoreLoadSeqCst32", TestStoreLoadSeqCst32},
		{"TestStoreLoadSeqCst64", TestStoreLoadSeqCst64},
		{"TestStoreLoadRelAcq32", TestStoreLoadRelAcq32},
		{"TestStoreLoadRelAcq64", TestStoreLoadRelAcq64},
		{"TestUnaligned64", TestUnaligned64},
		{"TestHammerStoreLoad", TestHammerStoreLoad},
	}
	for _, tc := range tests {
		testutil.RunTest(tc.fn, tc.name)
	}
	fmt.Println("Atomic tests passed")
}
module mtgeneral
go 1.22
toolchain go1.22.0
package main
import (
"fmt"
"os"
"runtime"
"sync"
"sync/atomic"
)
// main is a smoke test of general runtime features: WaitGroup + atomic
// counters, a small channel pipeline, and an explicit garbage collection.
func main() {
	// try some concurrency!
	var wg sync.WaitGroup
	var total atomic.Int32
	wg.Add(2)
	for _, inc := range []int32{2, 40} {
		inc := inc
		go func() {
			defer wg.Done()
			total.Add(inc)
		}()
	}
	wg.Wait()
	fmt.Printf("waitgroup result: %d\n", total.Load())

	// channels: pass one value through a two-stage pipeline.
	first := make(chan int, 1)
	second := make(chan int)
	third := make(chan int)
	go func() { second <- <-first }()
	go func() { third <- <-second }()
	first <- 1234
	out := <-third
	fmt.Printf("channels result: %d\n", out)

	// try a GC! (the runtime might not have run one yet)
	runtime.GC()
	_, _ = os.Stdout.Write([]byte("GC complete!\n"))
}
...@@ -3,3 +3,6 @@ module map ...@@ -3,3 +3,6 @@ module map
go 1.22 go 1.22
toolchain go1.22.0 toolchain go1.22.0
require utils v0.0.0
replace utils => ../../utils
...@@ -2,42 +2,18 @@ package main ...@@ -2,42 +2,18 @@ package main
import ( import (
"fmt" "fmt"
"sync"
"utils/testutil"
) )
func main() { func main() {
var m sync.Map testutil.RunTest(TestMapMatchesRWMutex, "TestMapMatchesRWMutex")
testutil.RunTest(TestMapMatchesDeepCopy, "TestMapMatchesDeepCopy")
m.Store("hello", "world") testutil.RunTest(TestConcurrentRange, "TestConcurrentRange")
m.Store("foo", "bar") testutil.RunTest(TestIssue40999, "TestIssue40999")
m.Store("baz", "qux") testutil.RunTest(TestMapRangeNestedCall, "TestMapRangeNestedCall")
testutil.RunTest(TestCompareAndSwap_NonExistingKey, "TestCompareAndSwap_NonExistingKey")
m.Delete("foo") testutil.RunTest(TestMapRangeNoAllocations, "TestMapRangeNoAllocations")
m.Load("baz")
go func() {
m.CompareAndDelete("hello", "world")
m.LoadAndDelete("baz")
}()
var wg sync.WaitGroup
for i := 0; i < 100; i++ {
wg.Add(1)
go func() {
m.Load("hello")
m.Load("baz")
m.Range(func(k, v interface{}) bool {
m.Load("hello")
m.Load("baz")
return true
})
m.CompareAndSwap("hello", "world", "Go")
m.LoadOrStore("hello", "world")
wg.Done()
}()
}
wg.Wait()
fmt.Println("Map test passed") fmt.Println("Map test passed")
} }
// This file is based on code written by The Go Authors.
// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/map_reference_test.go
//
// --- Original License Notice ---
//
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
import (
"sync"
"sync/atomic"
)
// This file contains reference map implementations for unit-tests.

// mapInterface is the interface Map implements. The randomized equivalence
// tests drive sync.Map and the reference implementations through this
// common surface.
type mapInterface interface {
	Load(any) (any, bool)
	Store(key, value any)
	LoadOrStore(key, value any) (actual any, loaded bool)
	LoadAndDelete(key any) (value any, loaded bool)
	Delete(any)
	Swap(key, value any) (previous any, loaded bool)
	CompareAndSwap(key, old, new any) (swapped bool)
	CompareAndDelete(key, old any) (deleted bool)
	Range(func(key, value any) (shouldContinue bool))
}
// Compile-time assertions that both reference implementations satisfy
// mapInterface.
var (
	_ mapInterface = &RWMutexMap{}
	_ mapInterface = &DeepCopyMap{}
)
// RWMutexMap implements mapInterface by guarding a plain Go map with a
// sync.RWMutex: readers share RLock, writers take the exclusive lock.
type RWMutexMap struct {
	mu    sync.RWMutex
	dirty map[any]any // lazily allocated backing map
}

// Load returns the value stored under key and whether it was present.
func (m *RWMutexMap) Load(key any) (value any, ok bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	value, ok = m.dirty[key]
	return value, ok
}

// Store sets the value for key, allocating the backing map on first use.
func (m *RWMutexMap) Store(key, value any) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.dirty == nil {
		m.dirty = make(map[any]any)
	}
	m.dirty[key] = value
}

// LoadOrStore returns the existing value for key if present; otherwise it
// stores value. loaded reports whether the value was already there.
func (m *RWMutexMap) LoadOrStore(key, value any) (actual any, loaded bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if actual, loaded = m.dirty[key]; loaded {
		return actual, loaded
	}
	if m.dirty == nil {
		m.dirty = make(map[any]any)
	}
	m.dirty[key] = value
	return value, false
}

// Swap stores value for key and returns the previous value, if any.
func (m *RWMutexMap) Swap(key, value any) (previous any, loaded bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.dirty == nil {
		m.dirty = make(map[any]any)
	}
	previous, loaded = m.dirty[key]
	m.dirty[key] = value
	return previous, loaded
}

// LoadAndDelete removes key and returns the value it held, if any.
func (m *RWMutexMap) LoadAndDelete(key any) (value any, loaded bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	v, ok := m.dirty[key]
	if !ok {
		return nil, false
	}
	delete(m.dirty, key)
	return v, true
}

// Delete removes key from the map.
func (m *RWMutexMap) Delete(key any) {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.dirty, key)
}

// CompareAndSwap replaces the value for key with new only when the current
// value equals old.
func (m *RWMutexMap) CompareAndSwap(key, old, new any) (swapped bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.dirty == nil {
		return false
	}
	cur, ok := m.dirty[key]
	if ok && cur == old {
		m.dirty[key] = new
		return true
	}
	return false
}

// CompareAndDelete removes key only when its current value equals old.
func (m *RWMutexMap) CompareAndDelete(key, old any) (deleted bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.dirty == nil {
		return false
	}
	cur, ok := m.dirty[key]
	if ok && cur == old {
		delete(m.dirty, key)
		return true
	}
	return false
}

// Range calls f for each key/value pair. It snapshots the key set under
// RLock, then re-Loads each key (skipping ones deleted in the meantime)
// so f never runs while the lock is held.
func (m *RWMutexMap) Range(f func(key, value any) (shouldContinue bool)) {
	m.mu.RLock()
	snapshot := make([]any, 0, len(m.dirty))
	for k := range m.dirty {
		snapshot = append(snapshot, k)
	}
	m.mu.RUnlock()

	for _, k := range snapshot {
		if v, ok := m.Load(k); ok && !f(k, v) {
			return
		}
	}
}
// DeepCopyMap is an implementation of mapInterface with copy-on-write
// semantics: Load reads a snapshot from an atomic.Value without locking,
// while every mutation clones the current map under mu and publishes the
// new copy.
type DeepCopyMap struct {
	mu    sync.Mutex
	clean atomic.Value // holds map[any]any snapshots
}

// Load returns the value for key from the current snapshot, lock-free.
func (m *DeepCopyMap) Load(key any) (value any, ok bool) {
	snapshot, _ := m.clean.Load().(map[any]any)
	value, ok = snapshot[key]
	return value, ok
}

// Store publishes a new snapshot containing key -> value.
func (m *DeepCopyMap) Store(key, value any) {
	m.mu.Lock()
	defer m.mu.Unlock()
	next := m.dirty()
	next[key] = value
	m.clean.Store(next)
}

// LoadOrStore returns the existing value for key if present; otherwise it
// publishes a new snapshot containing key -> value.
func (m *DeepCopyMap) LoadOrStore(key, value any) (actual any, loaded bool) {
	snapshot, _ := m.clean.Load().(map[any]any)
	if actual, loaded = snapshot[key]; loaded {
		return actual, loaded
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	// Reload clean in case it changed while we were waiting on m.mu.
	snapshot, _ = m.clean.Load().(map[any]any)
	if actual, loaded = snapshot[key]; !loaded {
		next := m.dirty()
		next[key] = value
		actual = value
		m.clean.Store(next)
	}
	return actual, loaded
}

// Swap stores value for key and returns the previous value, if any.
func (m *DeepCopyMap) Swap(key, value any) (previous any, loaded bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	next := m.dirty()
	previous, loaded = next[key]
	next[key] = value
	m.clean.Store(next)
	return previous, loaded
}

// LoadAndDelete removes key, returning the value it held, if any.
func (m *DeepCopyMap) LoadAndDelete(key any) (value any, loaded bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	next := m.dirty()
	value, loaded = next[key]
	delete(next, key)
	m.clean.Store(next)
	return value, loaded
}

// Delete publishes a new snapshot without key.
func (m *DeepCopyMap) Delete(key any) {
	m.mu.Lock()
	defer m.mu.Unlock()
	next := m.dirty()
	delete(next, key)
	m.clean.Store(next)
}

// CompareAndSwap replaces the value for key with new only when the current
// value equals old. A lock-free pre-check rejects obvious misses before
// the clone-under-lock path re-verifies.
func (m *DeepCopyMap) CompareAndSwap(key, old, new any) (swapped bool) {
	snapshot, _ := m.clean.Load().(map[any]any)
	if cur, ok := snapshot[key]; !ok || cur != old {
		return false
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	next := m.dirty()
	if cur, ok := next[key]; ok && cur == old {
		next[key] = new
		m.clean.Store(next)
		return true
	}
	return false
}

// CompareAndDelete removes key only when its current value equals old,
// using the same pre-check + clone-under-lock pattern as CompareAndSwap.
func (m *DeepCopyMap) CompareAndDelete(key, old any) (deleted bool) {
	snapshot, _ := m.clean.Load().(map[any]any)
	if cur, ok := snapshot[key]; !ok || cur != old {
		return false
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	next := m.dirty()
	if cur, ok := next[key]; ok && cur == old {
		delete(next, key)
		m.clean.Store(next)
		return true
	}
	return false
}

// Range iterates over the snapshot current at the time of the call.
func (m *DeepCopyMap) Range(f func(key, value any) (shouldContinue bool)) {
	snapshot, _ := m.clean.Load().(map[any]any)
	for k, v := range snapshot {
		if !f(k, v) {
			return
		}
	}
}

// dirty returns a fresh copy of the current snapshot, sized for one extra
// entry; callers mutate it and then publish it via m.clean.Store.
func (m *DeepCopyMap) dirty() map[any]any {
	cur, _ := m.clean.Load().(map[any]any)
	next := make(map[any]any, len(cur)+1)
	for k, v := range cur {
		next[k] = v
	}
	return next
}
// This file is based on code written by The Go Authors.
// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/map_test.go
//
// --- Original License Notice ---
//
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
import (
"math/rand"
"reflect"
"runtime"
"sync"
"sync/atomic"
"testing"
"testing/quick"
"utils/testutil"
)
// mapOp names a single operation on mapInterface; randomized sequences of
// these drive the quick.CheckEqual equivalence tests.
type mapOp string

const (
	opLoad             = mapOp("Load")
	opStore            = mapOp("Store")
	opLoadOrStore      = mapOp("LoadOrStore")
	opLoadAndDelete    = mapOp("LoadAndDelete")
	opDelete           = mapOp("Delete")
	opSwap             = mapOp("Swap")
	opCompareAndSwap   = mapOp("CompareAndSwap")
	opCompareAndDelete = mapOp("CompareAndDelete")
)

// mapOps lists every op so Generate can choose one uniformly.
var mapOps = [...]mapOp{
	opLoad,
	opStore,
	opLoadOrStore,
	opLoadAndDelete,
	opDelete,
	opSwap,
	opCompareAndSwap,
	opCompareAndDelete,
}

// mapCall is a quick.Generator for calls on mapInterface.
// k is the key; v is the value, populated only for ops that take one.
type mapCall struct {
	op   mapOp
	k, v any
}
// apply executes the call against m and returns a (value, ok) pair used to
// compare implementations. Ops without an interesting result return
// (nil, false).
func (c mapCall) apply(m mapInterface) (any, bool) {
	switch c.op {
	case opLoad:
		return m.Load(c.k)
	case opStore:
		m.Store(c.k, c.v)
		return nil, false
	case opLoadOrStore:
		return m.LoadOrStore(c.k, c.v)
	case opLoadAndDelete:
		return m.LoadAndDelete(c.k)
	case opDelete:
		m.Delete(c.k)
		return nil, false
	case opSwap:
		return m.Swap(c.k, c.v)
	case opCompareAndSwap:
		// On success, the key is deleted immediately — the swapped-in value
		// is random, so leaving it in the map would make the final map
		// contents differ between the compared implementations.
		if m.CompareAndSwap(c.k, c.v, rand.Int()) {
			m.Delete(c.k)
			return c.v, true
		}
		return nil, false
	case opCompareAndDelete:
		if m.CompareAndDelete(c.k, c.v) {
			if _, ok := m.Load(c.k); !ok {
				return nil, true
			}
		}
		return nil, false
	default:
		panic("invalid mapOp")
	}
}
// mapResult captures the (value, ok) pair produced by a single mapCall so
// whole call sequences can be compared across implementations.
type mapResult struct {
	value any
	ok    bool
}
func randValue(r *rand.Rand) any {
b := make([]byte, r.Intn(4))
for i := range b {
b[i] = 'a' + byte(rand.Intn(26))
}
return string(b)
}
// Generate implements quick.Generator, producing a random mapCall. The op
// is now drawn from r rather than the global rand source, so generation is
// reproducible for a given seed and consistent with randValue. Only Store
// and LoadOrStore carry a value.
func (mapCall) Generate(r *rand.Rand, size int) reflect.Value {
	c := mapCall{op: mapOps[r.Intn(len(mapOps))], k: randValue(r)}
	switch c.op {
	case opStore, opLoadOrStore:
		c.v = randValue(r)
	}
	return reflect.ValueOf(c)
}
// applyCalls runs each call against m in order, recording every call's
// (value, ok) result, then snapshots the final map contents via Range.
// The results slice is pre-sized to avoid repeated growth; since both
// compared implementations go through this same function, the nil-vs-empty
// distinction cannot skew quick.CheckEqual.
func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[any]any) {
	results = make([]mapResult, 0, len(calls))
	for _, c := range calls {
		v, ok := c.apply(m)
		results = append(results, mapResult{v, ok})
	}

	final = make(map[any]any)
	m.Range(func(k, v any) bool {
		final[k] = v
		return true
	})

	return results, final
}
// applyMap applies calls to a real sync.Map.
func applyMap(calls []mapCall) ([]mapResult, map[any]any) {
	return applyCalls(new(sync.Map), calls)
}

// applyRWMutexMap applies calls to the RWMutex-guarded reference map.
func applyRWMutexMap(calls []mapCall) ([]mapResult, map[any]any) {
	return applyCalls(new(RWMutexMap), calls)
}

// applyDeepCopyMap applies calls to the copy-on-write reference map.
func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[any]any) {
	return applyCalls(new(DeepCopyMap), calls)
}
// TestMapMatchesRWMutex cross-checks sync.Map against the RWMutexMap
// reference implementation on randomized call sequences.
func TestMapMatchesRWMutex(t *testutil.TestRunner) {
	if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil {
		t.Error(err)
	}
}
// TestMapMatchesDeepCopy cross-checks sync.Map against the copy-on-write
// DeepCopyMap reference implementation on randomized call sequences.
func TestMapMatchesDeepCopy(t *testutil.TestRunner) {
	if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil {
		t.Error(err)
	}
}
// TestConcurrentRange stores multiples of each key from several writer
// goroutines while the main goroutine repeatedly Ranges over the map,
// checking that every Range pass visits each key exactly once with a
// value that is a multiple of its key.
func TestConcurrentRange(t *testutil.TestRunner) {
	const mapSize = 1 << 10

	m := new(sync.Map)
	for n := int64(1); n <= mapSize; n++ {
		m.Store(n, int64(n))
	}

	done := make(chan struct{})
	var wg sync.WaitGroup
	defer func() {
		close(done)
		wg.Wait()
	}()
	// One writer per P; each mostly Loads and occasionally Stores n*i*g,
	// which stays a multiple of n — the invariant Range checks below.
	for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- {
		r := rand.New(rand.NewSource(g))
		wg.Add(1)
		go func(g int64) {
			defer wg.Done()
			for i := int64(0); ; i++ {
				select {
				case <-done:
					return
				default:
				}
				for n := int64(1); n < mapSize; n++ {
					if r.Int63n(mapSize) == 0 {
						m.Store(n, n*i*g)
					} else {
						m.Load(n)
					}
				}
			}
		}(g)
	}

	// Reduced from the upstream default of 1<<10 iterations; 16 matches
	// upstream's -short mode and keeps the emulated step count manageable.
	iters := 16
	for n := iters; n > 0; n-- {
		seen := make(map[int64]bool, mapSize)

		m.Range(func(ki, vi any) bool {
			k, v := ki.(int64), vi.(int64)
			if v%k != 0 {
				t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v)
			}
			if seen[k] {
				t.Fatalf("Range visited key %v twice", k)
			}
			seen[k] = true
			return true
		})

		if len(seen) != mapSize {
			t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize)
		}
	}
}
// TestIssue40999 checks that sync.Map does not leak deleted keys: a
// finalizer set on a stored-then-deleted pointer must eventually run,
// proving the map released its reference to the key.
func TestIssue40999(t *testutil.TestRunner) {
	var m sync.Map

	// Since the miss-counting in missLocked (via Delete)
	// compares the miss count with len(m.dirty),
	// add an initial entry to bias len(m.dirty) above the miss count.
	m.Store(nil, struct{}{})

	var finalized uint32

	// Set finalizers that count for collected keys. A non-zero count
	// indicates that keys have not been leaked.
	for atomic.LoadUint32(&finalized) == 0 {
		p := new(int)
		runtime.SetFinalizer(p, func(*int) {
			atomic.AddUint32(&finalized, 1)
		})
		m.Store(p, struct{}{})
		m.Delete(p)
		runtime.GC()
	}
}
// TestMapRangeNestedCall exercises calling Map methods (Load, LoadOrStore,
// Store, LoadAndDelete, Delete) from inside nested Range callbacks and
// verifies the map ends up empty afterwards.
func TestMapRangeNestedCall(t *testutil.TestRunner) { // Issue 46399
	var m sync.Map
	for i, v := range [3]string{"hello", "world", "Go"} {
		m.Store(i, v)
	}
	m.Range(func(key, value any) bool {
		m.Range(func(key, value any) bool {
			// We should be able to load the key offered in the Range callback,
			// because there are no concurrent Delete involved in this tested map.
			if v, ok := m.Load(key); !ok || !reflect.DeepEqual(v, value) {
				t.Fatalf("Nested Range loads unexpected value, got %+v want %+v", v, value)
			}

			// We didn't keep 42 and a value into the map before, if somehow we loaded
			// a value from such a key, meaning there must be an internal bug regarding
			// nested range in the Map.
			if _, loaded := m.LoadOrStore(42, "dummy"); loaded {
				t.Fatalf("Nested Range loads unexpected value, want store a new value")
			}

			// Try to Store then LoadAndDelete the corresponding value with the key
			// 42 to the Map. In this case, the key 42 and associated value should be
			// removed from the Map. Therefore any future range won't observe key 42
			// as we checked in above.
			val := "sync.Map"
			m.Store(42, val)
			if v, loaded := m.LoadAndDelete(42); !loaded || !reflect.DeepEqual(v, val) {
				t.Fatalf("Nested Range loads unexpected value, got %v, want %v", v, val)
			}
			return true
		})

		// Remove key from Map on-the-fly.
		m.Delete(key)
		return true
	})

	// After a Range of Delete, all keys should be removed and any
	// further Range won't invoke the callback. Hence length remains 0.
	length := 0
	m.Range(func(key, value any) bool {
		length++
		return true
	})
	if length != 0 {
		t.Fatalf("Unexpected sync.Map size, got %v want %v", length, 0)
	}
}
// TestCompareAndSwap_NonExistingKey verifies that CompareAndSwap reports
// false when the key was never inserted.
// See https://go.dev/issue/51972#issuecomment-1126408637.
func TestCompareAndSwap_NonExistingKey(t *testutil.TestRunner) {
	m := &sync.Map{}
	swapped := m.CompareAndSwap(m, nil, 42)
	if swapped {
		t.Fatalf("CompareAndSwap on a non-existing key succeeded")
	}
}
// TestMapRangeNoAllocations checks that ranging over a sync.Map performs
// zero heap allocations per pass.
func TestMapRangeNoAllocations(t *testutil.TestRunner) { // Issue 62404
	var m sync.Map
	allocs := testing.AllocsPerRun(10, func() {
		m.Range(func(key, value any) bool {
			return true
		})
	})
	if allocs > 0 {
		t.Errorf("AllocsPerRun of m.Range = %v; want 0", allocs)
	}
}
...@@ -3,3 +3,6 @@ module mutex ...@@ -3,3 +3,6 @@ module mutex
go 1.22 go 1.22
toolchain go1.22.0 toolchain go1.22.0
require utils v0.0.0
replace utils => ../../utils
// Portions of this code are derived from code written by The Go Authors.
// See original source: https://github.com/golang/go/blob/400433af3660905ecaceaf19ddad3e6c24b141df/src/sync/mutex_test.go
//
// --- Original License Notice ---
//
// Copyright 2009 The Go Authors.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google LLC nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main package main
import ( import (
"fmt" "fmt"
"os"
"sync" "utils/testutil"
) )
func main() { func main() {
TestMutex() testutil.RunTest(TestSemaphore, "TestSemaphore")
} testutil.RunTest(TestMutex, "TestMutex")
testutil.RunTest(TestMutexFairness, "TestMutexFairness")
func TestMutex() {
m := new(sync.Mutex)
m.Lock()
if m.TryLock() {
_, _ = fmt.Fprintln(os.Stderr, "TryLock succeeded with mutex locked")
os.Exit(1)
}
m.Unlock()
if !m.TryLock() {
_, _ = fmt.Fprintln(os.Stderr, "TryLock failed with mutex unlocked")
os.Exit(1)
}
m.Unlock()
c := make(chan bool)
for i := 0; i < 10; i++ {
go HammerMutex(m, 1000, c)
}
for i := 0; i < 10; i++ {
<-c
}
fmt.Println("Mutex test passed") fmt.Println("Mutex test passed")
} }
func HammerMutex(m *sync.Mutex, loops int, cdone chan bool) {
for i := 0; i < loops; i++ {
if i%3 == 0 {
if m.TryLock() {
m.Unlock()
}
continue
}
m.Lock()
m.Unlock()
}
cdone <- true
}
// This file is based on code written by The Go Authors.
// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/mutex_test.go
//
// --- Original License Notice ---
//
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
import (
"runtime"
. "sync"
"time"
"utils/testutil"
)
// HammerSemaphore acquires and releases the runtime-internal semaphore s in
// a tight loop, then signals completion on cdone.
func HammerSemaphore(s *uint32, loops int, cdone chan bool) {
	for i := 0; i < loops; i++ {
		Runtime_Semacquire(s)
		Runtime_Semrelease(s, false, 0)
	}
	cdone <- true
}
// TestSemaphore hammers a single-token runtime semaphore from 10 goroutines
// (1000 acquire/release pairs each) and waits for all of them to finish.
func TestSemaphore(t *testutil.TestRunner) {
	s := new(uint32)
	*s = 1 // one token available initially
	c := make(chan bool)
	for i := 0; i < 10; i++ {
		go HammerSemaphore(s, 1000, c)
	}
	for i := 0; i < 10; i++ {
		<-c
	}
}
func HammerMutex(m *Mutex, loops int, cdone chan bool) {
for i := 0; i < loops; i++ {
if i%3 == 0 {
if m.TryLock() {
m.Unlock()
}
continue
}
m.Lock()
m.Unlock()
}
cdone <- true
}
// TestMutex checks TryLock's interaction with Lock/Unlock and then hammers
// the lock from 10 goroutines while mutex profiling is enabled.
func TestMutex(t *testutil.TestRunner) {
	if rate := runtime.SetMutexProfileFraction(1); rate != 0 {
		t.Logf("got mutexrate %d expected 0", rate)
	}
	defer runtime.SetMutexProfileFraction(0)

	mu := new(Mutex)
	mu.Lock()
	if mu.TryLock() {
		t.Fatalf("TryLock succeeded with mutex locked")
	}
	mu.Unlock()
	if !mu.TryLock() {
		t.Fatalf("TryLock failed with mutex unlocked")
	}
	mu.Unlock()

	const workers = 10
	done := make(chan bool)
	for i := 0; i < workers; i++ {
		go HammerMutex(mu, 1000, done)
	}
	for i := 0; i < workers; i++ {
		<-done
	}
}
// TestMutexFairness checks that a goroutine that only occasionally needs
// the mutex can still acquire it within 10 seconds while another goroutine
// holds it almost continuously (a starvation/fairness check).
func TestMutexFairness(t *testutil.TestRunner) {
	var mu Mutex
	stop := make(chan bool)
	defer close(stop)
	go func() {
		// Greedy holder: re-acquires immediately, holding the lock for
		// ~100µs at a time.
		for {
			mu.Lock()
			time.Sleep(100 * time.Microsecond)
			mu.Unlock()
			select {
			case <-stop:
				return
			default:
			}
		}
	}()
	done := make(chan bool, 1)
	go func() {
		// Occasional acquirer: must not be starved by the holder above.
		for i := 0; i < 10; i++ {
			time.Sleep(100 * time.Microsecond)
			mu.Lock()
			mu.Unlock()
		}
		done <- true
	}()
	select {
	case <-done:
	case <-time.After(10 * time.Second):
		t.Fatalf("can't acquire Mutex in 10 seconds")
	}
}
package main
import (
_ "unsafe" // Required for go:linkname
)
// Exported aliases so the test files can exercise the runtime semaphore
// primitives that package sync uses internally.
var Runtime_Semacquire = runtime_Semacquire
var Runtime_Semrelease = runtime_Semrelease

// runtime_Semacquire is linked to sync.runtime_Semacquire in the runtime.
//
//go:linkname runtime_Semacquire sync.runtime_Semacquire
func runtime_Semacquire(s *uint32)

// runtime_Semrelease is linked to sync.runtime_Semrelease in the runtime.
//
//go:linkname runtime_Semrelease sync.runtime_Semrelease
func runtime_Semrelease(s *uint32, handoff bool, skipframes int)
module oncefunc
go 1.22
toolchain go1.22.0
require utils v0.0.0
replace utils => ../../utils
package main
import (
"fmt"
"utils/testutil"
)
// main runs the ported sync.OnceFunc/OnceValue/OnceValues tests in order
// and prints the success marker consumed by the test harness.
func main() {
    tests := []struct {
        fn   func(t *testutil.TestRunner)
        name string
    }{
        {TestOnceFunc, "TestOnceFunc"},
        {TestOnceValue, "TestOnceValue"},
        {TestOnceValues, "TestOnceValues"},
        {TestOnceFuncPanic, "TestOnceFuncPanic"},
        {TestOnceValuePanic, "TestOnceValuePanic"},
        {TestOnceValuesPanic, "TestOnceValuesPanic"},
        {TestOnceFuncPanicNil, "TestOnceFuncPanicNil"},
        {TestOnceFuncGoexit, "TestOnceFuncGoexit"},
        {TestOnceFuncPanicTraceback, "TestOnceFuncPanicTraceback"},
        {TestOnceXGC, "TestOnceXGC"},
    }
    for _, tc := range tests {
        testutil.RunTest(tc.fn, tc.name)
    }
    fmt.Println("OnceFunc tests passed")
}
// This file is based on code written by The Go Authors.
// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/oncefunc_test.go
//
// --- Original License Notice ---
//
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
import (
"bytes"
"math"
"runtime"
"runtime/debug"
"sync"
"sync/atomic"
"testing"
_ "unsafe"
"utils/testutil"
)
// We assume that the Once.Do tests have already covered parallelism.
func TestOnceFunc(t *testutil.TestRunner) {
calls := 0
f := sync.OnceFunc(func() { calls++ })
allocs := testing.AllocsPerRun(10, f)
if calls != 1 {
t.Errorf("want calls==1, got %d", calls)
}
if allocs != 0 {
t.Errorf("want 0 allocations per call, got %v", allocs)
}
}
func TestOnceValue(t *testutil.TestRunner) {
calls := 0
f := sync.OnceValue(func() int {
calls++
return calls
})
allocs := testing.AllocsPerRun(10, func() { f() })
value := f()
if calls != 1 {
t.Errorf("want calls==1, got %d", calls)
}
if value != 1 {
t.Errorf("want value==1, got %d", value)
}
if allocs != 0 {
t.Errorf("want 0 allocations per call, got %v", allocs)
}
}
func TestOnceValues(t *testutil.TestRunner) {
calls := 0
f := sync.OnceValues(func() (int, int) {
calls++
return calls, calls + 1
})
allocs := testing.AllocsPerRun(10, func() { f() })
v1, v2 := f()
if calls != 1 {
t.Errorf("want calls==1, got %d", calls)
}
if v1 != 1 || v2 != 2 {
t.Errorf("want v1==1 and v2==2, got %d and %d", v1, v2)
}
if allocs != 0 {
t.Errorf("want 0 allocations per call, got %v", allocs)
}
}
// testOncePanicX asserts that every call to f panics with the value "x"
// while the wrapped function runs only once.
func testOncePanicX(t testing.TB, calls *int, f func()) {
    check := func(label string, p any) {
        if p != "x" {
            t.Fatalf("%s: want panic %v, got %v", label, "x", p)
        }
    }
    testOncePanicWith(t, calls, f, check)
}
// testOncePanicWith invokes f twice; each invocation must panic, and check
// is applied to the recovered value. Afterwards the underlying function
// must have run exactly once (as counted through *calls).
func testOncePanicWith(t testing.TB, calls *int, f func(), check func(label string, p any)) {
    // Check that the each call to f panics with the same value, but the
    // underlying function is only called once.
    for _, label := range []string{"first time", "second time"} {
        var recovered any
        completed := false
        func() {
            defer func() {
                recovered = recover()
            }()
            f()
            completed = true
        }()
        if completed {
            t.Fatalf("%s: f did not panic", label)
        }
        check(label, recovered)
    }
    if *calls != 1 {
        t.Errorf("want calls==1, got %d", *calls)
    }
}
func TestOnceFuncPanic(t *testutil.TestRunner) {
calls := 0
f := sync.OnceFunc(func() {
calls++
panic("x")
})
testOncePanicX(t, &calls, f)
}
func TestOnceValuePanic(t *testutil.TestRunner) {
calls := 0
f := sync.OnceValue(func() int {
calls++
panic("x")
})
testOncePanicX(t, &calls, func() { f() })
}
func TestOnceValuesPanic(t *testutil.TestRunner) {
calls := 0
f := sync.OnceValues(func() (int, int) {
calls++
panic("x")
})
testOncePanicX(t, &calls, func() { f() })
}
// TestOnceFuncPanicNil checks that panic(nil) inside a OnceFunc surfaces
// as nil or *runtime.PanicNilError on every call.
func TestOnceFuncPanicNil(t *testutil.TestRunner) {
    var calls int
    wrapped := sync.OnceFunc(func() {
        calls++
        panic(nil)
    })
    testOncePanicWith(t, &calls, wrapped, func(label string, p any) {
        switch p.(type) {
        case nil, *runtime.PanicNilError:
            return
        }
        t.Fatalf("%s: want nil panic, got %v", label, p)
    })
}
// TestOnceFuncGoexit checks that a function that calls runtime.Goexit is
// not invoked a second time by OnceFunc.
func TestOnceFuncGoexit(t *testutil.TestRunner) {
    // If f calls Goexit, the results are unspecified. But check that f doesn't
    // get called twice.
    calls := 0
    f := sync.OnceFunc(func() {
        calls++
        runtime.Goexit()
    })
    var wg sync.WaitGroup
    for i := 0; i < 2; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            // Best-effort recover: behavior of f after Goexit is
            // unspecified, so later calls may panic.
            defer func() { recover() }()
            f()
        }()
        // Wait for each goroutine to finish before launching the next, so
        // the two calls to f are strictly sequential.
        wg.Wait()
    }
    if calls != 1 {
        t.Errorf("want calls==1, got %d", calls)
    }
}

// TestOnceFuncPanicTraceback checks that the stack trace of the first
// panic out of a OnceFunc reaches the original panic site.
func TestOnceFuncPanicTraceback(t *testutil.TestRunner) {
    // Test that on the first invocation of a OnceFunc, the stack trace goes all
    // the way to the origin of the panic.
    f := sync.OnceFunc(onceFuncPanic)
    defer func() {
        if p := recover(); p != "x" {
            t.Fatalf("want panic %v, got %v", "x", p)
        }
        stack := debug.Stack()
        // Symbol differs from upstream because these ported tests live in
        // package main rather than sync_test.
        //want := "sync_test.onceFuncPanic"
        want := "main.onceFuncPanic"
        if !bytes.Contains(stack, []byte(want)) {
            t.Fatalf("want stack containing %v, got:\n%s", want, string(stack))
        }
    }()
    f()
}

// onceFuncPanic is the panicking helper used by TestOnceFuncPanicTraceback.
func onceFuncPanic() {
    panic("x")
}
// TestOnceXGC checks that the function passed to OnceFunc/OnceValue/OnceValues
// becomes garbage collectable after its first call, even while the returned
// wrapper is still reachable.
func TestOnceXGC(t *testutil.TestRunner) {
    fns := map[string]func([]byte) func(){
        "OnceFunc": func(buf []byte) func() {
            return sync.OnceFunc(func() { buf[0] = 1 })
        },
        "OnceValue": func(buf []byte) func() {
            f := sync.OnceValue(func() any { buf[0] = 1; return nil })
            return func() { f() }
        },
        "OnceValues": func(buf []byte) func() {
            f := sync.OnceValues(func() (any, any) { buf[0] = 1; return nil, nil })
            return func() { f() }
        },
    }
    for n, fn := range fns {
        t.Run(n, func(t testing.TB) {
            buf := make([]byte, 1024)
            // gc flips to true once buf's finalizer runs; the wrapped
            // closure is what keeps buf alive before the first call.
            var gc atomic.Bool
            runtime.SetFinalizer(&buf[0], func(_ *byte) {
                gc.Store(true)
            })
            f := fn(buf)
            gcwaitfin()
            if gc.Load() != false {
                t.Fatal("wrapped function garbage collected too early")
            }
            f()
            gcwaitfin()
            if gc.Load() != true {
                // Even if f is still alive, the function passed to Once(Func|Value|Values)
                // is not kept alive after the first call to f.
                t.Fatal("wrapped function should be garbage collected, but still live")
            }
            f()
        })
    }
}

// gcwaitfin performs garbage collection and waits for all finalizers to run.
func gcwaitfin() {
    runtime.GC()
    runtime_blockUntilEmptyFinalizerQueue(math.MaxInt64)
}

//go:linkname runtime_blockUntilEmptyFinalizerQueue runtime.blockUntilEmptyFinalizerQueue
func runtime_blockUntilEmptyFinalizerQueue(int64) bool
// This file is based on code written by The Go Authors.
// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/export_test.go
//
// --- Original License Notice ---
//
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
// Export for testing.
// var Runtime_Semacquire = runtime_Semacquire
// var Runtime_Semrelease = runtime_Semrelease

// Runtime_procPin and Runtime_procUnpin re-export the runtime's
// goroutine-to-P pinning primitives for the pool tests.
var Runtime_procPin = runtime_procPin
var Runtime_procUnpin = runtime_procUnpin

// poolDequeue testing.
//
// PoolDequeue abstracts over poolDequeue and poolChain so the same test
// driver can exercise both implementations.
type PoolDequeue interface {
    PushHead(val any) bool
    PopHead() (any, bool)
    PopTail() (any, bool)
}

// NewPoolDequeue returns a fixed-size dequeue with capacity n.
func NewPoolDequeue(n int) PoolDequeue {
    d := &poolDequeue{
        vals: make([]eface, n),
    }
    // For testing purposes, set the head and tail indexes close
    // to wrapping around.
    d.headTail.Store(d.pack(1<<dequeueBits-500, 1<<dequeueBits-500))
    return d
}

func (d *poolDequeue) PushHead(val any) bool {
    return d.pushHead(val)
}

func (d *poolDequeue) PopHead() (any, bool) {
    return d.popHead()
}

func (d *poolDequeue) PopTail() (any, bool) {
    return d.popTail()
}

// NewPoolChain returns an empty dynamically sized dequeue.
func NewPoolChain() PoolDequeue {
    return new(poolChain)
}

// poolChain's pushHead cannot fail, so PushHead always reports true.
func (c *poolChain) PushHead(val any) bool {
    c.pushHead(val)
    return true
}

func (c *poolChain) PopHead() (any, bool) {
    return c.popHead()
}

func (c *poolChain) PopTail() (any, bool) {
    return c.popTail()
}
...@@ -3,3 +3,6 @@ module pool ...@@ -3,3 +3,6 @@ module pool
go 1.22 go 1.22
toolchain go1.22.0 toolchain go1.22.0
require utils v0.0.0
replace utils => ../../utils
...@@ -2,37 +2,19 @@ package main ...@@ -2,37 +2,19 @@ package main
import ( import (
"fmt" "fmt"
"sync"
"utils/testutil"
) )
func main() { func main() {
var x sync.Pool testutil.RunTest(TestPool, "TestPool")
testutil.RunTest(TestPoolNew, "TestPoolNew")
x.Put(1) testutil.RunTest(TestPoolGC, "TestPoolGC")
x.Put(2) testutil.RunTest(TestPoolRelease, "TestPoolRelease")
testutil.RunTest(TestPoolStress, "TestPoolStress")
// try some concurrency! testutil.RunTest(TestPoolDequeue, "TestPoolDequeue")
var wg sync.WaitGroup testutil.RunTest(TestPoolChain, "TestPoolChain")
wg.Add(2) testutil.RunTest(TestNilPool, "TestNilPool")
go func() {
x.Put(3)
wg.Done()
}()
go func() {
x.Put(4)
wg.Done()
}()
wg.Wait()
wg.Add(4)
for i := 0; i < 4; i++ {
go func() {
x.Get()
wg.Done()
}()
}
wg.Wait()
fmt.Println("Pool test passed") fmt.Println("Pool test passed")
} }
// This file is based on code written by The Go Authors.
// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/pool_test.go
//
// --- Original License Notice ---
//
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
import (
"runtime"
"runtime/debug"
. "sync"
"sync/atomic"
"testing"
"time"
"utils/testutil"
)
var short bool = true
// TestPool checks basic Put/Get ordering on a pinned P and the two-GC
// victim-cache eviction behavior of sync.Pool.
func TestPool(t *testutil.TestRunner) {
    // disable GC so we can control when it happens.
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    var p Pool
    if p.Get() != nil {
        t.Fatal("expected empty")
    }

    // Make sure that the goroutine doesn't migrate to another P
    // between Put and Get calls.
    Runtime_procPin()
    p.Put("a")
    p.Put("b")
    if g := p.Get(); g != "a" {
        t.Fatalf("got %#v; want a", g)
    }
    if g := p.Get(); g != "b" {
        t.Fatalf("got %#v; want b", g)
    }
    if g := p.Get(); g != nil {
        t.Fatalf("got %#v; want nil", g)
    }
    Runtime_procUnpin()

    // Put in a large number of objects so they spill into
    // stealable space.
    for i := 0; i < 100; i++ {
        p.Put("c")
    }
    // After one GC, the victim cache should keep them alive.
    runtime.GC()
    if g := p.Get(); g != "c" {
        t.Fatalf("got %#v; want c after GC", g)
    }
    // A second GC should drop the victim cache.
    runtime.GC()
    if g := p.Get(); g != nil {
        t.Fatalf("got %#v; want nil after second GC", g)
    }
}

// TestPoolNew checks that Pool.New is invoked only when the pool has
// nothing cached.
func TestPoolNew(t *testutil.TestRunner) {
    // disable GC so we can control when it happens.
    defer debug.SetGCPercent(debug.SetGCPercent(-1))

    i := 0
    p := Pool{
        New: func() any {
            i++
            return i
        },
    }
    if v := p.Get(); v != 1 {
        t.Fatalf("got %v; want 1", v)
    }
    if v := p.Get(); v != 2 {
        t.Fatalf("got %v; want 2", v)
    }

    // Make sure that the goroutine doesn't migrate to another P
    // between Put and Get calls.
    Runtime_procPin()
    p.Put(42)
    if v := p.Get(); v != 42 {
        t.Fatalf("got %v; want 42", v)
    }
    Runtime_procUnpin()

    if v := p.Get(); v != 3 {
        t.Fatalf("got %v; want 3", v)
    }
}
// Test that Pool does not hold pointers to previously cached resources.
func TestPoolGC(t *testutil.TestRunner) {
    testPool(t, true)
}

// Test that Pool releases resources on GC.
func TestPoolRelease(t *testutil.TestRunner) {
    testPool(t, false)
}

// testPool puts N finalizer-tracked objects into a pool (optionally
// draining them again first) and checks that nearly all of them are
// finalized after a few GC cycles.
func testPool(t testing.TB, drain bool) {
    var p Pool
    const N = 100
loop:
    for try := 0; try < 3; try++ {
        if try == 1 && short {
            break
        }
        // fin counts finalized objects; fin1 holds the last observed count.
        var fin, fin1 uint32
        for i := 0; i < N; i++ {
            v := new(string)
            runtime.SetFinalizer(v, func(vv *string) {
                atomic.AddUint32(&fin, 1)
            })
            p.Put(v)
        }
        if drain {
            for i := 0; i < N; i++ {
                p.Get()
            }
        }
        for i := 0; i < 5; i++ {
            runtime.GC()
            time.Sleep(time.Duration(i*100+10) * time.Millisecond)
            // 1 pointer can remain on stack or elsewhere
            if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
                continue loop
            }
        }
        t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
    }
}
// TestPoolStress hammers one Pool from 10 goroutines, checking that any
// non-nil value coming back out is one the test put in.
func TestPoolStress(t *testutil.TestRunner) {
    const P = 10
    N := int(1e6)
    if short {
        N /= 100
    }
    var p Pool
    done := make(chan bool)
    for i := 0; i < P; i++ {
        go func() {
            var v any = 0
            for j := 0; j < N; j++ {
                if v == nil {
                    v = 0
                }
                p.Put(v)
                v = p.Get()
                // Get may legitimately return nil; any non-nil value must
                // be the 0 this loop puts in.
                if v != nil && v.(int) != 0 {
                    t.Errorf("expect 0, got %v", v)
                    break
                }
            }
            done <- true
        }()
    }
    for i := 0; i < P; i++ {
        <-done
    }
}

// TestPoolDequeue exercises the fixed-size lock-free dequeue.
func TestPoolDequeue(t *testutil.TestRunner) {
    testPoolDequeue(t, NewPoolDequeue(16))
}

// TestPoolChain exercises the dynamically sized dequeue.
func TestPoolChain(t *testutil.TestRunner) {
    testPoolDequeue(t, NewPoolChain())
}
// testPoolDequeue drives d with one producer (PushHead, occasional
// PopHead) and P-1 consumers (PopTail), then verifies that every value
// 0..N-1 was observed exactly once.
func testPoolDequeue(t testing.TB, d PoolDequeue) {
    const P = 10
    var N int = 2e6
    if short {
        N = 1e3
    }
    // have[v] counts how many times value v was popped; stop signals the
    // consumers once the final value has been seen.
    have := make([]int32, N)
    var stop int32
    var wg WaitGroup
    record := func(val int) {
        atomic.AddInt32(&have[val], 1)
        if val == N-1 {
            atomic.StoreInt32(&stop, 1)
        }
    }

    // Start P-1 consumers.
    for i := 1; i < P; i++ {
        wg.Add(1)
        go func() {
            fail := 0
            for atomic.LoadInt32(&stop) == 0 {
                val, ok := d.PopTail()
                if ok {
                    fail = 0
                    record(val.(int))
                } else {
                    // Speed up the test by
                    // allowing the pusher to run.
                    if fail++; fail%100 == 0 {
                        runtime.Gosched()
                    }
                }
            }
            wg.Done()
        }()
    }

    // Start 1 producer.
    nPopHead := 0
    wg.Add(1)
    go func() {
        for j := 0; j < N; j++ {
            for !d.PushHead(j) {
                // Allow a popper to run.
                runtime.Gosched()
            }
            if j%10 == 0 {
                val, ok := d.PopHead()
                if ok {
                    nPopHead++
                    record(val.(int))
                }
            }
        }
        wg.Done()
    }()
    wg.Wait()

    // Check results.
    for i, count := range have {
        if count != 1 {
            t.Errorf("expected have[%d] = 1, got %d", i, count)
        }
    }
    // Check that at least some PopHeads succeeded. We skip this
    // check in short mode because it's common enough that the
    // queue will stay nearly empty all the time and a PopTail
    // will happen during the window between every PushHead and
    // PopHead.
    if !short && nPopHead == 0 {
        t.Errorf("popHead never succeeded")
    }
}
// TestNilPool verifies that Get and Put on a nil *Pool panic.
func TestNilPool(t *testutil.TestRunner) {
    expectPanic := func() {
        if recover() == nil {
            t.Error("expected panic")
        }
    }
    var p *Pool
    t.Run("Get", func(t testing.TB) {
        defer expectPanic()
        if p.Get() != nil {
            t.Error("expected empty")
        }
        t.Error("should have panicked already")
    })
    t.Run("Put", func(t testing.TB) {
        defer expectPanic()
        p.Put("a")
        t.Error("should have panicked already")
    })
}
// This file is based on code written by The Go Authors.
// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/poolqueue.go
//
// --- Original License Notice ---
//
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
import (
"sync/atomic"
"unsafe"
)
// poolDequeue is a lock-free fixed-size single-producer,
// multi-consumer queue. The single producer can both push and pop
// from the head, and consumers can pop from the tail.
//
// It has the added feature that it nils out unused slots to avoid
// unnecessary retention of objects. This is important for sync.Pool,
// but not typically a property considered in the literature.
type poolDequeue struct {
    // headTail packs together a 32-bit head index and a 32-bit
    // tail index. Both are indexes into vals modulo len(vals)-1.
    //
    // tail = index of oldest data in queue
    // head = index of next slot to fill
    //
    // Slots in the range [tail, head) are owned by consumers.
    // A consumer continues to own a slot outside this range until
    // it nils the slot, at which point ownership passes to the
    // producer.
    //
    // The head index is stored in the most-significant bits so
    // that we can atomically add to it and the overflow is
    // harmless.
    headTail atomic.Uint64

    // vals is a ring buffer of interface{} values stored in this
    // dequeue. The size of this must be a power of 2.
    //
    // vals[i].typ is nil if the slot is empty and non-nil
    // otherwise. A slot is still in use until *both* the tail
    // index has moved beyond it and typ has been set to nil. This
    // is set to nil atomically by the consumer and read
    // atomically by the producer.
    vals []eface
}

// eface mirrors the two-word runtime layout of an empty interface value,
// giving the dequeue atomic access to the type and value words separately.
type eface struct {
    typ, val unsafe.Pointer
}

// dequeueBits is the width in bits of each index half packed into headTail.
const dequeueBits = 32

// dequeueLimit is the maximum size of a poolDequeue.
//
// This must be at most (1<<dequeueBits)/2 because detecting fullness
// depends on wrapping around the ring buffer without wrapping around
// the index. We divide by 4 so this fits in an int on 32-bit.
const dequeueLimit = (1 << dequeueBits) / 4

// dequeueNil is used in poolDequeue to represent interface{}(nil).
// Since we use nil to represent empty slots, we need a sentinel value
// to represent nil.
type dequeueNil *struct{}
// unpack splits a packed headTail word into its head (upper 32 bits)
// and tail (lower 32 bits) indexes.
func (d *poolDequeue) unpack(ptrs uint64) (head, tail uint32) {
    const mask = 1<<dequeueBits - 1
    return uint32((ptrs >> dequeueBits) & mask), uint32(ptrs & mask)
}
// pack combines head and tail indexes into a single headTail word,
// with head in the upper 32 bits.
func (d *poolDequeue) pack(head, tail uint32) uint64 {
    const mask = 1<<dequeueBits - 1
    hi := uint64(head) << dequeueBits
    lo := uint64(tail & mask)
    return hi | lo
}
// pushHead adds val at the head of the queue. It returns false if the
// queue is full. It must only be called by a single producer.
func (d *poolDequeue) pushHead(val any) bool {
    ptrs := d.headTail.Load()
    head, tail := d.unpack(ptrs)
    if (tail+uint32(len(d.vals)))&(1<<dequeueBits-1) == head {
        // Queue is full.
        return false
    }
    slot := &d.vals[head&uint32(len(d.vals)-1)]

    // Check if the head slot has been released by popTail.
    typ := atomic.LoadPointer(&slot.typ)
    if typ != nil {
        // Another goroutine is still cleaning up the tail, so
        // the queue is actually still full.
        return false
    }

    // The head slot is free, so we own it.
    if val == nil {
        val = dequeueNil(nil)
    }
    // Store the interface value into the slot (writes both typ and val words).
    *(*any)(unsafe.Pointer(slot)) = val

    // Increment head. This passes ownership of slot to popTail
    // and acts as a store barrier for writing the slot.
    d.headTail.Add(1 << dequeueBits)
    return true
}

// popHead removes and returns the element at the head of the queue.
// It returns false if the queue is empty. It must only be called by a
// single producer.
func (d *poolDequeue) popHead() (any, bool) {
    var slot *eface
    for {
        ptrs := d.headTail.Load()
        head, tail := d.unpack(ptrs)
        if tail == head {
            // Queue is empty.
            return nil, false
        }

        // Confirm tail and decrement head. We do this before
        // reading the value to take back ownership of this
        // slot.
        head--
        ptrs2 := d.pack(head, tail)
        if d.headTail.CompareAndSwap(ptrs, ptrs2) {
            // We successfully took back slot.
            slot = &d.vals[head&uint32(len(d.vals)-1)]
            break
        }
    }

    val := *(*any)(unsafe.Pointer(slot))
    if val == dequeueNil(nil) {
        // Translate the sentinel back to a real nil.
        val = nil
    }
    // Zero the slot. Unlike popTail, this isn't racing with
    // pushHead, so we don't need to be careful here.
    *slot = eface{}
    return val, true
}

// popTail removes and returns the element at the tail of the queue.
// It returns false if the queue is empty. It may be called by any
// number of consumers.
func (d *poolDequeue) popTail() (any, bool) {
    var slot *eface
    for {
        ptrs := d.headTail.Load()
        head, tail := d.unpack(ptrs)
        if tail == head {
            // Queue is empty.
            return nil, false
        }

        // Confirm head and tail (for our speculative check
        // above) and increment tail. If this succeeds, then
        // we own the slot at tail.
        ptrs2 := d.pack(head, tail+1)
        if d.headTail.CompareAndSwap(ptrs, ptrs2) {
            // Success.
            slot = &d.vals[tail&uint32(len(d.vals)-1)]
            break
        }
    }

    // We now own slot.
    val := *(*any)(unsafe.Pointer(slot))
    if val == dequeueNil(nil) {
        // Translate the sentinel back to a real nil.
        val = nil
    }

    // Tell pushHead that we're done with this slot. Zeroing the
    // slot is also important so we don't leave behind references
    // that could keep this object live longer than necessary.
    //
    // We write to val first and then publish that we're done with
    // this slot by atomically writing to typ.
    slot.val = nil
    atomic.StorePointer(&slot.typ, nil)
    // At this point pushHead owns the slot.
    return val, true
}
// poolChain is a dynamically-sized version of poolDequeue.
//
// This is implemented as a doubly-linked list queue of poolDequeues
// where each dequeue is double the size of the previous one. Once a
// dequeue fills up, this allocates a new one and only ever pushes to
// the latest dequeue. Pops happen from the other end of the list and
// once a dequeue is exhausted, it gets removed from the list.
type poolChain struct {
    // head is the poolDequeue to push to. This is only accessed
    // by the producer, so doesn't need to be synchronized.
    head *poolChainElt

    // tail is the poolDequeue to popTail from. This is accessed
    // by consumers, so reads and writes must be atomic.
    tail *poolChainElt
}

// poolChainElt is one link in a poolChain: a poolDequeue plus the
// atomic pointers tying it into the list.
type poolChainElt struct {
    poolDequeue

    // next and prev link to the adjacent poolChainElts in this
    // poolChain.
    //
    // next is written atomically by the producer and read
    // atomically by the consumer. It only transitions from nil to
    // non-nil.
    //
    // prev is written atomically by the consumer and read
    // atomically by the producer. It only transitions from
    // non-nil to nil.
    next, prev *poolChainElt
}

// storePoolChainElt atomically publishes v into *pp.
func storePoolChainElt(pp **poolChainElt, v *poolChainElt) {
    atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(pp)), unsafe.Pointer(v))
}

// loadPoolChainElt atomically reads *pp.
func loadPoolChainElt(pp **poolChainElt) *poolChainElt {
    return (*poolChainElt)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(pp))))
}
// pushHead adds val to the newest dequeue in the chain, growing the
// chain with a dequeue of double the size when the current one is full.
// It must only be called by the single producer.
func (c *poolChain) pushHead(val any) {
    d := c.head
    if d == nil {
        // Initialize the chain.
        const initSize = 8 // Must be a power of 2
        d = new(poolChainElt)
        d.vals = make([]eface, initSize)
        c.head = d
        storePoolChainElt(&c.tail, d)
    }

    if d.pushHead(val) {
        return
    }

    // The current dequeue is full. Allocate a new one of twice
    // the size.
    newSize := len(d.vals) * 2
    if newSize >= dequeueLimit {
        // Can't make it any bigger.
        newSize = dequeueLimit
    }

    d2 := &poolChainElt{prev: d}
    d2.vals = make([]eface, newSize)
    c.head = d2
    storePoolChainElt(&d.next, d2)
    // The fresh dequeue is empty, so this push cannot fail.
    d2.pushHead(val)
}

// popHead pops from the newest dequeue, walking back through older
// dequeues if the newest is empty. It must only be called by the
// single producer.
func (c *poolChain) popHead() (any, bool) {
    d := c.head
    for d != nil {
        if val, ok := d.popHead(); ok {
            return val, ok
        }
        // There may still be unconsumed elements in the
        // previous dequeue, so try backing up.
        d = loadPoolChainElt(&d.prev)
    }
    return nil, false
}

// popTail pops from the oldest dequeue in the chain, dropping drained
// dequeues as it goes. It may be called by any number of consumers.
func (c *poolChain) popTail() (any, bool) {
    d := loadPoolChainElt(&c.tail)
    if d == nil {
        return nil, false
    }

    for {
        // It's important that we load the next pointer
        // *before* popping the tail. In general, d may be
        // transiently empty, but if next is non-nil before
        // the pop and the pop fails, then d is permanently
        // empty, which is the only condition under which it's
        // safe to drop d from the chain.
        d2 := loadPoolChainElt(&d.next)

        if val, ok := d.popTail(); ok {
            return val, ok
        }

        if d2 == nil {
            // This is the only dequeue. It's empty right
            // now, but could be pushed to in the future.
            return nil, false
        }

        // The tail of the chain has been drained, so move on
        // to the next dequeue. Try to drop it from the chain
        // so the next pop doesn't have to look at the empty
        // dequeue again.
        if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&c.tail)), unsafe.Pointer(d), unsafe.Pointer(d2)) {
            // We won the race. Clear the prev pointer so
            // the garbage collector can collect the empty
            // dequeue and so popHead doesn't back up
            // further than necessary.
            storePoolChainElt(&d2.prev, nil)
        }
        d = d2
    }
}
package main
import (
_ "unsafe" // Required for go:linkname
)
// runtime_procPin pins the calling goroutine to its current P and returns
// an id for it; runtime_procUnpin releases the pin. Both reach runtime
// internals via go:linkname.
//
//go:linkname runtime_procPin runtime.procPin
func runtime_procPin() int

//go:linkname runtime_procUnpin runtime.procUnpin
func runtime_procUnpin()
module mtvalue
go 1.22
toolchain go1.22.0
require utils v0.0.0
replace utils => ../../utils
package main
import (
"fmt"
"utils/testutil"
)
// main runs the ported sync/atomic.Value tests in order and prints the
// success marker consumed by the test harness.
func main() {
    tests := []struct {
        fn   func(t *testutil.TestRunner)
        name string
    }{
        {TestValue, "TestValue"},
        {TestValueLarge, "TestValueLarge"},
        {TestValuePanic, "TestValuePanic"},
        {TestValueConcurrent, "TestValueConcurrent"},
        {TestValue_Swap, "TestValue_Swap"},
        {TestValueSwapConcurrent, "TestValueSwapConcurrent"},
        {TestValue_CompareAndSwap, "TestValue_CompareAndSwap"},
        {TestValueCompareAndSwapConcurrent, "TestValueCompareAndSwapConcurrent"},
    }
    for _, tc := range tests {
        testutil.RunTest(tc.fn, tc.name)
    }
    fmt.Println("Value tests passed")
}
// This file is based on code written by The Go Authors.
// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/atomic/value_test.go
//
// --- Original License Notice ---
//
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
import (
"math/rand"
"runtime"
"strconv"
"sync"
"sync/atomic"
. "sync/atomic"
"testing"
"utils/testutil"
)
var short bool = true
// TestValue checks basic Load/Store round-trips on an atomic Value.
func TestValue(t *testutil.TestRunner) {
    var v Value
    if v.Load() != nil {
        t.Fatal("initial Value is not nil")
    }
    v.Store(42)
    loaded := v.Load()
    if n, ok := loaded.(int); !ok || n != 42 {
        t.Fatalf("wrong value: got %+v, want 42", loaded)
    }
    v.Store(84)
    loaded = v.Load()
    if n, ok := loaded.(int); !ok || n != 84 {
        t.Fatalf("wrong value: got %+v, want 84", loaded)
    }
}
// TestValueLarge checks Load/Store round-trips with string values of
// different lengths.
func TestValueLarge(t *testutil.TestRunner) {
    var v Value
    v.Store("foo")
    loaded := v.Load()
    if s, ok := loaded.(string); !ok || s != "foo" {
        t.Fatalf("wrong value: got %+v, want foo", loaded)
    }
    v.Store("barbaz")
    loaded = v.Load()
    if s, ok := loaded.(string); !ok || s != "barbaz" {
        t.Fatalf("wrong value: got %+v, want barbaz", loaded)
    }
}
func TestValuePanic(t *testutil.TestRunner) {
const nilErr = "sync/atomic: store of nil value into Value"
const badErr = "sync/atomic: store of inconsistently typed value into Value"
var v Value
func() {
defer func() {
err := recover()
if err != nilErr {
t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr)
}
}()
v.Store(nil)
}()
v.Store(42)
func() {
defer func() {
err := recover()
if err != badErr {
t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, badErr)
}
}()
v.Store("foo")
}()
func() {
defer func() {
err := recover()
if err != nilErr {
t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr)
}
}()
v.Store(nil)
}()
}
// TestValueConcurrent has many goroutines concurrently store and load values
// of several widths (16/32/64-bit ints and complex128) into one Value, and
// checks every load returns one of the candidate values — i.e. a load never
// observes a torn or mixed write.
func TestValueConcurrent(t *testutil.TestRunner) {
	tests := [][]any{
		{uint16(0), ^uint16(0), uint16(1 + 2<<8), uint16(3 + 4<<8)},
		{uint32(0), ^uint32(0), uint32(1 + 2<<16), uint32(3 + 4<<16)},
		{uint64(0), ^uint64(0), uint64(1 + 2<<32), uint64(3 + 4<<32)},
		{complex(0, 0), complex(1, 2), complex(3, 4), complex(5, 6)},
	}
	p := 4 * runtime.GOMAXPROCS(0)
	N := int(1e5)
	if short {
		// Scaled down from the upstream Go test so the emulated run stays fast.
		p /= 2
		//N = 1e3
		N = 1e2
	}
	for _, test := range tests {
		var v Value
		done := make(chan bool, p)
		for i := 0; i < p; i++ {
			go func() {
				r := rand.New(rand.NewSource(rand.Int63()))
				expected := true
			loop:
				for j := 0; j < N; j++ {
					x := test[r.Intn(len(test))]
					v.Store(x)
					x = v.Load()
					// Any candidate value is a legal load result; anything
					// else means the load observed a corrupted value.
					for _, x1 := range test {
						if x == x1 {
							continue loop
						}
					}
					t.Logf("loaded unexpected value %+v, want %+v", x, test)
					expected = false
					break
				}
				done <- expected
			}()
		}
		// Collect one verdict per goroutine; fail fast on the first bad one.
		for i := 0; i < p; i++ {
			if !<-done {
				t.FailNow()
			}
		}
	}
}
// BenchmarkValueRead measures parallel Load throughput on a Value holding a
// *int, sanity-checking each loaded pointer along the way.
func BenchmarkValueRead(b *testing.B) {
	var v Value
	v.Store(new(int))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			x := v.Load().(*int)
			if *x != 0 {
				b.Fatalf("wrong value: got %v, want 0", *x)
			}
		}
	})
}
// Value_SwapTests enumerates the Value.Swap cases run by TestValue_Swap:
// init seeds the Value first (skipped when nil), new is the value swapped in,
// want is the expected old value returned by Swap, and err is the expected
// panic value (nil when the swap should succeed without panicking).
var Value_SwapTests = []struct {
	init any
	new any
	want any
	err any
}{
	{init: nil, new: nil, err: "sync/atomic: swap of nil value into Value"},
	{init: nil, new: true, want: nil, err: nil},
	{init: true, new: "", err: "sync/atomic: swap of inconsistently typed value into Value"},
	{init: true, new: false, want: true, err: nil},
}
// TestValue_Swap runs each Value_SwapTests case as a subtest, checking both
// the old value returned by Swap and the value left stored afterwards, and
// validating panic expectations via a deferred recover.
func TestValue_Swap(t *testutil.TestRunner) {
	for i, tt := range Value_SwapTests {
		t.Run(strconv.Itoa(i), func(t testing.TB) {
			var v Value
			if tt.init != nil {
				v.Store(tt.init)
			}
			// Runs after the checks below return or panic out of them:
			// a panic must occur exactly when tt.err is non-nil.
			defer func() {
				err := recover()
				switch {
				case tt.err == nil && err != nil:
					t.Errorf("should not panic, got %v", err)
				case tt.err != nil && err == nil:
					t.Errorf("should panic %v, got <nil>", tt.err)
				}
			}()
			if got := v.Swap(tt.new); got != tt.want {
				t.Errorf("got %v, want %v", got, tt.want)
			}
			if got := v.Load(); got != tt.new {
				t.Errorf("got %v, want %v", got, tt.new)
			}
		})
	}
}
// TestValueSwapConcurrent swaps every value in [0, m*n) into a single Value
// from m concurrent goroutines (goroutine i owns the range [i, i+n)). Each
// swapped-out old value is accumulated into count; at the end, count plus the
// value still stored must equal 0+1+...+(m*n-1), proving no swap was lost or
// observed twice.
func TestValueSwapConcurrent(t *testutil.TestRunner) {
	var v Value
	var count uint64
	var g sync.WaitGroup
	var m, n uint64 = 10000, 10000
	if short {
		// Scaled down from the upstream Go test for the emulated environment.
		//m = 1000
		//n = 1000
		m = 10
		n = 10
	}
	for i := uint64(0); i < m*n; i += n {
		i := i // capture per-iteration value (pre-Go 1.22 loop semantics)
		g.Add(1)
		go func() {
			var c uint64
			for new := i; new < i+n; new++ {
				// The very first swap returns nil (nothing stored yet).
				if old := v.Swap(new); old != nil {
					c += old.(uint64)
				}
			}
			atomic.AddUint64(&count, c)
			g.Done()
		}()
	}
	g.Wait()
	if want, got := (m*n-1)*(m*n)/2, count+v.Load().(uint64); got != want {
		t.Errorf("sum from 0 to %d was %d, want %v", m*n-1, got, want)
	}
}
// heapA and heapB are distinct variables holding equal struct values:
// CompareAndSwap compares interface values with ==, so a Value seeded with
// heapA must match an old value of heapB.
var heapA, heapB = struct{ uint }{0}, struct{ uint }{0}

// Value_CompareAndSwapTests enumerates the CompareAndSwap cases run by
// TestValue_CompareAndSwap: init seeds the Value (skipped when nil), old/new
// are the CAS arguments, want is the expected boolean result, and err is the
// expected panic value (nil when the call should not panic).
var Value_CompareAndSwapTests = []struct {
	init any
	new any
	old any
	want bool
	err any
}{
	{init: nil, new: nil, old: nil, err: "sync/atomic: compare and swap of nil value into Value"},
	{init: nil, new: true, old: "", err: "sync/atomic: compare and swap of inconsistently typed values into Value"},
	{init: nil, new: true, old: true, want: false, err: nil},
	{init: nil, new: true, old: nil, want: true, err: nil},
	{init: true, new: "", err: "sync/atomic: compare and swap of inconsistently typed value into Value"},
	{init: true, new: true, old: false, want: false, err: nil},
	{init: true, new: true, old: true, want: true, err: nil},
	{init: heapA, new: struct{ uint }{1}, old: heapB, want: true, err: nil},
}
// TestValue_CompareAndSwap runs each Value_CompareAndSwapTests case as a
// subtest, checking the CAS result and validating panic expectations via a
// deferred recover.
func TestValue_CompareAndSwap(t *testutil.TestRunner) {
	for i, tt := range Value_CompareAndSwapTests {
		t.Run(strconv.Itoa(i), func(t testing.TB) {
			var v Value
			if tt.init != nil {
				v.Store(tt.init)
			}
			// Runs after the CAS below returns or panics out of it:
			// a panic must occur exactly when tt.err is non-nil.
			defer func() {
				err := recover()
				switch {
				case tt.err == nil && err != nil:
					t.Errorf("got %v, wanted no panic", err)
				case tt.err != nil && err == nil:
					t.Errorf("did not panic, want %v", tt.err)
				}
			}()
			if got := v.CompareAndSwap(tt.old, tt.new); got != tt.want {
				t.Errorf("got %v, want %v", got, tt.want)
			}
		})
	}
}
// TestValueCompareAndSwapConcurrent increments a Value from 0 to m*n using
// CompareAndSwap from m goroutines. Goroutine i performs the increments for
// values congruent to i mod m (j = i, i+m, i+2m, ...), so together the
// goroutines perform every increment exactly once regardless of interleaving.
func TestValueCompareAndSwapConcurrent(t *testutil.TestRunner) {
	var v Value
	var w sync.WaitGroup
	v.Store(0)
	m, n := 1000, 100
	if short {
		// Scaled down from the upstream Go test for the emulated environment.
		//m = 100
		//n = 100
		m = 10
		n = 10
	}
	for i := 0; i < m; i++ {
		i := i // capture per-iteration value (pre-Go 1.22 loop semantics)
		w.Add(1)
		go func() {
			// j advances by m only after this goroutine's own CAS succeeds;
			// until then, yield and retry while other goroutines move v.
			for j := i; j < m*n; runtime.Gosched() {
				if v.CompareAndSwap(j, j+1) {
					j += m
				}
			}
			w.Done()
		}()
	}
	w.Wait()
	if stop := v.Load().(int); stop != m*n {
		t.Errorf("did not get to %v, stopped at %v", m*n, stop)
	}
}
...@@ -3,3 +3,6 @@ module wg ...@@ -3,3 +3,6 @@ module wg
go 1.22 go 1.22
toolchain go1.22.0 toolchain go1.22.0
require utils v0.0.0
replace utils => ../../utils
...@@ -2,39 +2,15 @@ package main ...@@ -2,39 +2,15 @@ package main
import ( import (
"fmt" "fmt"
"sync"
"sync/atomic" "utils/testutil"
) )
func main() { func main() {
// try some concurrency! testutil.RunTest(TestWaitGroup, "TestWaitGroup")
var wg sync.WaitGroup testutil.RunTest(TestWaitGroupMisuse, "TestWaitGroupMisuse")
wg.Add(2) testutil.RunTest(TestWaitGroupRace, "TestWaitGroupRace")
var x atomic.Int32 testutil.RunTest(TestWaitGroupAlign, "TestWaitGroupAlign")
go func() {
x.Add(2)
wg.Done()
}()
go func() {
x.Add(40)
wg.Done()
}()
wg.Wait()
fmt.Printf("waitgroup result: %d\n", x.Load())
// channels fmt.Println("WaitGroup tests passed")
a := make(chan int, 1)
b := make(chan int)
c := make(chan int)
go func() {
t0 := <-a
b <- t0
}()
go func() {
t1 := <-b
c <- t1
}()
a <- 1234
out := <-c
fmt.Printf("channels result: %d\n", out)
} }
// This file is based on code written by The Go Authors.
// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/waitgroup_test.go
//
// --- Original License Notice ---
//
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
import (
. "sync"
"sync/atomic"
"testing"
"utils/testutil"
)
// testWaitGroup checks WaitGroup barrier behavior with two groups: each of n
// goroutines counts itself in via wg1.Done, then blocks on wg2.Wait. After
// wg1.Wait returns (all goroutines are running), the main goroutine verifies
// before each wg2.Done that nobody has been released early; only when wg2's
// count reaches zero must every goroutine get through.
func testWaitGroup(t testing.TB, wg1 *WaitGroup, wg2 *WaitGroup) {
	n := 16
	wg1.Add(n)
	wg2.Add(n)
	exited := make(chan bool, n)
	for i := 0; i != n; i++ {
		go func() {
			wg1.Done()
			wg2.Wait()
			exited <- true
		}()
	}
	wg1.Wait()
	for i := 0; i != n; i++ {
		// Non-blocking probe: any message here means a goroutine passed
		// the wg2 barrier before the count hit zero.
		select {
		case <-exited:
			t.Fatal("WaitGroup released group too soon")
		default:
		}
		wg2.Done()
	}
	for i := 0; i != n; i++ {
		<-exited // Will block if barrier fails to unlock someone.
	}
}
// TestWaitGroup exercises the two-group barrier repeatedly with the same
// WaitGroup values, so any state left behind by one round would break the
// next.
func TestWaitGroup(t *testutil.TestRunner) {
	wg1 := new(WaitGroup)
	wg2 := new(WaitGroup)
	for round := 0; round < 8; round++ {
		testWaitGroup(t, wg1, wg2)
	}
}
// TestWaitGroupMisuse checks that driving a WaitGroup's counter below zero
// panics with the documented message.
func TestWaitGroupMisuse(t *testutil.TestRunner) {
	defer func() {
		if err := recover(); err != "sync: negative WaitGroup counter" {
			t.Fatalf("Unexpected panic: %#v", err)
		}
	}()
	wg := new(WaitGroup)
	wg.Add(1)
	wg.Done()
	wg.Done() // second Done drives the counter negative: must panic
	t.Fatal("Should panic")
}
// TestWaitGroupRace repeatedly races two goroutines against Wait: after Wait
// returns, both increments must be visible, otherwise Wait woke up
// spuriously.
func TestWaitGroupRace(t *testutil.TestRunner) {
	// Run this test for about 1ms.
	for iter := 0; iter < 1000; iter++ {
		wg := &WaitGroup{}
		n := new(int32)
		// Spawn two goroutines that each bump the counter once.
		for g := 0; g < 2; g++ {
			wg.Add(1)
			go func() {
				atomic.AddInt32(n, 1)
				wg.Done()
			}()
		}
		// Wait for both; their writes must be visible afterwards.
		wg.Wait()
		if atomic.LoadInt32(n) != 2 {
			t.Fatal("Spurious wakeup from Wait")
		}
	}
}
// TestWaitGroupAlign embeds a WaitGroup after a byte field, giving it odd
// alignment inside the struct; Add/Done/Wait must still work.
func TestWaitGroupAlign(t *testutil.TestRunner) {
	type X struct {
		x byte
		wg WaitGroup
	}
	var s X
	s.wg.Add(1)
	go func(p *X) {
		p.wg.Done()
	}(&s)
	s.wg.Wait()
}
module utilscheck
go 1.22
toolchain go1.22.0
require utils v0.0.0
replace utils => ../../utils
package main
import (
"fmt"
"utils/testutil"
)
// main runs ShouldFail, which must fail. testutil.RunTest exits the process
// with code 1 on failure, so the "Passed" line below is only reached if the
// harness fails to detect the failure.
func main() {
	testutil.RunTest(ShouldFail, "ShouldFail")
	fmt.Println("Passed test that should have failed")
}
// ShouldFail fails unconditionally, verifying that the testutil harness
// reports a plain Fail as a test failure.
func ShouldFail(t *testutil.TestRunner) {
	t.Fail()
}
module utilscheck2
go 1.22
toolchain go1.22.0
require utils v0.0.0
replace utils => ../../utils
package main
import (
"fmt"
"testing"
"utils/testutil"
)
// main runs ShouldFail, which must fail. testutil.RunTest exits the process
// with code 1 on failure, so the "Passed" line below is only reached if the
// harness fails to detect the failure.
func main() {
	testutil.RunTest(ShouldFail, "ShouldFail")
	fmt.Println("Passed test that should have failed")
}
// ShouldFail verifies that a failure inside a subtest is detected: subtest 1
// passes, subtest 2 fails unconditionally.
func ShouldFail(t *testutil.TestRunner) {
	t.Run("subtest 1", func(t testing.TB) {
		// Do something
	})
	t.Run("subtest 2", func(t testing.TB) {
		t.Fail()
	})
}
module utilscheck3
go 1.22
toolchain go1.22.0
require utils v0.0.0
replace utils => ../../utils
package main
import (
"fmt"
"testing"
"utils/testutil"
)
// main runs ShouldFail, which must fail. testutil.RunTest exits the process
// with code 1 on failure, so the "Passed" line below is only reached if the
// harness fails to detect the failure.
func main() {
	testutil.RunTest(ShouldFail, "ShouldFail")
	fmt.Println("Passed test that should have failed")
}
// ShouldFail verifies that a panic inside a subtest is reported as a test
// failure rather than crashing the harness unreported.
func ShouldFail(t *testutil.TestRunner) {
	t.Run("panic test", func(t testing.TB) {
		panic("oops")
	})
}
module utilscheck4
go 1.22
toolchain go1.22.0
require utils v0.0.0
replace utils => ../../utils
package main
import (
"fmt"
"utils/testutil"
)
// main runs ShouldFail, which must fail. testutil.RunTest exits the process
// with code 1 on failure, so the "Passed" line below is only reached if the
// harness fails to detect the failure.
func main() {
	testutil.RunTest(ShouldFail, "ShouldFail")
	fmt.Println("Passed test that should have failed")
}
// ShouldFail verifies that a panic at the top level of a test function is
// reported as a test failure.
func ShouldFail(t *testutil.TestRunner) {
	panic("oops")
}
module utils
go 1.22
toolchain go1.22.0
package testutil
import (
"fmt"
"os"
"runtime"
"sync"
"testing"
)
// RunTest executes testFunc under the given name, printing the outcome to
// stdout. A test failure or panic terminates the whole process with exit
// code 1 (see goRunTest).
func RunTest(testFunc func(*TestRunner), name string) {
	goRunTest(name, testFunc, newTestRunner(name))
}
// TestRunner is the testing.TB implementation handed to ported tests that
// run as standalone programs (outside "go test"). It embeds mockT for the
// pass/fail/skip bookkeeping and keeps the base test name so subtests
// started via Run can derive their display names from it.
type TestRunner struct {
	*mockT
	baseName string
}

// newTestRunner returns a fresh TestRunner for a test with the given name.
func newTestRunner(baseName string) *TestRunner {
	return &TestRunner{mockT: newMockT(), baseName: baseName}
}
// Run executes a subtest, mirroring testing.T.Run. The subtest shares this
// runner's failure state; since goRunTest exits the process when the state
// is failed, the return value is only reachable after a passing (or skipped)
// subtest.
func (r *TestRunner) Run(name string, testFunc func(t testing.TB)) bool {
	testName := r.baseName
	if name != "" {
		// Display as "base (subtest)" so output identifies the parent test.
		testName = fmt.Sprintf("%v (%v)", r.baseName, name)
	}
	var tester testing.TB = r
	goRunTest(testName, testFunc, tester)
	return !r.Failed()
}
// goRunTest executes testFunc against t on a fresh goroutine and reports the
// outcome on stdout. Running on a separate goroutine matters: mockT's
// fail/skip paths call runtime.Goexit, which must terminate only the test
// goroutine, not the caller. A failed or panicking test terminates the whole
// process with exit code 1 so harnesses can detect it from the exit status.
//
// Fix: the panic report previously lacked a trailing newline, so the panic
// detail ran into any subsequent output; it now ends with "\n" like every
// other report line.
func goRunTest[T testing.TB](testName string, testFunc func(t T), t T) {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		// This deferred handler runs on normal return, on panic, and on
		// runtime.Goexit (triggered by mockT fail/skip), so the result is
		// always reported exactly once.
		defer func() {
			if err := recover(); err != nil {
				fmt.Printf("Test panicked: %v\n\t%v\n", testName, err)
				os.Exit(1)
			}
			if t.Failed() {
				fmt.Printf("Test failed: %v\n", testName)
				os.Exit(1)
			} else if t.Skipped() {
				fmt.Printf("Test skipped: %v\n", testName)
			} else {
				fmt.Printf("Test passed: %v\n", testName)
			}
			wg.Done()
		}()
		testFunc(t)
	}()
	wg.Wait()
}
// mockT is a minimal testing.TB implementation for running ported stdlib
// tests as standalone programs. The embedded *testing.T stays nil and exists
// only to satisfy testing.TB (its unexported method); any testing.T method
// not overridden below would nil-deref if called. The mutex guards the
// failed/skipped flags, which tests may touch from multiple goroutines.
type mockT struct {
	*testing.T
	mu sync.Mutex
	failed bool
	skipped bool
}

// Compile-time check that mockT satisfies testing.TB.
var _ testing.TB = (*mockT)(nil)

// newMockT returns a fresh mockT in the passing, non-skipped state.
func newMockT() *mockT {
	return &mockT{}
}
// Cleanup is not supported; calling it fails the test.
func (t *mockT) Cleanup(func()) {
	t.Fatalf("Cleanup not supported")
}

// Error prints its arguments and fails the test. NOTE: unlike
// testing.T.Error, this stops the test goroutine immediately, because fail()
// calls runtime.Goexit.
func (t *mockT) Error(args ...any) {
	fmt.Print(args...)
	t.fail()
}

// Errorf prints a formatted message and fails the test; like Error, it stops
// the test goroutine rather than continuing.
func (t *mockT) Errorf(format string, args ...any) {
	fmt.Printf(format, args...)
	t.fail()
}
// Fail marks the test as failed and stops the test goroutine. NOTE: stricter
// than testing.T.Fail, which records failure but lets the test continue.
func (t *mockT) Fail() {
	t.fail()
}

// FailNow prints a marker, marks the test as failed, and stops the test
// goroutine.
func (t *mockT) FailNow() {
	fmt.Println("Fatal")
	t.fail()
}

// Failed reports whether the test has been marked as failed.
func (t *mockT) Failed() bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.failed
}

// Fatal prints its arguments, marks the test as failed, and stops the test
// goroutine.
func (t *mockT) Fatal(args ...any) {
	fmt.Print(args...)
	t.fail()
}

// Fatalf prints a formatted message, marks the test as failed, and stops the
// test goroutine.
func (t *mockT) Fatalf(format string, args ...any) {
	fmt.Printf(format, args...)
	t.fail()
}
// Helper is a no-op; caller attribution is not tracked.
func (t *mockT) Helper() {}

// Log prints its arguments to stdout.
func (t *mockT) Log(args ...any) {
	fmt.Print(args...)
}

// Logf prints a formatted message to stdout.
func (t *mockT) Logf(format string, args ...any) {
	fmt.Printf(format, args...)
}

// Name returns the empty string; the test name is tracked by TestRunner and
// the goRunTest reporting, not by mockT.
func (t *mockT) Name() string {
	return ""
}

// Setenv is not supported; calling it fails the test.
func (t *mockT) Setenv(key, value string) {
	t.Fatalf("Setenv not supported")
}
// Skip prints its arguments, marks the test as skipped, and stops the test
// goroutine.
func (t *mockT) Skip(args ...any) {
	fmt.Println(args...)
	t.skip()
}

// SkipNow marks the test as skipped and stops the test goroutine.
func (t *mockT) SkipNow() {
	t.skip()
}

// Skipf prints a formatted message, marks the test as skipped, and stops the
// test goroutine.
func (t *mockT) Skipf(format string, args ...any) {
	fmt.Printf(format, args...)
	t.skip()
}

// Skipped reports whether the test has been marked as skipped.
func (t *mockT) Skipped() bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.skipped
}
// skip records the skipped state under the lock and terminates the calling
// goroutine. The deferred Unlock still runs: runtime.Goexit executes
// deferred calls before the goroutine exits.
func (t *mockT) skip() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.skipped = true
	runtime.Goexit()
}

// fail records the failed state under the lock and terminates the calling
// goroutine; goRunTest's deferred handler then reports the failure and exits
// the process.
func (t *mockT) fail() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.failed = true
	runtime.Goexit()
}

// TempDir is not supported; calling it fails the test. The return statement
// is unreachable because Fatalf stops the goroutine.
func (t *mockT) TempDir() string {
	t.Fatalf("TempDir not supported")
	return ""
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment