diff --git a/cl/_testrt/syncmap/in.go b/cl/_testrt/syncmap/in.go new file mode 100644 index 000000000..d734860ab --- /dev/null +++ b/cl/_testrt/syncmap/in.go @@ -0,0 +1,18 @@ +package main + +import ( + "fmt" + "sync" +) + +func main() { + var m sync.Map + m.Store(1, "hello") + m.Store("1", 100) + v, ok := m.Load("1") + fmt.Println(v, ok) + m.Range(func(k, v interface{}) bool { + fmt.Printf("%#v %v\n", k, v) + return true + }) +} diff --git a/cl/_testrt/syncmap/out.ll b/cl/_testrt/syncmap/out.ll new file mode 100644 index 000000000..a7564eda4 --- /dev/null +++ b/cl/_testrt/syncmap/out.ll @@ -0,0 +1,225 @@ +; ModuleID = 'main' +source_filename = "main" + +%"github.com/goplus/llgo/internal/runtime.eface" = type { ptr, ptr } +%"github.com/goplus/llgo/internal/runtime.String" = type { ptr, i64 } +%"github.com/goplus/llgo/internal/runtime.Slice" = type { ptr, i64, i64 } +%"github.com/goplus/llgo/internal/runtime.iface" = type { ptr, ptr } + +@"main.init$guard" = global i1 false, align 1 +@__llgo_argc = global i32 0, align 4 +@__llgo_argv = global ptr null, align 8 +@_llgo_int = linkonce global ptr null, align 8 +@0 = private unnamed_addr constant [5 x i8] c"hello", align 1 +@_llgo_string = linkonce global ptr null, align 8 +@1 = private unnamed_addr constant [1 x i8] c"1", align 1 +@_llgo_bool = linkonce global ptr null, align 8 +@2 = private unnamed_addr constant [7 x i8] c"%#v %v\0A", align 1 + +define void @main.init() { +_llgo_0: + %0 = load i1, ptr @"main.init$guard", align 1 + br i1 %0, label %_llgo_2, label %_llgo_1 + +_llgo_1: ; preds = %_llgo_0 + store i1 true, ptr @"main.init$guard", align 1 + call void @fmt.init() + call void @sync.init() + call void @"main.init$after"() + br label %_llgo_2 + +_llgo_2: ; preds = %_llgo_1, %_llgo_0 + ret void +} + +define i32 @main(i32 %0, ptr %1) { +_llgo_0: + store i32 %0, ptr @__llgo_argc, align 4 + store ptr %1, ptr @__llgo_argv, align 8 + call void @"github.com/goplus/llgo/internal/runtime.init"() + call void @main.init() + %2 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocZ"(i64 32) + %3 = load ptr, ptr @_llgo_int, align 8 + %4 = alloca %"github.com/goplus/llgo/internal/runtime.eface", align 8 + %5 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %4, i32 0, i32 0 + store ptr %3, ptr %5, align 8 + %6 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %4, i32 0, i32 1 + store ptr inttoptr (i64 1 to ptr), ptr %6, align 8 + %7 = load %"github.com/goplus/llgo/internal/runtime.eface", ptr %4, align 8 + %8 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %9 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %8, i32 0, i32 0 + store ptr @0, ptr %9, align 8 + %10 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %8, i32 0, i32 1 + store i64 5, ptr %10, align 4 + %11 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %8, align 8 + %12 = load ptr, ptr @_llgo_string, align 8 + %13 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 16) + store %"github.com/goplus/llgo/internal/runtime.String" %11, ptr %13, align 8 + %14 = alloca %"github.com/goplus/llgo/internal/runtime.eface", align 8 + %15 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %14, i32 0, i32 0 + store ptr %12, ptr %15, align 8 + %16 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %14, i32 0, i32 1 + store ptr %13, ptr %16, align 8 + %17 = load 
%"github.com/goplus/llgo/internal/runtime.eface", ptr %14, align 8 + call void @"sync.(*Map).Store"(ptr %2, %"github.com/goplus/llgo/internal/runtime.eface" %7, %"github.com/goplus/llgo/internal/runtime.eface" %17) + %18 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %19 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %18, i32 0, i32 0 + store ptr @1, ptr %19, align 8 + %20 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %18, i32 0, i32 1 + store i64 1, ptr %20, align 4 + %21 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %18, align 8 + %22 = load ptr, ptr @_llgo_string, align 8 + %23 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 16) + store %"github.com/goplus/llgo/internal/runtime.String" %21, ptr %23, align 8 + %24 = alloca %"github.com/goplus/llgo/internal/runtime.eface", align 8 + %25 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %24, i32 0, i32 0 + store ptr %22, ptr %25, align 8 + %26 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %24, i32 0, i32 1 + store ptr %23, ptr %26, align 8 + %27 = load %"github.com/goplus/llgo/internal/runtime.eface", ptr %24, align 8 + %28 = load ptr, ptr @_llgo_int, align 8 + %29 = alloca %"github.com/goplus/llgo/internal/runtime.eface", align 8 + %30 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %29, i32 0, i32 0 + store ptr %28, ptr %30, align 8 + %31 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %29, i32 0, i32 1 + store ptr inttoptr (i64 100 to ptr), ptr %31, align 8 + %32 = load %"github.com/goplus/llgo/internal/runtime.eface", ptr %29, align 8 + call void @"sync.(*Map).Store"(ptr %2, %"github.com/goplus/llgo/internal/runtime.eface" %27, %"github.com/goplus/llgo/internal/runtime.eface" %32) + %33 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %34 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %33, i32 0, i32 0 + store ptr @1, ptr %34, align 8 + %35 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %33, i32 0, i32 1 + store i64 1, ptr %35, align 4 + %36 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %33, align 8 + %37 = load ptr, ptr @_llgo_string, align 8 + %38 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 16) + store %"github.com/goplus/llgo/internal/runtime.String" %36, ptr %38, align 8 + %39 = alloca %"github.com/goplus/llgo/internal/runtime.eface", align 8 + %40 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %39, i32 0, i32 0 + store ptr %37, ptr %40, align 8 + %41 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %39, i32 0, i32 1 + store ptr %38, ptr %41, align 8 + %42 = load %"github.com/goplus/llgo/internal/runtime.eface", ptr %39, align 8 + %43 = call { %"github.com/goplus/llgo/internal/runtime.eface", i1 } @"sync.(*Map).Load"(ptr %2, %"github.com/goplus/llgo/internal/runtime.eface" %42) + %44 = extractvalue { %"github.com/goplus/llgo/internal/runtime.eface", i1 } %43, 0 + %45 = extractvalue { %"github.com/goplus/llgo/internal/runtime.eface", i1 } %43, 1 + %46 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocZ"(i64 32) + %47 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %46, i64 0 + store %"github.com/goplus/llgo/internal/runtime.eface" %44, ptr %47, align 8 + %48 = 
getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %46, i64 1 + %49 = load ptr, ptr @_llgo_bool, align 8 + %50 = sext i1 %45 to i64 + %51 = inttoptr i64 %50 to ptr + %52 = alloca %"github.com/goplus/llgo/internal/runtime.eface", align 8 + %53 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %52, i32 0, i32 0 + store ptr %49, ptr %53, align 8 + %54 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %52, i32 0, i32 1 + store ptr %51, ptr %54, align 8 + %55 = load %"github.com/goplus/llgo/internal/runtime.eface", ptr %52, align 8 + store %"github.com/goplus/llgo/internal/runtime.eface" %55, ptr %48, align 8 + %56 = alloca %"github.com/goplus/llgo/internal/runtime.Slice", align 8 + %57 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.Slice", ptr %56, i32 0, i32 0 + store ptr %46, ptr %57, align 8 + %58 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.Slice", ptr %56, i32 0, i32 1 + store i64 2, ptr %58, align 4 + %59 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.Slice", ptr %56, i32 0, i32 2 + store i64 2, ptr %59, align 4 + %60 = load %"github.com/goplus/llgo/internal/runtime.Slice", ptr %56, align 8 + %61 = call { i64, %"github.com/goplus/llgo/internal/runtime.iface" } @fmt.Println(%"github.com/goplus/llgo/internal/runtime.Slice" %60) + %62 = alloca { ptr, ptr }, align 8 + %63 = getelementptr inbounds { ptr, ptr }, ptr %62, i32 0, i32 0 + store ptr @"__llgo_stub.main.main$1", ptr %63, align 8 + %64 = getelementptr inbounds { ptr, ptr }, ptr %62, i32 0, i32 1 + store ptr null, ptr %64, align 8 + %65 = load { ptr, ptr }, ptr %62, align 8 + call void @"sync.(*Map).Range"(ptr %2, { ptr, ptr } %65) + ret i32 0 +} + +define i1 @"main.main$1"(%"github.com/goplus/llgo/internal/runtime.eface" %0, %"github.com/goplus/llgo/internal/runtime.eface" %1) { +_llgo_0: + %2 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocZ"(i64 32) + %3 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %2, i64 0 + store %"github.com/goplus/llgo/internal/runtime.eface" %0, ptr %3, align 8 + %4 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.eface", ptr %2, i64 1 + store %"github.com/goplus/llgo/internal/runtime.eface" %1, ptr %4, align 8 + %5 = alloca %"github.com/goplus/llgo/internal/runtime.Slice", align 8 + %6 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.Slice", ptr %5, i32 0, i32 0 + store ptr %2, ptr %6, align 8 + %7 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.Slice", ptr %5, i32 0, i32 1 + store i64 2, ptr %7, align 4 + %8 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.Slice", ptr %5, i32 0, i32 2 + store i64 2, ptr %8, align 4 + %9 = load %"github.com/goplus/llgo/internal/runtime.Slice", ptr %5, align 8 + %10 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %11 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %10, i32 0, i32 0 + store ptr @2, ptr %11, align 8 + %12 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %10, i32 0, i32 1 + store i64 7, ptr %12, align 4 + %13 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %10, align 8 + %14 = call { i64, %"github.com/goplus/llgo/internal/runtime.iface" } @fmt.Printf(%"github.com/goplus/llgo/internal/runtime.String" %13, %"github.com/goplus/llgo/internal/runtime.Slice" %9) + ret i1 true +} + +declare void @fmt.init() + 
+declare void @sync.init() + +declare void @"github.com/goplus/llgo/internal/runtime.init"() + +declare ptr @"github.com/goplus/llgo/internal/runtime.AllocZ"(i64) + +declare void @"sync.(*Map).Store"(ptr, %"github.com/goplus/llgo/internal/runtime.eface", %"github.com/goplus/llgo/internal/runtime.eface") + +define void @"main.init$after"() { +_llgo_0: + %0 = load ptr, ptr @_llgo_int, align 8 + %1 = icmp eq ptr %0, null + br i1 %1, label %_llgo_1, label %_llgo_2 + +_llgo_1: ; preds = %_llgo_0 + %2 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 34) + store ptr %2, ptr @_llgo_int, align 8 + br label %_llgo_2 + +_llgo_2: ; preds = %_llgo_1, %_llgo_0 + %3 = load ptr, ptr @_llgo_string, align 8 + %4 = icmp eq ptr %3, null + br i1 %4, label %_llgo_3, label %_llgo_4 + +_llgo_3: ; preds = %_llgo_2 + %5 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 24) + store ptr %5, ptr @_llgo_string, align 8 + br label %_llgo_4 + +_llgo_4: ; preds = %_llgo_3, %_llgo_2 + %6 = load ptr, ptr @_llgo_bool, align 8 + %7 = icmp eq ptr %6, null + br i1 %7, label %_llgo_5, label %_llgo_6 + +_llgo_5: ; preds = %_llgo_4 + %8 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 33) + store ptr %8, ptr @_llgo_bool, align 8 + br label %_llgo_6 + +_llgo_6: ; preds = %_llgo_5, %_llgo_4 + ret void +} + +declare ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64) + +declare ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64) + +declare { %"github.com/goplus/llgo/internal/runtime.eface", i1 } @"sync.(*Map).Load"(ptr, %"github.com/goplus/llgo/internal/runtime.eface") + +declare { i64, %"github.com/goplus/llgo/internal/runtime.iface" } @fmt.Println(%"github.com/goplus/llgo/internal/runtime.Slice") + +declare void @"sync.(*Map).Range"(ptr, { ptr, ptr }) + +define linkonce i1 @"__llgo_stub.main.main$1"(ptr %0, %"github.com/goplus/llgo/internal/runtime.eface" %1, %"github.com/goplus/llgo/internal/runtime.eface" %2) { +_llgo_0: + %3 = tail call i1 @"main.main$1"(%"github.com/goplus/llgo/internal/runtime.eface" %1, %"github.com/goplus/llgo/internal/runtime.eface" %2) + ret i1 %3 +} + +declare { i64, %"github.com/goplus/llgo/internal/runtime.iface" } @fmt.Printf(%"github.com/goplus/llgo/internal/runtime.String", %"github.com/goplus/llgo/internal/runtime.Slice") diff --git a/internal/lib/sync/map.go b/internal/lib/sync/map.go new file mode 100644 index 000000000..e8ccf58b5 --- /dev/null +++ b/internal/lib/sync/map.go @@ -0,0 +1,515 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sync + +import ( + "sync/atomic" +) + +// Map is like a Go map[interface{}]interface{} but is safe for concurrent use +// by multiple goroutines without additional locking or coordination. +// Loads, stores, and deletes run in amortized constant time. +// +// The Map type is specialized. Most code should use a plain Go map instead, +// with separate locking or coordination, for better type safety and to make it +// easier to maintain other invariants along with the map content. +// +// The Map type is optimized for two common use cases: (1) when the entry for a given +// key is only ever written once but read many times, as in caches that only grow, +// or (2) when multiple goroutines read, write, and overwrite entries for disjoint +// sets of keys. 
In these two cases, use of a Map may significantly reduce lock +// contention compared to a Go map paired with a separate Mutex or RWMutex. +// +// The zero Map is empty and ready for use. A Map must not be copied after first use. +// +// In the terminology of the Go memory model, Map arranges that a write operation +// “synchronizes before” any read operation that observes the effect of the write, where +// read and write operations are defined as follows. +// Load, LoadAndDelete, LoadOrStore, Swap, CompareAndSwap, and CompareAndDelete +// are read operations; Delete, LoadAndDelete, Store, and Swap are write operations; +// LoadOrStore is a write operation when it returns loaded set to false; +// CompareAndSwap is a write operation when it returns swapped set to true; +// and CompareAndDelete is a write operation when it returns deleted set to true. +type Map struct { + mu Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. + // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-expunged entry requires that the entry be copied to the dirty + // map and unexpunged with mu held. + read atomic.Pointer[readOnly] + + // dirty contains the portion of the map's contents that require mu to be + // held. To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-expunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An expunged entry in the + // clean map must be unexpunged and added to the dirty map before a new value + // can be stored to it. + // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. + dirty map[any]*entry + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. + // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// readOnly is an immutable struct stored atomically in the Map.read field. +type readOnly struct { + m map[any]*entry + amended bool // true if the dirty map contains some key not in m. +} + +// expunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +var expunged = new(any) + +// An entry is a slot in the map corresponding to a particular key. +type entry struct { + // p points to the interface{} value stored for the entry. + // + // If p == nil, the entry has been deleted, and either m.dirty == nil or + // m.dirty[key] is e. + // + // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry + // is missing from m.dirty. + // + // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. + // + // An entry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with expunged and leave + // m.dirty[key] unset. + // + // An entry's associated value can be updated by atomic replacement, provided + // p != expunged. 
If p == expunged, an entry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the entry. + p atomic.Pointer[any] +} + +func newEntry(i any) *entry { + e := &entry{} + e.p.Store(&i) + return e +} + +func (m *Map) loadReadOnly() readOnly { + if p := m.read.Load(); p != nil { + return *p + } + return readOnly{} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *Map) Load(key any) (value any, ok bool) { + read := m.loadReadOnly() + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) + read = m.loadReadOnly() + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return nil, false + } + return e.load() +} + +func (e *entry) load() (value any, ok bool) { + p := e.p.Load() + if p == nil || p == expunged { + return nil, false + } + return *p, true +} + +// Store sets the value for a key. +func (m *Map) Store(key, value any) { + _, _ = m.Swap(key, value) +} + +// tryCompareAndSwap compare the entry with the given old value and swaps +// it with a new value if the entry is equal to the old value, and the entry +// has not been expunged. +// +// If the entry is expunged, tryCompareAndSwap returns false and leaves +// the entry unchanged. +func (e *entry) tryCompareAndSwap(old, new any) bool { + p := e.p.Load() + if p == nil || p == expunged || *p != old { + return false + } + + // Copy the interface after the first load to make this method more amenable + // to escape analysis: if the comparison fails from the start, we shouldn't + // bother heap-allocating an interface value to store. + nc := new + for { + if e.p.CompareAndSwap(p, &nc) { + return true + } + p = e.p.Load() + if p == nil || p == expunged || *p != old { + return false + } + } +} + +// unexpungeLocked ensures that the entry is not marked as expunged. +// +// If the entry was previously expunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *entry) unexpungeLocked() (wasExpunged bool) { + return e.p.CompareAndSwap(expunged, nil) +} + +// swapLocked unconditionally swaps a value into the entry. +// +// The entry must be known not to be expunged. +func (e *entry) swapLocked(i *any) *any { + return e.p.Swap(i) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *Map) LoadOrStore(key, value any) (actual any, loaded bool) { + // Avoid locking if it's a clean hit. 
+ read := m.loadReadOnly() + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read = m.loadReadOnly() + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(&readOnly{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the entry is not +// expunged. +// +// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and +// returns with ok==false. +func (e *entry) tryLoadOrStore(i any) (actual any, loaded, ok bool) { + p := e.p.Load() + if p == expunged { + return nil, false, false + } + if p != nil { + return *p, true, true + } + + // Copy the interface after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the entry is expunged, we + // shouldn't bother heap-allocating. + ic := i + for { + if e.p.CompareAndSwap(nil, &ic) { + return i, false, true + } + p = e.p.Load() + if p == expunged { + return nil, false, false + } + if p != nil { + return *p, true, true + } + } +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. +func (m *Map) LoadAndDelete(key any) (value any, loaded bool) { + read := m.loadReadOnly() + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read = m.loadReadOnly() + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + delete(m.dirty, key) + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if ok { + return e.delete() + } + return nil, false +} + +// Delete deletes the value for a key. +func (m *Map) Delete(key any) { + m.LoadAndDelete(key) +} + +func (e *entry) delete() (value any, ok bool) { + for { + p := e.p.Load() + if p == nil || p == expunged { + return nil, false + } + if e.p.CompareAndSwap(p, nil) { + return *p, true + } + } +} + +// trySwap swaps a value if the entry has not been expunged. +// +// If the entry is expunged, trySwap returns false and leaves the entry +// unchanged. +func (e *entry) trySwap(i *any) (*any, bool) { + for { + p := e.p.Load() + if p == expunged { + return nil, false + } + if e.p.CompareAndSwap(p, i) { + return p, true + } + } +} + +// Swap swaps the value for a key and returns the previous value if any. +// The loaded result reports whether the key was present. +func (m *Map) Swap(key, value any) (previous any, loaded bool) { + read := m.loadReadOnly() + if e, ok := read.m[key]; ok { + if v, ok := e.trySwap(&value); ok { + if v == nil { + return nil, false + } + return *v, true + } + } + + m.mu.Lock() + read = m.loadReadOnly() + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The entry was previously expunged, which implies that there is a + // non-nil dirty map and this entry is not in it. 
+ m.dirty[key] = e + } + if v := e.swapLocked(&value); v != nil { + loaded = true + previous = *v + } + } else if e, ok := m.dirty[key]; ok { + if v := e.swapLocked(&value); v != nil { + loaded = true + previous = *v + } + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(&readOnly{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + } + m.mu.Unlock() + return previous, loaded +} + +// CompareAndSwap swaps the old and new values for key +// if the value stored in the map is equal to old. +// The old value must be of a comparable type. +func (m *Map) CompareAndSwap(key, old, new any) bool { + read := m.loadReadOnly() + if e, ok := read.m[key]; ok { + return e.tryCompareAndSwap(old, new) + } else if !read.amended { + return false // No existing value for key. + } + + m.mu.Lock() + defer m.mu.Unlock() + read = m.loadReadOnly() + swapped := false + if e, ok := read.m[key]; ok { + swapped = e.tryCompareAndSwap(old, new) + } else if e, ok := m.dirty[key]; ok { + swapped = e.tryCompareAndSwap(old, new) + // We needed to lock mu in order to load the entry for key, + // and the operation didn't change the set of keys in the map + // (so it would be made more efficient by promoting the dirty + // map to read-only). + // Count it as a miss so that we will eventually switch to the + // more efficient steady state. + m.missLocked() + } + return swapped +} + +// CompareAndDelete deletes the entry for key if its value is equal to old. +// The old value must be of a comparable type. +// +// If there is no current value for key in the map, CompareAndDelete +// returns false (even if the old value is the nil interface value). +func (m *Map) CompareAndDelete(key, old any) (deleted bool) { + read := m.loadReadOnly() + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read = m.loadReadOnly() + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Don't delete key from m.dirty: we still need to do the “compare” part + // of the operation. The entry will eventually be expunged when the + // dirty map is promoted to the read map. + // + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + for ok { + p := e.p.Load() + if p == nil || p == expunged || *p != old { + return false + } + if e.p.CompareAndSwap(p, nil) { + return true + } + } + return false +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently (including by f), Range may reflect any +// mapping for that key from any point during the Range call. Range does not +// block other methods on the receiver; even f itself may call any method on m. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *Map) Range(f func(key, value any) bool) { + // We need to be able to iterate over all of the keys that were already + // present at the start of the call to Range. 
+ // If read.amended is false, then read.m satisfies that property without + // requiring us to hold m.mu for a long time. + read := m.loadReadOnly() + if read.amended { + // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) + // (assuming the caller does not break out early), so a call to Range + // amortizes an entire copy of the map: we can promote the dirty copy + // immediately! + m.mu.Lock() + read = m.loadReadOnly() + if read.amended { + read = readOnly{m: m.dirty} + m.read.Store(&read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *Map) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(&readOnly{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *Map) dirtyLocked() { + if m.dirty != nil { + return + } + + read := m.loadReadOnly() + m.dirty = make(map[any]*entry, len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *entry) tryExpungeLocked() (isExpunged bool) { + p := e.p.Load() + for p == nil { + if e.p.CompareAndSwap(nil, expunged) { + return true + } + p = e.p.Load() + } + return p == expunged +} diff --git a/internal/lib/sync/sync.go b/internal/lib/sync/sync.go index 69075e0c6..c2c88ce70 100644 --- a/internal/lib/sync/sync.go +++ b/internal/lib/sync/sync.go @@ -25,10 +25,6 @@ import ( "github.com/goplus/llgo/c/pthread/sync" ) -const ( - LLGoPackage = "link" -) - // ----------------------------------------------------------------------------- type Mutex sync.Mutex