diff --git a/internal/benchmark/linkedq/linkedq.go b/internal/benchmark/linkedq/linkedq.go new file mode 100644 index 0000000..b647590 --- /dev/null +++ b/internal/benchmark/linkedq/linkedq.go @@ -0,0 +1,54 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linkedq + +import "sync" + +type LinkedQueue struct { + head *linkedqueueNode + tail *linkedqueueNode + mu sync.Mutex +} + +type linkedqueueNode struct { + value uint64 + next *linkedqueueNode +} + +func New() *LinkedQueue { + node := new(linkedqueueNode) + return &LinkedQueue{head: node, tail: node} +} + +func (q *LinkedQueue) Enqueue(value uint64) bool { + q.mu.Lock() + q.tail.next = &linkedqueueNode{value: value} + q.tail = q.tail.next + q.mu.Unlock() + return true +} + +func (q *LinkedQueue) Dequeue() (uint64, bool) { + q.mu.Lock() + if q.head.next == nil { + q.mu.Unlock() + return 0, false + } else { + value := q.head.next.value + q.head = q.head.next + q.mu.Unlock() + return value, true + } +} diff --git a/internal/benchmark/msq/msq.go b/internal/benchmark/msq/msq.go new file mode 100644 index 0000000..ac1c026 --- /dev/null +++ b/internal/benchmark/msq/msq.go @@ -0,0 +1,85 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
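For reference, a minimal usage sketch of the mutex-based LinkedQueue added above. The import path matches the one the lscq benchmarks use later in this diff; since it is an internal package, the sketch assumes code that lives inside the gkit module itself.

```go
package main

import (
	"fmt"

	"github.com/songzhibin97/gkit/internal/benchmark/linkedq"
)

func main() {
	q := linkedq.New()
	for i := uint64(1); i <= 3; i++ {
		q.Enqueue(i)
	}
	for {
		v, ok := q.Dequeue()
		if !ok {
			break // queue drained
		}
		fmt.Println(v) // prints 1, 2, 3 in FIFO order
	}
}
```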
+ +package msq + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +var msqv1pool *sync.Pool = &sync.Pool{New: func() interface{} { return new(msqv1node) }} + +type MSQueue struct { + head unsafe.Pointer // *msqv1node + tail unsafe.Pointer // *msqv1node +} + +type msqv1node struct { + value uint64 + next unsafe.Pointer // *msqv1node +} + +func New() *MSQueue { + node := unsafe.Pointer(new(msqv1node)) + return &MSQueue{head: node, tail: node} +} + +func loadMSQPointer(p *unsafe.Pointer) *msqv1node { + return (*msqv1node)(atomic.LoadPointer(p)) +} + +func (q *MSQueue) Enqueue(value uint64) bool { + node := &msqv1node{value: value} + for { + tail := atomic.LoadPointer(&q.tail) + tailstruct := (*msqv1node)(tail) + next := atomic.LoadPointer(&tailstruct.next) + if tail == atomic.LoadPointer(&q.tail) { + if next == nil { + // tail.next is empty, inset new node + if atomic.CompareAndSwapPointer(&tailstruct.next, next, unsafe.Pointer(node)) { + atomic.CompareAndSwapPointer(&q.tail, tail, unsafe.Pointer(node)) + break + } + } else { + atomic.CompareAndSwapPointer(&q.tail, tail, next) + } + } + } + return true +} + +func (q *MSQueue) Dequeue() (value uint64, ok bool) { + for { + head := atomic.LoadPointer(&q.head) + tail := atomic.LoadPointer(&q.tail) + headstruct := (*msqv1node)(head) + next := atomic.LoadPointer(&headstruct.next) + if head == atomic.LoadPointer(&q.head) { + if head == tail { + if next == nil { + return 0, false + } + atomic.CompareAndSwapPointer(&q.tail, tail, next) + } else { + value = ((*msqv1node)(next)).value + if atomic.CompareAndSwapPointer(&q.head, head, next) { + return value, true + } + } + } + } +} diff --git a/structure/hashset/hashset.go b/structure/hashset/hashset.go new file mode 100644 index 0000000..bc61a5c --- /dev/null +++ b/structure/hashset/hashset.go @@ -0,0 +1,66 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
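A similar sketch for the lock-free Michael-Scott queue above: the CAS retry loop in Enqueue, plus the helping CAS on q.tail, lets concurrent producers make progress without a mutex. Same caveat as before — msq is an internal package, so this assumes code inside the gkit module.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/songzhibin97/gkit/internal/benchmark/msq"
)

func main() {
	q := msq.New()
	var wg sync.WaitGroup
	// Four producers enqueue concurrently; losers of the tail CAS help
	// swing q.tail forward for the winner and then retry.
	for p := 0; p < 4; p++ {
		wg.Add(1)
		go func(base uint64) {
			defer wg.Done()
			for i := uint64(0); i < 100; i++ {
				q.Enqueue(base*100 + i)
			}
		}(uint64(p))
	}
	wg.Wait()

	count := 0
	for {
		if _, ok := q.Dequeue(); !ok {
			break
		}
		count++
	}
	fmt.Println(count) // 400
}
```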
+ +package hashset + +type Int64Set map[int64]struct{} + +// NewInt64 returns an empty int64 set +func NewInt64() Int64Set { + return make(map[int64]struct{}) +} + +// NewInt64WithSize returns an empty int64 set initialized with specific size +func NewInt64WithSize(size int) Int64Set { + return make(map[int64]struct{}, size) +} + +// Add adds the specified element to this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Int64Set) Add(value int64) bool { + s[value] = struct{}{} + return true +} + +// Contains returns true if this set contains the specified element +func (s Int64Set) Contains(value int64) bool { + if _, ok := s[value]; ok { + return true + } + return false +} + +// Remove removes the specified element from this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Int64Set) Remove(value int64) bool { + delete(s, value) + return true +} + +// Range calls f sequentially for each value present in the hashset. +// If f returns false, range stops the iteration. +func (s Int64Set) Range(f func(value int64) bool) { + for k := range s { + if !f(k) { + break + } + } +} + +// Len returns the number of elements of this set +func (s Int64Set) Len() int { + return len(s) +} diff --git a/structure/hashset/hashset_bench_test.go b/structure/hashset/hashset_bench_test.go new file mode 100644 index 0000000..43b7b0b --- /dev/null +++ b/structure/hashset/hashset_bench_test.go @@ -0,0 +1,187 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
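A short usage sketch of the Int64Set defined above (it mirrors the IntSet Example in hashset_test.go further down). The import path is inferred from the module layout used elsewhere in this diff; the zero-byte struct{} value is what keeps the set cheaper than a map[int64]bool.

```go
package main

import (
	"fmt"

	"github.com/songzhibin97/gkit/structure/hashset"
)

func main() {
	s := hashset.NewInt64WithSize(8) // pre-sized to avoid early rehashing
	s.Add(1)
	s.Add(2)
	s.Add(2) // duplicate: Add still returns true, Len stays 2

	fmt.Println(s.Contains(1)) // true
	fmt.Println(s.Len())       // 2

	s.Range(func(v int64) bool {
		fmt.Println("got", v)
		return true // return false to stop the iteration early
	})

	s.Remove(1)
	fmt.Println(s.Len()) // 1
}
```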
+ +package hashset + +import ( + "math/rand" + "testing" +) + +type int64SetBool map[int64]bool + +func newInt64Bool() *int64SetBool { + return &int64SetBool{} +} + +func (s *int64SetBool) Add(value int64) bool { + (*s)[value] = true + return true +} + +func (s *int64SetBool) Contains(value int64) bool { + if _, ok := (*s)[value]; ok { + return true + } + return false +} + +func (s *int64SetBool) Remove(value int64) bool { + delete(*s, value) + return true +} + +func (s *int64SetBool) Range(f func(value int64) bool) { + for k := range *s { + if !f(k) { + break + } + } +} + +func (s *int64SetBool) Len() int { + return len(*s) +} + +type int64SetAdd map[int64]struct{} + +func newInt64Add() *int64SetAdd { + return &int64SetAdd{} +} + +func (s *int64SetAdd) Add(value int64) bool { + if s.Contains(value) { + return true + } + (*s)[value] = struct{}{} + return true +} + +func (s *int64SetAdd) Contains(value int64) bool { + if _, ok := (*s)[value]; ok { + return true + } + return false +} + +func (s *int64SetAdd) Remove(value int64) bool { + if s.Contains(value) { + delete(*s, value) + return true + } + return false +} + +func (s *int64SetAdd) Range(f func(value int64) bool) { + for k := range *s { + if !f(k) { + break + } + } +} + +func (s *int64SetAdd) Len() int { + return len(*s) +} + +const capacity = 10000000 + +var randomList [capacity]int64 + +func init() { + for i := 0; i < capacity; i++ { + randomList[i] = int64(rand.Int63()) + } +} + +func BenchmarkValueAsBool(b *testing.B) { + b.ResetTimer() + l := newInt64Bool() + for n := 0; n < b.N; n++ { + l.Add(randomList[n%capacity]) + } +} + +func BenchmarkValueAsEmptyStruct(b *testing.B) { + b.ResetTimer() + l := NewInt64() + for n := 0; n < b.N; n++ { + l.Add(randomList[n%capacity]) + } +} + +func BenchmarkAddAfterContains(b *testing.B) { + b.ResetTimer() + l := newInt64Add() + for n := 0; n < b.N; n++ { + l.Add(randomList[n%capacity]) + } +} + +func BenchmarkAddWithoutContains(b *testing.B) { + b.ResetTimer() + l := NewInt64() + for n := 0; n < b.N; n++ { + l.Add(randomList[n%capacity]) + } +} + +func BenchmarkRemoveAfterContains_Missing(b *testing.B) { + l := newInt64Add() + for n := 0; n < b.N; n++ { + l.Add(randomList[n%capacity]) + } + b.ResetTimer() + + for n := 0; n < b.N; n++ { + l.Remove(int64(rand.Int63())) + } +} + +func BenchmarkRemoveWithoutContains_Missing(b *testing.B) { + + l := NewInt64() + for n := 0; n < b.N; n++ { + l.Add(randomList[n%capacity]) + } + b.ResetTimer() + + for n := 0; n < b.N; n++ { + l.Remove(int64(rand.Int63())) + } +} + +func BenchmarkRemoveAfterContains_Hitting(b *testing.B) { + l := newInt64Add() + for n := 0; n < b.N; n++ { + l.Add(randomList[n%capacity]) + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + l.Remove(randomList[n%capacity]) + } +} + +func BenchmarkRemoveWithoutContains_Hitting(b *testing.B) { + l := NewInt64() + for n := 0; n < b.N; n++ { + l.Add(randomList[n%capacity]) + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + l.Remove(randomList[n%capacity]) + } +} diff --git a/structure/hashset/hashset_test.go b/structure/hashset/hashset_test.go new file mode 100644 index 0000000..8064459 --- /dev/null +++ b/structure/hashset/hashset_test.go @@ -0,0 +1,74 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hashset + +import ( + "fmt" + "testing" +) + +func Example() { + l := NewInt() + + for _, v := range []int{10, 12, 15} { + l.Add(v) + } + + if l.Contains(10) { + fmt.Println("hashset contains 10") + } + + l.Range(func(value int) bool { + fmt.Println("hashset range found ", value) + return true + }) + + l.Remove(15) + fmt.Printf("hashset contains %d items\r\n", l.Len()) +} + +func TestIntSet(t *testing.T) { + // Correctness. + l := NewInt64() + if l.Len() != 0 { + t.Fatal("invalid length") + } + if l.Contains(0) { + t.Fatal("invalid contains") + } + + if l.Add(0); l.Len() != 1 { + t.Fatal("invalid add") + } + if !l.Contains(0) { + t.Fatal("invalid contains") + } + if l.Remove(0); l.Len() != 0 { + t.Fatal("invalid remove") + } + + if l.Add(20); l.Len() != 1 { + t.Fatal("invalid add") + } + if l.Add(22); l.Len() != 2 { + t.Fatal("invalid add") + } + if l.Add(21); l.Len() != 3 { + t.Fatal("invalid add") + } + if l.Add(21); l.Len() != 3 { + t.Fatal(l.Len(), " invalid add") + } +} diff --git a/structure/hashset/types.go b/structure/hashset/types.go new file mode 100644 index 0000000..d467e5c --- /dev/null +++ b/structure/hashset/types.go @@ -0,0 +1,485 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go run types_gen.go; DO NOT EDIT. + +package hashset + +type Float32Set map[float32]struct{} + +// NewFloat32 returns an empty float32 set +func NewFloat32() Float32Set { + return make(map[float32]struct{}) +} + +// NewFloat32WithSize returns an empty float32 set initialized with specific size +func NewFloat32WithSize(size int) Float32Set { + return make(map[float32]struct{}, size) +} + +// Add adds the specified element to this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Float32Set) Add(value float32) bool { + s[value] = struct{}{} + return true +} + +// Contains returns true if this set contains the specified element +func (s Float32Set) Contains(value float32) bool { + if _, ok := s[value]; ok { + return true + } + return false +} + +// Remove removes the specified element from this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Float32Set) Remove(value float32) bool { + delete(s, value) + return true +} + +// Range calls f sequentially for each value present in the hashset. +// If f returns false, range stops the iteration. 
+func (s Float32Set) Range(f func(value float32) bool) { + for k := range s { + if !f(k) { + break + } + } +} + +// Len returns the number of elements of this set + +func (s Float32Set) Len() int { + return len(s) +} + +type Float64Set map[float64]struct{} + +// NewFloat64 returns an empty float64 set +func NewFloat64() Float64Set { + return make(map[float64]struct{}) +} + +// NewFloat64WithSize returns an empty float64 set initialized with specific size +func NewFloat64WithSize(size int) Float64Set { + return make(map[float64]struct{}, size) +} + +// Add adds the specified element to this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Float64Set) Add(value float64) bool { + s[value] = struct{}{} + return true +} + +// Contains returns true if this set contains the specified element +func (s Float64Set) Contains(value float64) bool { + if _, ok := s[value]; ok { + return true + } + return false +} + +// Remove removes the specified element from this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Float64Set) Remove(value float64) bool { + delete(s, value) + return true +} + +// Range calls f sequentially for each value present in the hashset. +// If f returns false, range stops the iteration. +func (s Float64Set) Range(f func(value float64) bool) { + for k := range s { + if !f(k) { + break + } + } +} + +// Len returns the number of elements of this set + +func (s Float64Set) Len() int { + return len(s) +} + +type Int32Set map[int32]struct{} + +// NewInt32 returns an empty int32 set +func NewInt32() Int32Set { + return make(map[int32]struct{}) +} + +// NewInt32WithSize returns an empty int32 set initialized with specific size +func NewInt32WithSize(size int) Int32Set { + return make(map[int32]struct{}, size) +} + +// Add adds the specified element to this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Int32Set) Add(value int32) bool { + s[value] = struct{}{} + return true +} + +// Contains returns true if this set contains the specified element +func (s Int32Set) Contains(value int32) bool { + if _, ok := s[value]; ok { + return true + } + return false +} + +// Remove removes the specified element from this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Int32Set) Remove(value int32) bool { + delete(s, value) + return true +} + +// Range calls f sequentially for each value present in the hashset. +// If f returns false, range stops the iteration. 
+func (s Int32Set) Range(f func(value int32) bool) { + for k := range s { + if !f(k) { + break + } + } +} + +// Len returns the number of elements of this set + +func (s Int32Set) Len() int { + return len(s) +} + +type Int16Set map[int16]struct{} + +// NewInt16 returns an empty int16 set +func NewInt16() Int16Set { + return make(map[int16]struct{}) +} + +// NewInt16WithSize returns an empty int16 set initialized with specific size +func NewInt16WithSize(size int) Int16Set { + return make(map[int16]struct{}, size) +} + +// Add adds the specified element to this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Int16Set) Add(value int16) bool { + s[value] = struct{}{} + return true +} + +// Contains returns true if this set contains the specified element +func (s Int16Set) Contains(value int16) bool { + if _, ok := s[value]; ok { + return true + } + return false +} + +// Remove removes the specified element from this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Int16Set) Remove(value int16) bool { + delete(s, value) + return true +} + +// Range calls f sequentially for each value present in the hashset. +// If f returns false, range stops the iteration. +func (s Int16Set) Range(f func(value int16) bool) { + for k := range s { + if !f(k) { + break + } + } +} + +// Len returns the number of elements of this set + +func (s Int16Set) Len() int { + return len(s) +} + +type IntSet map[int]struct{} + +// NewInt returns an empty int set +func NewInt() IntSet { + return make(map[int]struct{}) +} + +// NewIntWithSize returns an empty int set initialized with specific size +func NewIntWithSize(size int) IntSet { + return make(map[int]struct{}, size) +} + +// Add adds the specified element to this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s IntSet) Add(value int) bool { + s[value] = struct{}{} + return true +} + +// Contains returns true if this set contains the specified element +func (s IntSet) Contains(value int) bool { + if _, ok := s[value]; ok { + return true + } + return false +} + +// Remove removes the specified element from this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s IntSet) Remove(value int) bool { + delete(s, value) + return true +} + +// Range calls f sequentially for each value present in the hashset. +// If f returns false, range stops the iteration. 
+func (s IntSet) Range(f func(value int) bool) { + for k := range s { + if !f(k) { + break + } + } +} + +// Len returns the number of elements of this set + +func (s IntSet) Len() int { + return len(s) +} + +type Uint64Set map[uint64]struct{} + +// NewUint64 returns an empty uint64 set +func NewUint64() Uint64Set { + return make(map[uint64]struct{}) +} + +// NewUint64WithSize returns an empty uint64 set initialized with specific size +func NewUint64WithSize(size int) Uint64Set { + return make(map[uint64]struct{}, size) +} + +// Add adds the specified element to this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Uint64Set) Add(value uint64) bool { + s[value] = struct{}{} + return true +} + +// Contains returns true if this set contains the specified element +func (s Uint64Set) Contains(value uint64) bool { + if _, ok := s[value]; ok { + return true + } + return false +} + +// Remove removes the specified element from this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Uint64Set) Remove(value uint64) bool { + delete(s, value) + return true +} + +// Range calls f sequentially for each value present in the hashset. +// If f returns false, range stops the iteration. +func (s Uint64Set) Range(f func(value uint64) bool) { + for k := range s { + if !f(k) { + break + } + } +} + +// Len returns the number of elements of this set + +func (s Uint64Set) Len() int { + return len(s) +} + +type Uint32Set map[uint32]struct{} + +// NewUint32 returns an empty uint32 set +func NewUint32() Uint32Set { + return make(map[uint32]struct{}) +} + +// NewUint32WithSize returns an empty uint32 set initialized with specific size +func NewUint32WithSize(size int) Uint32Set { + return make(map[uint32]struct{}, size) +} + +// Add adds the specified element to this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Uint32Set) Add(value uint32) bool { + s[value] = struct{}{} + return true +} + +// Contains returns true if this set contains the specified element +func (s Uint32Set) Contains(value uint32) bool { + if _, ok := s[value]; ok { + return true + } + return false +} + +// Remove removes the specified element from this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Uint32Set) Remove(value uint32) bool { + delete(s, value) + return true +} + +// Range calls f sequentially for each value present in the hashset. +// If f returns false, range stops the iteration. 
+func (s Uint32Set) Range(f func(value uint32) bool) { + for k := range s { + if !f(k) { + break + } + } +} + +// Len returns the number of elements of this set + +func (s Uint32Set) Len() int { + return len(s) +} + +type Uint16Set map[uint16]struct{} + +// NewUint16 returns an empty uint16 set +func NewUint16() Uint16Set { + return make(map[uint16]struct{}) +} + +// NewUint16WithSize returns an empty uint16 set initialized with specific size +func NewUint16WithSize(size int) Uint16Set { + return make(map[uint16]struct{}, size) +} + +// Add adds the specified element to this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Uint16Set) Add(value uint16) bool { + s[value] = struct{}{} + return true +} + +// Contains returns true if this set contains the specified element +func (s Uint16Set) Contains(value uint16) bool { + if _, ok := s[value]; ok { + return true + } + return false +} + +// Remove removes the specified element from this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s Uint16Set) Remove(value uint16) bool { + delete(s, value) + return true +} + +// Range calls f sequentially for each value present in the hashset. +// If f returns false, range stops the iteration. +func (s Uint16Set) Range(f func(value uint16) bool) { + for k := range s { + if !f(k) { + break + } + } +} + +// Len returns the number of elements of this set + +func (s Uint16Set) Len() int { + return len(s) +} + +type UintSet map[uint]struct{} + +// NewUint returns an empty uint set +func NewUint() UintSet { + return make(map[uint]struct{}) +} + +// NewUintWithSize returns an empty uint set initialized with specific size +func NewUintWithSize(size int) UintSet { + return make(map[uint]struct{}, size) +} + +// Add adds the specified element to this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s UintSet) Add(value uint) bool { + s[value] = struct{}{} + return true +} + +// Contains returns true if this set contains the specified element +func (s UintSet) Contains(value uint) bool { + if _, ok := s[value]; ok { + return true + } + return false +} + +// Remove removes the specified element from this set +// Always returns true due to the build-in map doesn't indicate caller whether the given element already exists +// Reserves the return type for future extension +func (s UintSet) Remove(value uint) bool { + delete(s, value) + return true +} + +// Range calls f sequentially for each value present in the hashset. +// If f returns false, range stops the iteration. +func (s UintSet) Range(f func(value uint) bool) { + for k := range s { + if !f(k) { + break + } + } +} + +// Len returns the number of elements of this set + +func (s UintSet) Len() int { + return len(s) +} diff --git a/structure/hashset/types_gen.go b/structure/hashset/types_gen.go new file mode 100644 index 0000000..5d0b4dc --- /dev/null +++ b/structure/hashset/types_gen.go @@ -0,0 +1,104 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build ignore +// +build ignore + +package main + +import ( + "bytes" + "go/format" + "io/ioutil" + "os" + "strings" +) + +var lengthFunction = `func (s Int64Set) Len() int { + return len(s) +}` + +func main() { + f, err := os.Open("hashset.go") + if err != nil { + panic(err) + } + filedata, err := ioutil.ReadAll(f) + if err != nil { + panic(err) + } + w := new(bytes.Buffer) + w.WriteString(`// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go run types_gen.go; DO NOT EDIT.` + "\r\n\n") + start_pos := strings.Index(string(filedata), "package hashset") + w.WriteString(string(filedata)[start_pos : start_pos+len("package hashset")]) + ts := []string{"Float32", "Float64", "Int32", "Int16", "Int", "Uint64", "Uint32", "Uint16", "Uint"} // all types need to be converted + + for _, upper := range ts { + lower := strings.ToLower(upper) + data := string(filedata) + // Remove header. + data = data[start_pos+len("package hashset"):] + // Remove the special case. + data = strings.Replace(data, lengthFunction, "", -1) + // Common cases. + data = strings.Replace(data, "int64", lower, -1) + data = strings.Replace(data, "Int64", upper, -1) + if inSlice(lowerSlice(ts), lower) { + data = strings.Replace(data, "length "+lower, "length int64", 1) + } + // Add the special case. + data = data + strings.Replace(lengthFunction, "Int64Set", upper+"Set", 1) + w.WriteString(data) + w.WriteString("\r\n") + } + + out, err := format.Source(w.Bytes()) + if err != nil { + panic(err) + } + if err := ioutil.WriteFile("types.go", out, 0660); err != nil { + panic(err) + } +} + +func lowerSlice(s []string) []string { + n := make([]string, len(s)) + for i, v := range s { + n[i] = strings.ToLower(v) + } + return n +} + +func inSlice(s []string, val string) bool { + for _, v := range s { + if v == val { + return true + } + } + return false +} diff --git a/structure/lscq/README.md b/structure/lscq/README.md new file mode 100644 index 0000000..b10e9a0 --- /dev/null +++ b/structure/lscq/README.md @@ -0,0 +1,129 @@ +# LSCQ + +LSCQ is a scalable, unbounded, multiple-producer and multiple-consumer FIFO queue written in Go. + +In the benchmarks (AMD 3700x, running at 3.6 GHZ, -cpu=16), LSCQ outperforms the lock-based linked queue by **5x ~ 6x** in most cases. Since the built-in channel is a bounded queue, we can only compare against it in the EnqueueDequeuePair case, where LSCQ outperforms the built-in channel by **8x ~ 9x**.
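The channel comparison mentioned above needs a small adapter that exposes a buffered channel through the same non-blocking Enqueue/Dequeue interface the benchmarks use. The bench file later in this diff references a newChannelQ helper only in commented-out code, so the following is a plausible sketch rather than the actual helper:

```go
package lscq

// channelQ adapts a buffered channel to the uint64 queue interface used by
// the benchmarks. Both operations are non-blocking: Enqueue reports false
// when the buffer is full (a bounded queue can reject), Dequeue reports
// false when it is empty.
type channelQ struct{ ch chan uint64 }

func newChannelQ(size int) *channelQ {
	return &channelQ{ch: make(chan uint64, size)}
}

func (q *channelQ) Enqueue(v uint64) bool {
	select {
	case q.ch <- v:
		return true
	default:
		return false
	}
}

func (q *channelQ) Dequeue() (uint64, bool) {
	select {
	case v := <-q.ch:
		return v, true
	default:
		return 0, false
	}
}
```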
+ +The ideas behind the LSCQ are [A Scalable, Portable, and Memory-Efficient Lock-Free FIFO Queue](https://arxiv.org/abs/1908.04511) and [Fast Concurrent Queues for x86 Processors](https://www.cs.tau.ac.il/~mad/publications/ppopp2013-x86queues.pdf). + + + +## QuickStart + +```go +package main + +import ( + "github.com/songzhibin97/gkit/structure/lscq" +) + +func main() { + q := lscq.NewUint64() + q.Enqueue(100) + println(q.Dequeue()) +} +``` + + + +## Benchmark + +- Go version: go1.16.2 linux/amd64 +- OS: ubuntu 18.04 +- CPU: AMD 3700x(8C16T), running at 3.6 GHZ (disable CPU turbo boost) +- MEMORY: 16G x 2 DDR4 memory, running at 3200 MHZ + + + +### CPU=100 + +```bash +go test -bench=. -cpu=100 -run=NOTEST -benchtime=1000000x +``` + +![benchmarkcpu100](https://raw.githubusercontent.com/zhangyunhao116/public-data/master/lscq-benchmark-cpu100.png) + +``` +Default/EnqueueOnly/LSCQ-100 38.9ns ±14% +Default/EnqueueOnly/linkedQ-100 209ns ± 3% +Default/EnqueueOnly/msqueue-100 379ns ± 2% +Default/DequeueOnlyEmpty/LSCQ-100 10.0ns ±31% +Default/DequeueOnlyEmpty/linkedQ-100 79.2ns ± 4% +Default/DequeueOnlyEmpty/msqueue-100 7.59ns ±44% +Default/Pair/LSCQ-100 58.7ns ± 7% +Default/Pair/linkedQ-100 324ns ± 5% +Default/Pair/msqueue-100 393ns ± 2% +Default/50Enqueue50Dequeue/LSCQ-100 34.9ns ± 8% +Default/50Enqueue50Dequeue/linkedQ-100 183ns ± 7% +Default/50Enqueue50Dequeue/msqueue-100 191ns ± 3% +Default/30Enqueue70Dequeue/LSCQ-100 78.5ns ± 4% +Default/30Enqueue70Dequeue/linkedQ-100 148ns ± 8% +Default/30Enqueue70Dequeue/msqueue-100 136ns ± 4% +Default/70Enqueue30Dequeue/LSCQ-100 36.2ns ±13% +Default/70Enqueue30Dequeue/linkedQ-100 195ns ± 4% +Default/70Enqueue30Dequeue/msqueue-100 267ns ± 2% +``` + + + +### CPU=16 + +```bash +go test -bench=. -cpu=16 -run=NOTEST -benchtime=1000000x +``` + +![benchmarkcpu16](https://raw.githubusercontent.com/zhangyunhao116/public-data/master/lscq-benchmark-cpu16.png) + +``` +Default/EnqueueOnly/LSCQ-16 33.7ns ± 5% +Default/EnqueueOnly/linkedQ-16 177ns ± 2% +Default/EnqueueOnly/msqueue-16 370ns ± 1% +Default/DequeueOnlyEmpty/LSCQ-16 3.27ns ±47% +Default/DequeueOnlyEmpty/linkedQ-16 91.1ns ± 2% +Default/DequeueOnlyEmpty/msqueue-16 3.23ns ±46% +Default/Pair/LSCQ-16 56.1ns ± 3% +Default/Pair/linkedQ-16 290ns ± 1% +Default/Pair/msqueue-16 367ns ± 1% +Default/50Enqueue50Dequeue/LSCQ-16 31.8ns ± 3% +Default/50Enqueue50Dequeue/linkedQ-16 157ns ± 8% +Default/50Enqueue50Dequeue/msqueue-16 188ns ± 4% +Default/30Enqueue70Dequeue/LSCQ-16 73.8ns ± 2% +Default/30Enqueue70Dequeue/linkedQ-16 149ns ± 5% +Default/30Enqueue70Dequeue/msqueue-16 123ns ± 2% +Default/70Enqueue30Dequeue/LSCQ-16 28.8ns ± 4% +Default/70Enqueue30Dequeue/linkedQ-16 176ns ± 3% +Default/70Enqueue30Dequeue/msqueue-16 261ns ± 2% +``` + + + +### CPU=1 + +```bash +go test -bench=. 
-cpu=1 -run=NOTEST -benchtime=1000000x +``` + +![benchmarkcpu1](https://raw.githubusercontent.com/zhangyunhao116/public-data/master/lscq-benchmark-cpu1.png) + +``` +name time/op +Default/EnqueueOnly/LSCQ 17.3ns ± 1% +Default/EnqueueOnly/linkedQ 59.9ns ± 6% +Default/EnqueueOnly/msqueue 67.1ns ± 2% +Default/DequeueOnlyEmpty/LSCQ 4.77ns ± 1% +Default/DequeueOnlyEmpty/linkedQ 11.3ns ± 2% +Default/DequeueOnlyEmpty/msqueue 3.14ns ± 1% +Default/Pair/LSCQ 36.7ns ± 0% +Default/Pair/linkedQ 56.2ns ± 6% +Default/Pair/msqueue 60.2ns ± 2% +Default/50Enqueue50Dequeue/LSCQ 23.1ns ± 2% +Default/50Enqueue50Dequeue/linkedQ 34.1ns ± 3% +Default/50Enqueue50Dequeue/msqueue 40.8ns ± 9% +Default/30Enqueue70Dequeue/LSCQ 26.5ns ± 2% +Default/30Enqueue70Dequeue/linkedQ 27.0ns ±28% +Default/30Enqueue70Dequeue/msqueue 26.7ns ± 7% +Default/70Enqueue30Dequeue/LSCQ 25.2ns ± 5% +Default/70Enqueue30Dequeue/linkedQ 47.3ns ± 5% +Default/70Enqueue30Dequeue/msqueue 55.2ns ± 8% +``` + diff --git a/structure/lscq/asm.go b/structure/lscq/asm.go new file mode 100644 index 0000000..1b32144 --- /dev/null +++ b/structure/lscq/asm.go @@ -0,0 +1,68 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build amd64 && !gccgo && !appengine +// +build amd64,!gccgo,!appengine + +package lscq + +import ( + "unsafe" +) + +type uint128 [2]uint64 + +func compareAndSwapUint128(addr *uint128, old1, old2, new1, new2 uint64) (swapped bool) + +func loadUint128(addr *uint128) (val uint128) + +func loadSCQNodePointer(addr unsafe.Pointer) (val scqNodePointer) + +func loadSCQNodeUint64(addr unsafe.Pointer) (val scqNodeUint64) + +func atomicTestAndSetFirstBit(addr *uint64) (val uint64) + +func atomicTestAndSetSecondBit(addr *uint64) (val uint64) + +func resetNode(addr unsafe.Pointer) + +//go:nosplit +func compareAndSwapSCQNodePointer(addr *scqNodePointer, old, new scqNodePointer) (swapped bool) { + // Ref: src/runtime/atomic_pointer.go:sync_atomic_CompareAndSwapPointer + if runtimeEnableWriteBarrier() { + runtimeatomicwb(&addr.data, new.data) + } + return compareAndSwapUint128((*uint128)(runtimenoescape(unsafe.Pointer(addr))), old.flags, uint64(uintptr(old.data)), new.flags, uint64(uintptr(new.data))) +} + +func compareAndSwapSCQNodeUint64(addr *scqNodeUint64, old, new scqNodeUint64) (swapped bool) { + return compareAndSwapUint128((*uint128)(unsafe.Pointer(addr)), old.flags, old.data, new.flags, new.data) +} + +func runtimeEnableWriteBarrier() bool + +//go:linkname runtimeatomicwb runtime.atomicwb +//go:noescape +func runtimeatomicwb(ptr *unsafe.Pointer, new unsafe.Pointer) + +//go:linkname runtimenoescape runtime.noescape +func runtimenoescape(p unsafe.Pointer) unsafe.Pointer + +//go:nosplit +func atomicWriteBarrier(ptr *unsafe.Pointer) { + // For SCQ dequeue only. 
(fastpath) + if runtimeEnableWriteBarrier() { + runtimeatomicwb(ptr, nil) + } +} diff --git a/structure/lscq/asm.s b/structure/lscq/asm.s new file mode 100644 index 0000000..e47eb90 --- /dev/null +++ b/structure/lscq/asm.s @@ -0,0 +1,72 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +TEXT ·compareAndSwapUint128(SB),NOSPLIT,$0 + MOVQ addr+0(FP), R8 + MOVQ old1+8(FP), AX + MOVQ old2+16(FP), DX + MOVQ new1+24(FP), BX + MOVQ new2+32(FP), CX + LOCK + CMPXCHG16B (R8) + SETEQ swapped+40(FP) + RET + +TEXT ·loadUint128(SB),NOSPLIT,$0 + MOVQ addr+0(FP), R8 + XORQ AX, AX + XORQ DX, DX + XORQ BX, BX + XORQ CX, CX + LOCK + CMPXCHG16B (R8) + MOVQ AX, val_0+8(FP) + MOVQ DX, val_1+16(FP) + RET + +TEXT ·loadSCQNodeUint64(SB),NOSPLIT,$0 + JMP ·loadUint128(SB) + +TEXT ·loadSCQNodePointer(SB),NOSPLIT,$0 + JMP ·loadUint128(SB) + +TEXT ·atomicTestAndSetFirstBit(SB),NOSPLIT,$0 + MOVQ addr+0(FP), DX + LOCK + BTSQ $63,(DX) + MOVQ AX, val+8(FP) + RET + +TEXT ·atomicTestAndSetSecondBit(SB),NOSPLIT,$0 + MOVQ addr+0(FP), DX + LOCK + BTSQ $62,(DX) + MOVQ AX, val+8(FP) + RET + +TEXT ·resetNode(SB),NOSPLIT,$0 + MOVQ addr+0(FP), DX + MOVQ $0, 8(DX) + LOCK + BTSQ $62, (DX) + RET + +TEXT ·runtimeEnableWriteBarrier(SB),NOSPLIT,$0 + MOVL runtime·writeBarrier(SB), AX + MOVB AX, ret+0(FP) + RET diff --git a/structure/lscq/bench_test.go b/structure/lscq/bench_test.go new file mode 100644 index 0000000..12565ef --- /dev/null +++ b/structure/lscq/bench_test.go @@ -0,0 +1,190 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
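The assembly above flips bit 63 and bit 62 of a 64-bit word in place (atomicTestAndSetFirstBit/SecondBit, resetNode). The Go code reads and writes the same bits through helpers such as newSCQFlags and loadSCQFlags, which live in util.go and are not shown in this diff. A hedged sketch of that packing, assuming only the layout documented on the scqNode types (isSafe 1-bit + isEmpty 1-bit + cycle 62-bit):

```go
package lscq

// Sketch only: bit 63 = isSafe, bit 62 = isEmpty, low 62 bits = cycle.
// The real helpers are defined in util.go, not in this snippet.
func newSCQFlags(isSafe, isEmpty bool, cycle uint64) uint64 {
	v := cycle & (1<<62 - 1)
	if isSafe {
		v |= 1 << 63
	}
	if isEmpty {
		v |= 1 << 62
	}
	return v
}

func loadSCQFlags(flags uint64) (isSafe, isEmpty bool, cycle uint64) {
	return flags&(1<<63) != 0, flags&(1<<62) != 0, flags & (1<<62 - 1)
}
```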
+ +package lscq + +import ( + "github.com/songzhibin97/gkit/internal/benchmark/linkedq" + "github.com/songzhibin97/gkit/internal/benchmark/msq" + "github.com/songzhibin97/gkit/sys/fastrand" + "sync/atomic" + "testing" +) + +type uint64queue interface { + Enqueue(uint64) bool + Dequeue() (uint64, bool) +} + +type benchTask struct { + name string + New func() uint64queue +} + +type faa int64 + +func (data *faa) Enqueue(_ uint64) bool { + atomic.AddInt64((*int64)(data), 1) + return true +} + +func (data *faa) Dequeue() (uint64, bool) { + atomic.AddInt64((*int64)(data), -1) + return 0, false +} + +func BenchmarkDefault(b *testing.B) { + all := []benchTask{{ + name: "LSCQ", New: func() uint64queue { + return NewUint64() + }}} + all = append(all, benchTask{ + name: "LinkedQueue", + New: func() uint64queue { + return linkedq.New() + }, + }) + all = append(all, benchTask{ + name: "MSQueue", + New: func() uint64queue { + return msq.New() + }, + }) + // all = append(all, benchTask{ + // name: "FAA", + // New: func() uint64queue { + // return new(faa) + // }, + // }) + // all = append(all, benchTask{ + // name: "channel", + // New: func() uint64queue { + // return newChannelQ(scqsize) + // }, + // }) + benchEnqueueOnly(b, all) + benchDequeueOnlyEmpty(b, all) + benchPair(b, all) + bench50Enqueue50Dequeue(b, all) + bench30Enqueue70Dequeue(b, all) + bench70Enqueue30Dequeue(b, all) +} + +func reportalloc(b *testing.B) { + // b.SetBytes(8) + // b.ReportAllocs() +} + +func benchPair(b *testing.B, benchTasks []benchTask) { + for _, v := range benchTasks { + b.Run("Pair/"+v.name, func(b *testing.B) { + q := v.New() + reportalloc(b) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + q.Enqueue(uint64(fastrand.Uint32())) + q.Dequeue() + } + }) + }) + } +} + +func bench50Enqueue50Dequeue(b *testing.B, benchTasks []benchTask) { + for _, v := range benchTasks { + b.Run("50Enqueue50Dequeue/"+v.name, func(b *testing.B) { + q := v.New() + b.ResetTimer() + reportalloc(b) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if fastrand.Uint32n(2) == 0 { + q.Enqueue(uint64(fastrand.Uint32())) + } else { + q.Dequeue() + } + } + }) + }) + } +} + +func bench70Enqueue30Dequeue(b *testing.B, benchTasks []benchTask) { + for _, v := range benchTasks { + b.Run("70Enqueue30Dequeue/"+v.name, func(b *testing.B) { + q := v.New() + reportalloc(b) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if fastrand.Uint32n(10) > 2 { + q.Enqueue(uint64(fastrand.Uint32())) + } else { + q.Dequeue() + } + } + }) + }) + } +} + +func bench30Enqueue70Dequeue(b *testing.B, benchTasks []benchTask) { + for _, v := range benchTasks { + b.Run("30Enqueue70Dequeue/"+v.name, func(b *testing.B) { + q := v.New() + reportalloc(b) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if fastrand.Uint32n(10) <= 2 { + q.Enqueue(uint64(fastrand.Uint32())) + } else { + q.Dequeue() + } + } + }) + }) + } +} + +func benchEnqueueOnly(b *testing.B, benchTasks []benchTask) { + for _, v := range benchTasks { + b.Run("EnqueueOnly/"+v.name, func(b *testing.B) { + q := v.New() + reportalloc(b) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + q.Enqueue(uint64(fastrand.Uint32())) + } + }) + }) + } +} + +func benchDequeueOnlyEmpty(b *testing.B, benchTasks []benchTask) { + for _, v := range benchTasks { + b.Run("DequeueOnlyEmpty/"+v.name, func(b *testing.B) { + q := v.New() + reportalloc(b) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + 
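+ // The queue is never enqueued into in this benchmark, so every Dequeue exercises only the empty-check fast path.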
q.Dequeue() + } + }) + }) + } +} diff --git a/structure/lscq/lscq.go b/structure/lscq/lscq.go new file mode 100644 index 0000000..eb62b67 --- /dev/null +++ b/structure/lscq/lscq.go @@ -0,0 +1,243 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lscq + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +var pointerSCQPool = sync.Pool{ + New: func() interface{} { + return newPointerSCQ() + }, +} + +type PointerQueue struct { + head *pointerSCQ + _ [cacheLineSize - unsafe.Sizeof(new(uintptr))]byte + tail *pointerSCQ +} + +func NewPointer() *PointerQueue { + q := newPointerSCQ() + return &PointerQueue{head: q, tail: q} +} + +func (q *PointerQueue) Dequeue() (data unsafe.Pointer, ok bool) { + for { + cq := (*pointerSCQ)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.head)))) + data, ok = cq.Dequeue() + if ok { + return + } + // cq does not have enough entries. + nex := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next))) + if nex == nil { + // We don't have next SCQ. + return + } + // cq.next is not empty, subsequent entry will be insert into cq.next instead of cq. + // So if cq is empty, we can move it into ncqpool. + atomic.StoreInt64(&cq.threshold, int64(scqsize*2)-1) + data, ok = cq.Dequeue() + if ok { + return + } + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.head)), (unsafe.Pointer(cq)), nex) { + // We can't ensure no other goroutines will access cq. + // The cq can still be previous dequeue's cq. + cq = nil + } + } +} + +func (q *PointerQueue) Enqueue(data unsafe.Pointer) bool { + for { + cq := (*pointerSCQ)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)))) + nex := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next))) + if nex != nil { + // Help move cq.next into tail. + atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)), (unsafe.Pointer(cq)), nex) + continue + } + if cq.Enqueue(data) { + return true + } + // Concurrent cq is full. + atomicTestAndSetFirstBit(&cq.tail) // close cq, subsequent enqueue will fail + cq.mu.Lock() + if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next))) != nil { + cq.mu.Unlock() + continue + } + ncq := pointerSCQPool.Get().(*pointerSCQ) // create a new queue + ncq.Enqueue(data) + // Try Add this queue into cq.next. + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next)), nil, unsafe.Pointer(ncq)) { + // Success. + // Try move cq.next into tail (we don't need to recheck since other enqueuer will help). + atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)), unsafe.Pointer(cq), unsafe.Pointer(ncq)) + cq.mu.Unlock() + return true + } + // CAS failed, put this new SCQ into scqpool. + // No other goroutines will access this queue. 
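+ // Drain the speculatively enqueued element so ncq goes back into the pool empty.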
+ ncq.Dequeue() + pointerSCQPool.Put(ncq) + cq.mu.Unlock() + } +} + +func newPointerSCQ() *pointerSCQ { + ring := new([scqsize]scqNodePointer) + for i := range ring { + ring[i].flags = 1<<63 + 1<<62 // newSCQFlags(true, true, 0) + } + return &pointerSCQ{ + head: scqsize, + tail: scqsize, + threshold: -1, + ring: ring, + } +} + +type pointerSCQ struct { + _ [cacheLineSize]byte + head uint64 + _ [cacheLineSize - unsafe.Sizeof(new(uint64))]byte + tail uint64 // 1-bit finalize + 63-bit tail + _ [cacheLineSize - unsafe.Sizeof(new(uint64))]byte + threshold int64 + _ [cacheLineSize - unsafe.Sizeof(new(uint64))]byte + next *pointerSCQ + ring *[scqsize]scqNodePointer + mu sync.Mutex +} + +type scqNodePointer struct { + flags uint64 // isSafe 1-bit + isEmpty 1-bit + cycle 62-bit + data unsafe.Pointer +} + +func (q *pointerSCQ) Enqueue(data unsafe.Pointer) bool { + for { + // Increment the TAIL, try to occupy an entry. + tailvalue := atomic.AddUint64(&q.tail, 1) + tailvalue -= 1 // we need previous value + T := uint64Get63(tailvalue) + if uint64Get1(tailvalue) { + // The queue is closed, return false, so following enqueuer + // will insert this data into next SCQ. + return false + } + entAddr := &q.ring[cacheRemap16Byte(T)] + cycleT := T / scqsize + eqretry: + // Enqueue do not need data, if this entry is empty, we can assume the data is also empty. + entFlags := atomic.LoadUint64(&entAddr.flags) + isSafe, isEmpty, cycleEnt := loadSCQFlags(entFlags) + if cycleEnt < cycleT && isEmpty && (isSafe || atomic.LoadUint64(&q.head) <= T) { + // We can use this entry for adding new data if + // 1. Tail's cycle is bigger than entry's cycle. + // 2. It is empty. + // 3. It is safe or tail >= head (There is enough space for this data) + ent := scqNodePointer{flags: entFlags} + newEnt := scqNodePointer{flags: newSCQFlags(true, false, cycleT), data: data} + // Save input data into this entry. + if !compareAndSwapSCQNodePointer(entAddr, ent, newEnt) { + // Failed, do next retry. + goto eqretry + } + // Success. + if atomic.LoadInt64(&q.threshold) != (int64(scqsize)*2)-1 { + atomic.StoreInt64(&q.threshold, (int64(scqsize)*2)-1) + } + return true + } + // Add a full queue check in the loop(CAS2). + if T+1 >= atomic.LoadUint64(&q.head)+scqsize { + // T is tail's value before FAA(1), latest tail is T+1. + return false + } + } +} + +func (q *pointerSCQ) Dequeue() (data unsafe.Pointer, ok bool) { + if atomic.LoadInt64(&q.threshold) < 0 { + // Empty queue. + return + } + + for { + // Decrement HEAD, try to release an entry. + H := atomic.AddUint64(&q.head, 1) + H -= 1 // we need previous value + entAddr := &q.ring[cacheRemap16Byte(H)] + cycleH := H / scqsize + dqretry: + ent := loadSCQNodePointer(unsafe.Pointer(entAddr)) + isSafe, isEmpty, cycleEnt := loadSCQFlags(ent.flags) + if cycleEnt == cycleH { // same cycle, return this entry directly + // 1. Clear the data in this slot. + // 2. Set `isEmpty` to 1 + atomicWriteBarrier(&entAddr.data) + resetNode(unsafe.Pointer(entAddr)) + return ent.data, true + } + if cycleEnt < cycleH { + var newEnt scqNodePointer + if isEmpty { + newEnt = scqNodePointer{flags: newSCQFlags(isSafe, true, cycleH)} + } else { + newEnt = scqNodePointer{flags: newSCQFlags(false, false, cycleEnt), data: ent.data} + } + if !compareAndSwapSCQNodePointer(entAddr, ent, newEnt) { + goto dqretry + } + } + // Check if the queue is empty. + tailvalue := atomic.LoadUint64(&q.tail) + T := uint64Get63(tailvalue) + if T <= H+1 { + // Invalid state. 
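+ // The queue is logically empty at this point; fixstate catches the tail up to the head before we return.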
+ q.fixstate(H + 1) + atomic.AddInt64(&q.threshold, -1) + return + } + if atomic.AddInt64(&q.threshold, -1)+1 <= 0 { + return + } + } +} + +func (q *pointerSCQ) fixstate(originalHead uint64) { + for { + head := atomic.LoadUint64(&q.head) + if originalHead < head { + // The last dequeuer will be responsible for fixstate. + return + } + tailvalue := atomic.LoadUint64(&q.tail) + if tailvalue >= head { + // The queue has been closed, or in normal state. + return + } + if atomic.CompareAndSwapUint64(&q.tail, tailvalue, head) { + return + } + } +} diff --git a/structure/lscq/lscq_test.go b/structure/lscq/lscq_test.go new file mode 100644 index 0000000..0b9252a --- /dev/null +++ b/structure/lscq/lscq_test.go @@ -0,0 +1,182 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lscq + +import ( + "github.com/songzhibin97/gkit/structure/skipset" + "github.com/songzhibin97/gkit/sys/fastrand" + "sync" + "testing" +) + +func TestBoundedQueue(t *testing.T) { + q := newUint64SCQ() + s := skipset.NewUint64() + + // Dequeue empty queue. + val, ok := q.Dequeue() + if ok { + t.Fatal(val) + } + + // Single goroutine correctness. + for i := 0; i < scqsize; i++ { + if !q.Enqueue(uint64(i)) { + t.Fatal(i) + } + s.Add(uint64(i)) + } + + if q.Enqueue(20) { // queue is full + t.Fatal() + } + + s.Range(func(value uint64) bool { + if val, ok := q.Dequeue(); !ok || val != value { + t.Fatal(val, ok, value) + } + return true + }) + + // Dequeue empty queue after previous loop. + val, ok = q.Dequeue() + if ok { + t.Fatal(val) + } + + // ---------- MULTIPLE TEST BEGIN ----------. + for j := 0; j < 10; j++ { + s = skipset.NewUint64() + + // Dequeue empty queue. + val, ok = q.Dequeue() + if ok { + t.Fatal(val) + } + + // Single goroutine correctness. + for i := 0; i < scqsize; i++ { + if !q.Enqueue(uint64(i)) { + t.Fatal() + } + s.Add(uint64(i)) + } + + if q.Enqueue(20) { // queue is full + t.Fatal() + } + + s.Range(func(value uint64) bool { + if val, ok := q.Dequeue(); !ok || val != value { + t.Fatal(val, ok, value) + } + return true + }) + + // Dequeue empty queue after previous loop. + val, ok = q.Dequeue() + if ok { + t.Fatal(val) + } + } + // ---------- MULTIPLE TEST END ----------. + + // MPMC correctness. + var wg sync.WaitGroup + s1 := skipset.NewUint64() + s2 := skipset.NewUint64() + for i := 0; i < 100000; i++ { + wg.Add(1) + go func() { + if fastrand.Uint32n(2) == 0 { + r := fastrand.Uint64() + if q.Enqueue(r) { + s1.Add(r) + } + } else { + val, ok := q.Dequeue() + if ok { + s2.Add(uint64(val)) + } + } + wg.Done() + }() + } + wg.Wait() + + for { + val, ok := q.Dequeue() + if !ok { + break + } + s2.Add(uint64(val)) + } + + s1.Range(func(value uint64) bool { + if !s2.Contains(value) { + t.Fatal(value) + } + return true + }) + + if s1.Len() != s2.Len() { + t.Fatal("invalid") + } +} + +func TestUnboundedQueue(t *testing.T) { + // MPMC correctness. 
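+ // Every value recorded in s1 (successfully enqueued) must later appear in s2 (dequeued), and the two sets must end up the same size.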
+ q := NewUint64() + var wg sync.WaitGroup + s1 := skipset.NewUint64() + s2 := skipset.NewUint64() + for i := 0; i < 100000; i++ { + wg.Add(1) + go func() { + if fastrand.Uint32n(2) == 0 { + r := fastrand.Uint64() + if !s1.Add(r) || !q.Enqueue(r) { + panic("invalid") + } + } else { + val, ok := q.Dequeue() + if ok { + s2.Add(uint64(val)) + } + } + wg.Done() + }() + } + wg.Wait() + + for { + val, ok := q.Dequeue() + if !ok { + break + } + s2.Add(uint64(val)) + } + + s1.Range(func(value uint64) bool { + if !s2.Contains(value) { + t.Fatal(value) + } + return true + }) + + if s1.Len() != s2.Len() { + t.Fatal("invalid") + } +} diff --git a/structure/lscq/types.go b/structure/lscq/types.go new file mode 100644 index 0000000..a30d735 --- /dev/null +++ b/structure/lscq/types.go @@ -0,0 +1,243 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Code generated by go run types_gen.go; DO NOT EDIT. +package lscq + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +var uint64SCQPool = sync.Pool{ + New: func() interface{} { + return newUint64SCQ() + }, +} + +type Uint64Queue struct { + head *uint64SCQ + _ [cacheLineSize - unsafe.Sizeof(new(uintptr))]byte + tail *uint64SCQ +} + +func NewUint64() *Uint64Queue { + q := newUint64SCQ() + return &Uint64Queue{head: q, tail: q} +} + +func (q *Uint64Queue) Dequeue() (data uint64, ok bool) { + for { + cq := (*uint64SCQ)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.head)))) + data, ok = cq.Dequeue() + if ok { + return + } + // cq does not have enough entries. + nex := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next))) + if nex == nil { + // We don't have next SCQ. + return + } + // cq.next is not empty, subsequent entry will be insert into cq.next instead of cq. + // So if cq is empty, we can move it into ncqpool. + atomic.StoreInt64(&cq.threshold, int64(scqsize*2)-1) + data, ok = cq.Dequeue() + if ok { + return + } + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.head)), (unsafe.Pointer(cq)), nex) { + // We can't ensure no other goroutines will access cq. + // The cq can still be previous dequeue's cq. + cq = nil + } + } +} + +func (q *Uint64Queue) Enqueue(data uint64) bool { + for { + cq := (*uint64SCQ)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)))) + nex := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next))) + if nex != nil { + // Help move cq.next into tail. + atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)), (unsafe.Pointer(cq)), nex) + continue + } + if cq.Enqueue(data) { + return true + } + // Concurrent cq is full. + atomicTestAndSetFirstBit(&cq.tail) // close cq, subsequent enqueue will fail + cq.mu.Lock() + if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next))) != nil { + cq.mu.Unlock() + continue + } + ncq := uint64SCQPool.Get().(*uint64SCQ) // create a new queue + ncq.Enqueue(data) + // Try Add this queue into cq.next. 
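+ // Only one enqueuer can win this CAS; a loser undoes its speculative Enqueue below and returns ncq to the pool.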
+ if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&cq.next)), nil, unsafe.Pointer(ncq)) { + // Success. + // Try move cq.next into tail (we don't need to recheck since other enqueuer will help). + atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)), unsafe.Pointer(cq), unsafe.Pointer(ncq)) + cq.mu.Unlock() + return true + } + // CAS failed, put this new SCQ into scqpool. + // No other goroutines will access this queue. + ncq.Dequeue() + uint64SCQPool.Put(ncq) + cq.mu.Unlock() + } +} + +func newUint64SCQ() *uint64SCQ { + ring := new([scqsize]scqNodeUint64) + for i := range ring { + ring[i].flags = 1<<63 + 1<<62 // newSCQFlags(true, true, 0) + } + return &uint64SCQ{ + head: scqsize, + tail: scqsize, + threshold: -1, + ring: ring, + } +} + +type uint64SCQ struct { + _ [cacheLineSize]byte + head uint64 + _ [cacheLineSize - unsafe.Sizeof(new(uint64))]byte + tail uint64 // 1-bit finalize + 63-bit tail + _ [cacheLineSize - unsafe.Sizeof(new(uint64))]byte + threshold int64 + _ [cacheLineSize - unsafe.Sizeof(new(uint64))]byte + next *uint64SCQ + ring *[scqsize]scqNodeUint64 + mu sync.Mutex +} + +type scqNodeUint64 struct { + flags uint64 // isSafe 1-bit + isEmpty 1-bit + cycle 62-bit + data uint64 +} + +func (q *uint64SCQ) Enqueue(data uint64) bool { + for { + // Increment the TAIL, try to occupy an entry. + tailvalue := atomic.AddUint64(&q.tail, 1) + tailvalue -= 1 // we need previous value + T := uint64Get63(tailvalue) + if uint64Get1(tailvalue) { + // The queue is closed, return false, so following enqueuer + // will insert this data into next SCQ. + return false + } + entAddr := &q.ring[cacheRemap16Byte(T)] + cycleT := T / scqsize + eqretry: + // Enqueue do not need data, if this entry is empty, we can assume the data is also empty. + entFlags := atomic.LoadUint64(&entAddr.flags) + isSafe, isEmpty, cycleEnt := loadSCQFlags(entFlags) + if cycleEnt < cycleT && isEmpty && (isSafe || atomic.LoadUint64(&q.head) <= T) { + // We can use this entry for adding new data if + // 1. Tail's cycle is bigger than entry's cycle. + // 2. It is empty. + // 3. It is safe or tail >= head (There is enough space for this data) + ent := scqNodeUint64{flags: entFlags} + newEnt := scqNodeUint64{flags: newSCQFlags(true, false, cycleT), data: data} + // Save input data into this entry. + if !compareAndSwapSCQNodeUint64(entAddr, ent, newEnt) { + // Failed, do next retry. + goto eqretry + } + // Success. + if atomic.LoadInt64(&q.threshold) != (int64(scqsize)*2)-1 { + atomic.StoreInt64(&q.threshold, (int64(scqsize)*2)-1) + } + return true + } + // Add a full queue check in the loop(CAS2). + if T+1 >= atomic.LoadUint64(&q.head)+scqsize { + // T is tail's value before FAA(1), latest tail is T+1. + return false + } + } +} + +func (q *uint64SCQ) Dequeue() (data uint64, ok bool) { + if atomic.LoadInt64(&q.threshold) < 0 { + // Empty queue. + return + } + + for { + // Decrement HEAD, try to release an entry. + H := atomic.AddUint64(&q.head, 1) + H -= 1 // we need previous value + entAddr := &q.ring[cacheRemap16Byte(H)] + cycleH := H / scqsize + dqretry: + ent := loadSCQNodeUint64(unsafe.Pointer(entAddr)) + isSafe, isEmpty, cycleEnt := loadSCQFlags(ent.flags) + if cycleEnt == cycleH { // same cycle, return this entry directly + // 1. Clear the data in this slot. + // 2. 
Set `isEmpty` to 1 + + resetNode(unsafe.Pointer(entAddr)) + return ent.data, true + } + if cycleEnt < cycleH { + var newEnt scqNodeUint64 + if isEmpty { + newEnt = scqNodeUint64{flags: newSCQFlags(isSafe, true, cycleH)} + } else { + newEnt = scqNodeUint64{flags: newSCQFlags(false, false, cycleEnt), data: ent.data} + } + if !compareAndSwapSCQNodeUint64(entAddr, ent, newEnt) { + goto dqretry + } + } + // Check if the queue is empty. + tailvalue := atomic.LoadUint64(&q.tail) + T := uint64Get63(tailvalue) + if T <= H+1 { + // Invalid state. + q.fixstate(H + 1) + atomic.AddInt64(&q.threshold, -1) + return + } + if atomic.AddInt64(&q.threshold, -1)+1 <= 0 { + return + } + } +} + +func (q *uint64SCQ) fixstate(originalHead uint64) { + for { + head := atomic.LoadUint64(&q.head) + if originalHead < head { + // The last dequeuer will be responsible for fixstate. + return + } + tailvalue := atomic.LoadUint64(&q.tail) + if tailvalue >= head { + // The queue has been closed, or in normal state. + return + } + if atomic.CompareAndSwapUint64(&q.tail, tailvalue, head) { + return + } + } +} diff --git a/structure/lscq/types_gen.go b/structure/lscq/types_gen.go new file mode 100644 index 0000000..95626fe --- /dev/null +++ b/structure/lscq/types_gen.go @@ -0,0 +1,105 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build ignore +// +build ignore + +package main + +import ( + "bytes" + "go/format" + "io/ioutil" + "os" + "strings" +) + +var license = `// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +` + +func main() { + f, err := os.Open("lscq.go") + if err != nil { + panic(err) + } + filedata, err := ioutil.ReadAll(f) + if err != nil { + panic(err) + } + + w := new(bytes.Buffer) + w.WriteString(license + `// Code generated by go run types_gen.go; DO NOT EDIT.` + "\r\n") + w.WriteString(string(filedata)[strings.Index(string(filedata), "package lscq") : strings.Index(string(filedata), ")\n")+1]) + // ts := []string{"Float32", "Float64", "Int64", "Int32", "Int16", "Int", "Uint64", "Uint32", "Uint16", "Uint"} // all types need to be converted + ts := []string{"Uint64"} // all types need to be converted + for _, upper := range ts { + lower := strings.ToLower(upper) + data := string(filedata) + // Remove header(imported packages). + data = data[strings.Index(data, ")\n")+1:] + // Common cases. 
+ data = strings.Replace(data, "atomic.StorePointer((*unsafe.Pointer)(ent.data), nil)", "", -1) + data = strings.Replace(data, "NewPointer", "New"+upper, -1) + data = strings.Replace(data, "data unsafe.Pointer", "data "+lower, -1) + data = strings.Replace(data, "data unsafe.Pointer", "data "+lower, -1) + data = strings.Replace(data, "pointerSCQ", lower+"SCQ", -1) + data = strings.Replace(data, "PointerSCQ", upper+"SCQ", -1) + data = strings.Replace(data, "pointerQueue", lower+"Queue", -1) + data = strings.Replace(data, "PointerQueue", upper+"Queue", -1) + data = strings.Replace(data, "scqNodePointer", "scqNode"+upper, -1) + data = strings.Replace(data, "compareAndSwapSCQNodePointer", "compareAndSwapSCQNode"+upper, -1) + data = strings.Replace(data, "loadSCQNodePointer", "loadSCQNode"+upper, -1) + data = strings.Replace(data, "pointerSCQPool", lower+"SCQPool", -1) + data = strings.Replace(data, "atomicWriteBarrier(&entAddr.data)", "", -1) + w.WriteString(data) + w.WriteString("\r\n") + } + + out, err := format.Source(w.Bytes()) + if err != nil { + panic(err) + } + if err := ioutil.WriteFile("types.go", out, 0660); err != nil { + panic(err) + } +} + +func lowerSlice(s []string) []string { + n := make([]string, len(s)) + for i, v := range s { + n[i] = strings.ToLower(v) + } + return n +} + +func inSlice(s []string, val string) bool { + for _, v := range s { + if v == val { + return true + } + } + return false +} diff --git a/structure/lscq/util.go b/structure/lscq/util.go new file mode 100644 index 0000000..62e979b --- /dev/null +++ b/structure/lscq/util.go @@ -0,0 +1,64 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
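For reference, here is a minimal standalone sketch (not part of the diff) of how each SCQ ring entry packs its metadata into a single uint64, matching the `scqNodeUint64` flags comment (isSafe 1-bit + isEmpty 1-bit + cycle 62-bit) and the `newSCQFlags`/`loadSCQFlags` helpers defined in util.go. The `pack` function below is illustrative only, not the package API.

```go
package main

import "fmt"

// pack mirrors the SCQ entry flag layout: bit 63 = isSafe,
// bit 62 = isEmpty, bits 0..61 = cycle.
func pack(isSafe, isEmpty bool, cycle uint64) uint64 {
	v := cycle & ((1 << 62) - 1)
	if isSafe {
		v |= 1 << 63
	}
	if isEmpty {
		v |= 1 << 62
	}
	return v
}

func main() {
	// newUint64SCQ initializes every ring entry with 1<<63 + 1<<62,
	// i.e. isSafe=true, isEmpty=true, cycle=0.
	fmt.Printf("%#x\n", pack(true, true, 0)) // 0xc000000000000000
	// An entry written by Enqueue is safe, non-empty, and carries the
	// tail's cycle number, e.g. cycle 3:
	fmt.Printf("%#x\n", pack(true, false, 3)) // 0x8000000000000003
}
```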
+
+package lscq
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/cpu"
+)
+
+const (
+	scqsize       = 1 << 16
+	cacheLineSize = unsafe.Sizeof(cpu.CacheLinePad{})
+)
+
+func uint64Get63(value uint64) uint64 {
+	return value & ((1 << 63) - 1)
+}
+
+func uint64Get1(value uint64) bool {
+	return (value & (1 << 63)) == (1 << 63)
+}
+
+func uint64GetAll(value uint64) (bool, uint64) {
+	return (value & (1 << 63)) == (1 << 63), value & ((1 << 63) - 1)
+}
+
+func loadSCQFlags(flags uint64) (isSafe bool, isEmpty bool, cycle uint64) {
+	isSafe = (flags & (1 << 63)) == (1 << 63)
+	isEmpty = (flags & (1 << 62)) == (1 << 62)
+	cycle = flags & ((1 << 62) - 1)
+	return isSafe, isEmpty, cycle
+}
+
+func newSCQFlags(isSafe bool, isEmpty bool, cycle uint64) uint64 {
+	v := cycle & ((1 << 62) - 1)
+	if isSafe {
+		v += 1 << 63
+	}
+	if isEmpty {
+		v += 1 << 62
+	}
+	return v
+}
+
+func cacheRemap16Byte(index uint64) uint64 {
+	const cacheLineSize = cacheLineSize / 2
+	rawIndex := index & uint64(scqsize-1)
+	cacheLineNum := (rawIndex) % (scqsize / uint64(cacheLineSize))
+	cacheLineIdx := rawIndex / (scqsize / uint64(cacheLineSize))
+	return cacheLineNum*uint64(cacheLineSize) + cacheLineIdx
+}
diff --git a/structure/skipmap/README.md b/structure/skipmap/README.md
new file mode 100644
index 0000000..06a26c5
--- /dev/null
+++ b/structure/skipmap/README.md
@@ -0,0 +1,151 @@
+## Introduction
+
+skipmap is a high-performance concurrent map based on a skip list. In a typical pattern (one million operations, 90% LOAD, 9% STORE, 1% DELETE), skipmap is up to 3x ~ 10x faster than the built-in sync.Map.
+
+The main idea behind skipmap is [A Simple Optimistic Skiplist Algorithm]().
+
+Unlike sync.Map, the items in skipmap are always sorted, and the `Load` and `Range` operations are wait-free (a goroutine is guaranteed to complete an operation as long as it keeps taking steps, regardless of the activity of other goroutines).
+
+
+
+## Features
+
+- Concurrent-safe API with high performance.
+- Wait-free Load and Range operations.
+- Sorted items.
+
+
+
+## When should you use skipmap
+
+In these situations, `skipmap` is better:
+
+- **Sorted elements are needed**.
+- **Multiple operations are called concurrently**, such as using both `Load` and `Store` at the same time.
+
+In these situations, `sync.Map` is better:
+
+- Only one goroutine accesses the map most of the time, such as inserting a batch of elements and then using only `Load` (a plain built-in map is even better here).
+
+
+
+## QuickStart
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/songzhibin97/gkit/structure/skipmap"
+)
+
+func main() {
+	l := skipmap.NewInt()
+
+	for _, v := range []int{10, 12, 15} {
+		l.Store(v, v+100)
+	}
+
+	v, ok := l.Load(10)
+	if ok {
+		fmt.Println("skipmap load 10 with value ", v)
+	}
+
+	l.Range(func(key int, value interface{}) bool {
+		fmt.Println("skipmap range found ", key, value)
+		return true
+	})
+
+	l.Delete(15)
+	fmt.Printf("skipmap contains %d items\r\n", l.Len())
+}
+
+```
+
+
+
+## Benchmark
+
+Go version: go1.16.2 linux/amd64
+
+CPU: AMD 3700x(8C16T), running at 3.6GHz
+
+OS: ubuntu 18.04
+
+MEMORY: 16G x 2 (3200MHz)
+
+![benchmark](https://raw.githubusercontent.com/zhangyunhao116/public-data/master/skipmap-benchmark.png)
+
+```shell
+$ go test -run=NOTEST -bench=.
-benchtime=100000x -benchmem -count=20 -timeout=60m > x.txt +$ benchstat x.txt +``` + +``` +name time/op +Int64/Store/skipmap-16 158ns ±12% +Int64/Store/sync.Map-16 700ns ± 4% +Int64/Load50Hits/skipmap-16 10.1ns ±14% +Int64/Load50Hits/sync.Map-16 14.8ns ±23% +Int64/30Store70Load/skipmap-16 50.6ns ±20% +Int64/30Store70Load/sync.Map-16 592ns ± 7% +Int64/1Delete9Store90Load/skipmap-16 27.5ns ±13% +Int64/1Delete9Store90Load/sync.Map-16 480ns ± 4% +Int64/1Range9Delete90Store900Load/skipmap-16 34.2ns ±26% +Int64/1Range9Delete90Store900Load/sync.Map-16 1.00µs ±12% +String/Store/skipmap-16 171ns ±15% +String/Store/sync.Map-16 873ns ± 4% +String/Load50Hits/skipmap-16 21.3ns ±38% +String/Load50Hits/sync.Map-16 19.9ns ±12% +String/30Store70Load/skipmap-16 75.6ns ±16% +String/30Store70Load/sync.Map-16 726ns ± 5% +String/1Delete9Store90Load/skipmap-16 34.3ns ±20% +String/1Delete9Store90Load/sync.Map-16 584ns ± 5% +String/1Range9Delete90Store900Load/skipmap-16 41.0ns ±21% +String/1Range9Delete90Store900Load/sync.Map-16 1.17µs ± 8% + +name alloc/op +Int64/Store/skipmap-16 112B ± 0% +Int64/Store/sync.Map-16 128B ± 0% +Int64/Load50Hits/skipmap-16 0.00B +Int64/Load50Hits/sync.Map-16 0.00B +Int64/30Store70Load/skipmap-16 33.0B ± 0% +Int64/30Store70Load/sync.Map-16 81.2B ±11% +Int64/1Delete9Store90Load/skipmap-16 10.0B ± 0% +Int64/1Delete9Store90Load/sync.Map-16 57.9B ± 5% +Int64/1Range9Delete90Store900Load/skipmap-16 10.0B ± 0% +Int64/1Range9Delete90Store900Load/sync.Map-16 261B ±17% +String/Store/skipmap-16 144B ± 0% +String/Store/sync.Map-16 152B ± 0% +String/Load50Hits/skipmap-16 15.0B ± 0% +String/Load50Hits/sync.Map-16 15.0B ± 0% +String/30Store70Load/skipmap-16 54.0B ± 0% +String/30Store70Load/sync.Map-16 96.9B ±12% +String/1Delete9Store90Load/skipmap-16 27.0B ± 0% +String/1Delete9Store90Load/sync.Map-16 74.2B ± 4% +String/1Range9Delete90Store900Load/skipmap-16 27.0B ± 0% +String/1Range9Delete90Store900Load/sync.Map-16 257B ±10% + +name allocs/op +Int64/Store/skipmap-16 3.00 ± 0% +Int64/Store/sync.Map-16 4.00 ± 0% +Int64/Load50Hits/skipmap-16 0.00 +Int64/Load50Hits/sync.Map-16 0.00 +Int64/30Store70Load/skipmap-16 0.00 +Int64/30Store70Load/sync.Map-16 1.00 ± 0% +Int64/1Delete9Store90Load/skipmap-16 0.00 +Int64/1Delete9Store90Load/sync.Map-16 0.00 +Int64/1Range9Delete90Store900Load/skipmap-16 0.00 +Int64/1Range9Delete90Store900Load/sync.Map-16 0.00 +String/Store/skipmap-16 4.00 ± 0% +String/Store/sync.Map-16 5.00 ± 0% +String/Load50Hits/skipmap-16 1.00 ± 0% +String/Load50Hits/sync.Map-16 1.00 ± 0% +String/30Store70Load/skipmap-16 1.00 ± 0% +String/30Store70Load/sync.Map-16 2.00 ± 0% +String/1Delete9Store90Load/skipmap-16 1.00 ± 0% +String/1Delete9Store90Load/sync.Map-16 1.00 ± 0% +String/1Range9Delete90Store900Load/skipmap-16 1.00 ± 0% +String/1Range9Delete90Store900Load/sync.Map-16 1.00 ± 0% +``` \ No newline at end of file diff --git a/structure/skipmap/asm.s b/structure/skipmap/asm.s new file mode 100644 index 0000000..a4c2d9f --- /dev/null +++ b/structure/skipmap/asm.s @@ -0,0 +1,19 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// The runtime package uses //go:linkname to push a few functions into this +// package but we still need a .s file so the Go tool does not pass -complete +// to the go tool compile so the latter does not complain about Go functions +// with no bodies. +// See https://github.com/golang/go/issues/23311 diff --git a/structure/skipmap/bench.sh b/structure/skipmap/bench.sh new file mode 100644 index 0000000..4f0bb50 --- /dev/null +++ b/structure/skipmap/bench.sh @@ -0,0 +1 @@ +go test -run=NOTEST -bench=. -count=10 -timeout=60m -benchtime=100000x -benchmem > x.txt && benchstat x.txt \ No newline at end of file diff --git a/structure/skipmap/flag.go b/structure/skipmap/flag.go new file mode 100644 index 0000000..f784737 --- /dev/null +++ b/structure/skipmap/flag.go @@ -0,0 +1,66 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package skipmap + +import "sync/atomic" + +const ( + fullyLinked = 1 << iota + marked +) + +// concurrent-safe bitflag. +type bitflag struct { + data uint32 +} + +func (f *bitflag) SetTrue(flags uint32) { + for { + old := atomic.LoadUint32(&f.data) + if old&flags == flags { + return + } + // Flag is 0, need set it to 1. + n := old | flags + if atomic.CompareAndSwapUint32(&f.data, old, n) { + return + } + continue + } +} + +func (f *bitflag) SetFalse(flags uint32) { + for { + old := atomic.LoadUint32(&f.data) + check := old & flags + if check == 0 { + return + } + // Flag is 1, need set it to 0. + n := old ^ check + if atomic.CompareAndSwapUint32(&f.data, old, n) { + return + } + continue + } +} + +func (f *bitflag) Get(flag uint32) bool { + return (atomic.LoadUint32(&f.data) & flag) != 0 +} + +func (f *bitflag) MGet(check, expect uint32) bool { + return (atomic.LoadUint32(&f.data) & check) == expect +} diff --git a/structure/skipmap/oparray.go b/structure/skipmap/oparray.go new file mode 100644 index 0000000..ce4cbc5 --- /dev/null +++ b/structure/skipmap/oparray.go @@ -0,0 +1,60 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
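As a side note, the `bitflag` helper in flag.go above relies on a plain load/CAS retry loop; the following self-contained sketch (illustrative names, not the package API) shows the same idiom and how the `fullyLinked` and `marked` bits are combined, e.g. the `MGet(fullyLinked|marked, fullyLinked)` check the skipmap uses to ask "linked and not deleted?".

```go
package main

import (
	"fmt"
	"sync/atomic"
)

const (
	fullyLinked = 1 << iota // node is linked into every level it belongs to
	marked                  // node is logically deleted
)

// setTrue sets the given flag bits with a CAS retry loop,
// the same idiom as bitflag.SetTrue above.
func setTrue(addr *uint32, flags uint32) {
	for {
		old := atomic.LoadUint32(addr)
		if old&flags == flags {
			return // already set
		}
		if atomic.CompareAndSwapUint32(addr, old, old|flags) {
			return // we won the race
		}
		// Lost the race; reload and retry.
	}
}

func main() {
	var flags uint32
	setTrue(&flags, fullyLinked)
	// Equivalent of MGet(fullyLinked|marked, fullyLinked):
	ok := atomic.LoadUint32(&flags)&(fullyLinked|marked) == fullyLinked
	fmt.Println(ok) // true: fully linked and not marked for deletion
}
```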
+ +package skipmap + +import ( + "sync/atomic" + "unsafe" +) + +const ( + op1 = 4 + op2 = maxLevel - op1 +) + +type optionalArray struct { + base [op1]unsafe.Pointer + extra *([op2]unsafe.Pointer) +} + +func (a *optionalArray) load(i int) unsafe.Pointer { + if i < op1 { + return a.base[i] + } + return a.extra[i-op1] +} + +func (a *optionalArray) store(i int, p unsafe.Pointer) { + if i < op1 { + a.base[i] = p + return + } + a.extra[i-op1] = p +} + +func (a *optionalArray) atomicLoad(i int) unsafe.Pointer { + if i < op1 { + return atomic.LoadPointer(&a.base[i]) + } + return atomic.LoadPointer(&a.extra[i-op1]) +} + +func (a *optionalArray) atomicStore(i int, p unsafe.Pointer) { + if i < op1 { + atomic.StorePointer(&a.base[i], p) + return + } + atomic.StorePointer(&a.extra[i-op1], p) +} diff --git a/structure/skipmap/oparry_test.go b/structure/skipmap/oparry_test.go new file mode 100644 index 0000000..d2fc7a9 --- /dev/null +++ b/structure/skipmap/oparry_test.go @@ -0,0 +1,63 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package skipmap + +import ( + "github.com/songzhibin97/gkit/sys/fastrand" + "testing" + "unsafe" +) + +type dummy struct { + data optionalArray +} + +func TestOpArray(t *testing.T) { + n := new(dummy) + n.data.extra = new([op2]unsafe.Pointer) + + var array [maxLevel]unsafe.Pointer + for i := 0; i < maxLevel; i++ { + value := unsafe.Pointer(&dummy{}) + array[i] = value + n.data.store(i, value) + } + + for i := 0; i < maxLevel; i++ { + if array[i] != n.data.load(i) || array[i] != n.data.atomicLoad(i) { + t.Fatal(i, array[i], n.data.load(i)) + } + } + + for i := 0; i < 1000; i++ { + r := int(fastrand.Uint32n(maxLevel)) + value := unsafe.Pointer(&dummy{}) + if i%100 == 0 { + value = nil + } + array[r] = value + if fastrand.Uint32n(2) == 0 { + n.data.store(r, value) + } else { + n.data.atomicStore(r, value) + } + } + + for i := 0; i < maxLevel; i++ { + if array[i] != n.data.load(i) || array[i] != n.data.atomicLoad(i) { + t.Fatal(i, array[i], n.data.load(i)) + } + } +} diff --git a/structure/skipmap/skipmap.go b/structure/skipmap/skipmap.go new file mode 100644 index 0000000..59fd05a --- /dev/null +++ b/structure/skipmap/skipmap.go @@ -0,0 +1,509 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package skipmap is a high-performance, scalable, concurrent-safe map based on skip-list. 
+// In the typical pattern(100000 operations, 90%LOAD 9%STORE 1%DELETE, 8C16T), the skipmap +// up to 10x faster than the built-in sync.Map. +package skipmap + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// Int64Map represents a map based on skip list in ascending order. +type Int64Map struct { + header *int64Node + length int64 + highestLevel int64 // highest level for now +} + +type int64Node struct { + key int64 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*int64Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newInt64Node(key int64, value interface{}, level int) *int64Node { + node := &int64Node{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *int64Node) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *int64Node) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *int64Node) loadNext(i int) *int64Node { + return (*int64Node)(n.next.load(i)) +} + +func (n *int64Node) storeNext(i int, node *int64Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *int64Node) atomicLoadNext(i int) *int64Node { + return (*int64Node)(n.next.atomicLoad(i)) +} + +func (n *int64Node) atomicStoreNext(i int, node *int64Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *int64Node) lessthan(key int64) bool { + return n.key < key +} + +func (n *int64Node) equal(key int64) bool { + return n.key == key +} + +// NewInt64 return an empty int64 skipmap. +func NewInt64() *Int64Map { + h := newInt64Node(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Int64Map{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Int64Map) findNode(key int64, preds *[maxLevel]*int64Node, succs *[maxLevel]*int64Node) *int64Node { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Int64Map) findNodeDelete(key int64, preds *[maxLevel]*int64Node, succs *[maxLevel]*int64Node) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. 
+ if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockInt64(preds [maxLevel]*int64Node, highestLevel int) { + var prevPred *int64Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. +func (s *Int64Map) Store(key int64, value interface{}) { + level := s.randomlevel() + var preds, succs [maxLevel]*int64Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int64Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt64(preds, highestLocked) + continue + } + + nn := newInt64Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt64(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + } +} + +func (s *Int64Map) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *Int64Map) Load(key int64) (value interface{}, ok bool) { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(key) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.equal(key) { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return nil, false + } + } + return nil, false +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
+// (Modified from Delete) +func (s *Int64Map) LoadAndDelete(key int64) (value interface{}, loaded bool) { + var ( + nodeToDelete *int64Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int64Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int64Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt64(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockInt64(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Int64Map) LoadOrStore(key int64, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*int64Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int64Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt64(preds, highestLocked) + continue + } + + nn := newInt64Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt64(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Int64Map) LoadOrStoreLazy(key int64, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*int64Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int64Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockInt64(preds, highestLocked) + continue + } + value := f() + nn := newInt64Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt64(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
+func (s *Int64Map) Delete(key int64) bool { + var ( + nodeToDelete *int64Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int64Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int64Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockInt64(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockInt64(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Int64Map) Range(f func(key int64, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Int64Map) Len() int { + return int(atomic.LoadInt64(&s.length)) +} diff --git a/structure/skipmap/skipmap_bench_test.go b/structure/skipmap/skipmap_bench_test.go new file mode 100644 index 0000000..4e8643f --- /dev/null +++ b/structure/skipmap/skipmap_bench_test.go @@ -0,0 +1,469 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package skipmap + +import ( + "github.com/songzhibin97/gkit/sys/fastrand" + "math" + "strconv" + "sync" + "testing" +) + +const initsize = 1 << 10 // for `load` `1Delete9Store90Load` `1Range9Delete90Store900Load` +const randN = math.MaxUint32 + +func BenchmarkStore(b *testing.B) { + b.Run("skipmap", func(b *testing.B) { + l := NewInt64() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } + }) + }) +} + +func BenchmarkLoad100Hits(b *testing.B) { + b.Run("skipmap", func(b *testing.B) { + l := NewInt64() + for i := 0; i < initsize; i++ { + l.Store(int64(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, _ = l.Load(int64(fastrand.Uint32n(initsize))) + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + for i := 0; i < initsize; i++ { + l.Store(int64(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, _ = l.Load(int64(fastrand.Uint32n(initsize))) + } + }) + }) +} + +func BenchmarkLoad50Hits(b *testing.B) { + const rate = 2 + b.Run("skipmap", func(b *testing.B) { + l := NewInt64() + for i := 0; i < initsize*rate; i++ { + if fastrand.Uint32n(rate) == 0 { + l.Store(int64(i), nil) + } + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, _ = l.Load(int64(fastrand.Uint32n(initsize * rate))) + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + for i := 0; i < initsize*rate; i++ { + if fastrand.Uint32n(rate) == 0 { + l.Store(int64(i), nil) + } + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, _ = l.Load(int64(fastrand.Uint32n(initsize * rate))) + } + }) + }) +} + +func BenchmarkLoadNoHits(b *testing.B) { + b.Run("skipmap", func(b *testing.B) { + l := NewInt64() + invalid := make([]int64, 0, initsize) + for i := 0; i < initsize*2; i++ { + if i%2 == 0 { + l.Store(int64(i), nil) + } else { + invalid = append(invalid, int64(i)) + } + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, _ = l.Load(invalid[fastrand.Uint32n(uint32(len(invalid)))]) + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + invalid := make([]int64, 0, initsize) + for i := 0; i < initsize*2; i++ { + if i%2 == 0 { + l.Store(int64(i), nil) + } else { + invalid = append(invalid, int64(i)) + } + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, _ = l.Load(invalid[fastrand.Uint32n(uint32(len(invalid)))]) + } + }) + }) +} + +func Benchmark50Store50Load(b *testing.B) { + b.Run("skipmap", func(b *testing.B) { + l := NewInt64() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(10) + if u < 5 { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } else { + l.Load(int64(fastrand.Uint32n(randN))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var 
l sync.Map + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(10) + if u < 5 { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } else { + l.Load(int64(fastrand.Uint32n(randN))) + } + } + }) + }) +} + +func Benchmark30Store70Load(b *testing.B) { + b.Run("skipmap", func(b *testing.B) { + l := NewInt64() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(10) + if u < 3 { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } else { + l.Load(int64(fastrand.Uint32n(randN))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(10) + if u < 3 { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } else { + l.Load(int64(fastrand.Uint32n(randN))) + } + } + }) + }) +} + +func Benchmark1Delete9Store90Load(b *testing.B) { + b.Run("skipmap", func(b *testing.B) { + l := NewInt64() + for i := 0; i < initsize; i++ { + l.Store(int64(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(100) + if u < 9 { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } else if u == 10 { + l.Delete(int64(fastrand.Uint32n(randN))) + } else { + l.Load(int64(fastrand.Uint32n(randN))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + for i := 0; i < initsize; i++ { + l.Store(int64(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(100) + if u < 9 { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } else if u == 10 { + l.Delete(int64(fastrand.Uint32n(randN))) + } else { + l.Load(int64(fastrand.Uint32n(randN))) + } + } + }) + }) +} + +func Benchmark1Range9Delete90Store900Load(b *testing.B) { + b.Run("skipmap", func(b *testing.B) { + l := NewInt64() + for i := 0; i < initsize; i++ { + l.Store(int64(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(1000) + if u == 0 { + l.Range(func(key int64, value interface{}) bool { + return true + }) + } else if u > 10 && u < 20 { + l.Delete(int64(fastrand.Uint32n(randN))) + } else if u >= 100 && u < 190 { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } else { + l.Load(int64(fastrand.Uint32n(randN))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + for i := 0; i < initsize; i++ { + l.Store(int64(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(1000) + if u == 0 { + l.Range(func(key, value interface{}) bool { + return true + }) + } else if u > 10 && u < 20 { + l.Delete(int64(fastrand.Uint32n(randN))) + } else if u >= 100 && u < 190 { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } else { + l.Load(int64(fastrand.Uint32n(randN))) + } + } + }) + }) +} + +func BenchmarkStringStore(b *testing.B) { + b.Run("skipmap", func(b *testing.B) { + l := NewString() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + l.Store(strconv.Itoa(int(fastrand.Uint32())), nil) + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + l.Store(strconv.Itoa(int(fastrand.Uint32())), nil) + } + }) + }) +} + +func BenchmarkStringLoad50Hits(b *testing.B) { + const rate = 2 + b.Run("skipmap", func(b *testing.B) { + l := NewString() + for i := 0; i < initsize*rate; i++ { + if fastrand.Uint32n(rate) == 0 { + 
l.Store(strconv.Itoa(i), nil) + } + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, _ = l.Load(strconv.Itoa(int(fastrand.Uint32n(initsize * rate)))) + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + for i := 0; i < initsize*rate; i++ { + if fastrand.Uint32n(rate) == 0 { + l.Store(strconv.Itoa(i), nil) + } + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, _ = l.Load(strconv.Itoa(int(fastrand.Uint32n(initsize * rate)))) + } + }) + }) +} + +func BenchmarkString30Store70Load(b *testing.B) { + b.Run("skipmap", func(b *testing.B) { + l := NewString() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(10) + if u < 3 { + l.Store(strconv.Itoa(int(fastrand.Uint32n(randN))), nil) + } else { + l.Load(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(10) + if u < 3 { + l.Store(strconv.Itoa(int(fastrand.Uint32n(randN))), nil) + } else { + l.Load(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } + } + }) + }) +} + +func BenchmarkString1Delete9Store90Load(b *testing.B) { + b.Run("skipmap", func(b *testing.B) { + l := NewString() + for i := 0; i < initsize; i++ { + l.Store(strconv.Itoa(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(100) + if u < 9 { + l.Store(strconv.Itoa(int(fastrand.Uint32n(randN))), nil) + } else if u == 10 { + l.Delete(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } else { + l.Load(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + for i := 0; i < initsize; i++ { + l.Store(strconv.Itoa(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(100) + if u < 9 { + l.Store(strconv.Itoa(int(fastrand.Uint32n(randN))), nil) + } else if u == 10 { + l.Delete(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } else { + l.Load(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } + } + }) + }) +} + +func BenchmarkString1Range9Delete90Store900Load(b *testing.B) { + b.Run("skipmap", func(b *testing.B) { + l := NewString() + for i := 0; i < initsize; i++ { + l.Store(strconv.Itoa(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(1000) + if u == 0 { + l.Range(func(key string, value interface{}) bool { + return true + }) + } else if u > 10 && u < 20 { + l.Delete(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } else if u >= 100 && u < 190 { + l.Store(strconv.Itoa(int(fastrand.Uint32n(randN))), nil) + } else { + l.Load(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + for i := 0; i < initsize; i++ { + l.Store(strconv.Itoa(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(1000) + if u == 0 { + l.Range(func(key, value interface{}) bool { + return true + }) + } else if u > 10 && u < 20 { + l.Delete(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } else if u >= 100 && u < 190 { + l.Store(strconv.Itoa(int(fastrand.Uint32n(randN))), nil) + } else { + l.Load(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } + } + }) + }) +} diff --git a/structure/skipmap/skipmap_str_test.go b/structure/skipmap/skipmap_str_test.go new file mode 100644 index 
0000000..aaddf17 --- /dev/null +++ b/structure/skipmap/skipmap_str_test.go @@ -0,0 +1,129 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package skipmap + +import ( + "reflect" + "strconv" + "sync" + "sync/atomic" + "testing" +) + +func TestStringMap(t *testing.T) { + m := NewString() + + // Correctness. + m.Store("123", "123") + v, ok := m.Load("123") + if !ok || v != "123" || m.Len() != 1 { + t.Fatal("invalid") + } + + m.Store("123", "456") + v, ok = m.Load("123") + if !ok || v != "456" || m.Len() != 1 { + t.Fatal("invalid") + } + + m.Store("123", 456) + v, ok = m.Load("123") + if !ok || v != 456 || m.Len() != 1 { + t.Fatal("invalid") + } + + m.Delete("123") + _, ok = m.Load("123") + if ok || m.Len() != 0 { + t.Fatal("invalid") + } + + _, ok = m.LoadOrStore("123", 456) + if ok || m.Len() != 1 { + t.Fatal("invalid") + } + + v, ok = m.Load("123") + if !ok || v != 456 || m.Len() != 1 { + t.Fatal("invalid") + } + + v, ok = m.LoadAndDelete("123") + if !ok || v != 456 || m.Len() != 0 { + t.Fatal("invalid") + } + + _, ok = m.LoadOrStore("123", 456) + if ok || m.Len() != 1 { + t.Fatal("invalid") + } + + m.LoadOrStore("456", 123) + if ok || m.Len() != 2 { + t.Fatal("invalid") + } + + m.Range(func(key string, value interface{}) bool { + if key == "123" { + m.Store("123", 123) + } else if key == "456" { + m.LoadAndDelete("456") + } + return true + }) + + v, ok = m.Load("123") + if !ok || v != 123 || m.Len() != 1 { + t.Fatal("invalid") + } + + // Concurrent. + var wg sync.WaitGroup + for i := 0; i < 1000; i++ { + i := i + wg.Add(1) + go func() { + n := strconv.Itoa(i) + m.Store(n, int(i+1000)) + wg.Done() + }() + } + wg.Wait() + var count2 int64 + m.Range(func(key string, value interface{}) bool { + atomic.AddInt64(&count2, 1) + return true + }) + m.Delete("600") + var count int64 + m.Range(func(key string, value interface{}) bool { + atomic.AddInt64(&count, 1) + return true + }) + + val, ok := m.Load("500") + if !ok || reflect.TypeOf(val).Kind().String() != "int" || val.(int) != 1500 { + t.Fatal("fail") + } + + _, ok = m.Load("600") + if ok { + t.Fatal("fail") + } + + if m.Len() != 999 || int(count) != m.Len() { + t.Fatal("fail", m.Len(), count, count2) + } +} diff --git a/structure/skipmap/skipmap_test.go b/structure/skipmap/skipmap_test.go new file mode 100644 index 0000000..5d916ec --- /dev/null +++ b/structure/skipmap/skipmap_test.go @@ -0,0 +1,339 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
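TestStringMap above exercises `Store`, `LoadOrStore`, `LoadAndDelete`, and `Range`, including a concurrent section. For readers of the diff, a minimal sequential sketch of those APIs on the int64-keyed map (assuming the import path shown in the README) looks like this:

```go
package main

import (
	"fmt"

	"github.com/songzhibin97/gkit/structure/skipmap"
)

func main() {
	m := skipmap.NewInt64()

	// LoadOrStore: key 1 is absent, so 100 is stored and loaded is false.
	actual, loaded := m.LoadOrStore(1, 100)
	fmt.Println(actual, loaded) // 100 false

	// LoadOrStoreLazy: key 1 exists, so the value function is never called.
	actual, loaded = m.LoadOrStoreLazy(1, func() interface{} { return 200 })
	fmt.Println(actual, loaded) // 100 true

	// LoadAndDelete: returns the previous value and removes the key.
	prev, ok := m.LoadAndDelete(1)
	fmt.Println(prev, ok, m.Len()) // 100 true 0
}
```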
+ +package skipmap + +import ( + "github.com/songzhibin97/gkit/sys/fastrand" + "math/rand" + "reflect" + "runtime" + "sync" + "sync/atomic" + "testing" +) + +func TestSkipMap(t *testing.T) { + m := NewInt() + + // Correctness. + m.Store(123, "123") + v, ok := m.Load(123) + if !ok || v != "123" || m.Len() != 1 { + t.Fatal("invalid") + } + + m.Store(123, "456") + v, ok = m.Load(123) + if !ok || v != "456" || m.Len() != 1 { + t.Fatal("invalid") + } + + m.Store(123, 456) + v, ok = m.Load(123) + if !ok || v != 456 || m.Len() != 1 { + t.Fatal("invalid") + } + + m.Delete(123) + v, ok = m.Load(123) + if ok || m.Len() != 0 || v != nil { + t.Fatal("invalid") + } + + v, loaded := m.LoadOrStore(123, 456) + if loaded || v != 456 || m.Len() != 1 { + t.Fatal("invalid") + } + + v, loaded = m.LoadOrStore(123, 789) + if !loaded || v != 456 || m.Len() != 1 { + t.Fatal("invalid") + } + + v, ok = m.Load(123) + if !ok || v != 456 || m.Len() != 1 { + t.Fatal("invalid") + } + + v, ok = m.LoadAndDelete(123) + if !ok || v != 456 || m.Len() != 0 { + t.Fatal("invalid") + } + + _, ok = m.LoadOrStore(123, 456) + if ok || m.Len() != 1 { + t.Fatal("invalid") + } + + m.LoadOrStore(456, 123) + if ok || m.Len() != 2 { + t.Fatal("invalid") + } + + m.Range(func(key int, _ interface{}) bool { + if key == 123 { + m.Store(123, 123) + } else if key == 456 { + m.LoadAndDelete(456) + } + return true + }) + + v, ok = m.Load(123) + if !ok || v != 123 || m.Len() != 1 { + t.Fatal("invalid") + } + + // Concurrent. + var wg sync.WaitGroup + for i := 0; i < 1000; i++ { + i := i + wg.Add(1) + go func() { + m.Store(i, int(i+1000)) + wg.Done() + }() + } + wg.Wait() + wg.Add(1) + go func() { + m.Delete(600) + wg.Done() + }() + wg.Wait() + wg.Add(1) + var count int64 + go func() { + m.Range(func(_ int, _ interface{}) bool { + atomic.AddInt64(&count, 1) + return true + }) + wg.Done() + }() + wg.Wait() + + val, ok := m.Load(500) + if !ok || reflect.TypeOf(val).Kind().String() != "int" || val.(int) != 1500 { + t.Fatal("fail") + } + + _, ok = m.Load(600) + if ok { + t.Fatal("fail") + } + + if m.Len() != 999 || int(count) != m.Len() { + t.Fatal("fail") + } + // Correctness 2. + var m1 sync.Map + m2 := NewUint32() + var v1, v2 interface{} + var ok1, ok2 bool + for i := 0; i < 100000; i++ { + rd := fastrand.Uint32n(10) + r1, r2 := fastrand.Uint32n(100), fastrand.Uint32n(100) + if rd == 0 { + m1.Store(r1, r2) + m2.Store(r1, r2) + } else if rd == 1 { + v1, ok1 = m1.LoadAndDelete(r1) + v2, ok2 = m2.LoadAndDelete(r1) + if ok1 != ok2 || v1 != v2 { + t.Fatal(rd, v1, ok1, v2, ok2) + } + } else if rd == 2 { + v1, ok1 = m1.LoadOrStore(r1, r2) + v2, ok2 = m2.LoadOrStore(r1, r2) + if ok1 != ok2 || v1 != v2 { + t.Fatal(rd, v1, ok1, v2, ok2, "input -> ", r1, r2) + } + } else if rd == 3 { + m1.Delete(r1) + m2.Delete(r1) + } else if rd == 4 { + m2.Range(func(key uint32, value interface{}) bool { + v, ok := m1.Load(key) + if !ok || v != value { + t.Fatal(v, ok, key, value) + } + return true + }) + } else { + v1, ok1 = m1.Load(r1) + v2, ok2 = m2.Load(r1) + if ok1 != ok2 || v1 != v2 { + t.Fatal(rd, v1, ok1, v2, ok2) + } + } + } + // Correntness 3. (LoadOrStore) + // Only one LoadorStore can successfully insert its key and value. + // And the returned value is unique. 
+ mp := NewInt() + tmpmap := NewInt64() + samekey := 123 + var added int64 + for i := 1; i < 1000; i++ { + wg.Add(1) + go func() { + v := fastrand.Int63() + actual, loaded := mp.LoadOrStore(samekey, v) + if !loaded { + atomic.AddInt64(&added, 1) + } + tmpmap.Store(actual.(int64), nil) + wg.Done() + }() + } + wg.Wait() + if added != 1 { + t.Fatal("only one LoadOrStore can successfully insert a key and value") + } + if tmpmap.Len() != 1 { + t.Fatal("only one value can be returned from LoadOrStore") + } + // Correntness 4. (LoadAndDelete) + // Only one LoadAndDelete can successfully get a value. + mp = NewInt() + tmpmap = NewInt64() + samekey = 123 + added = 0 // int64 + mp.Store(samekey, 555) + for i := 1; i < 1000; i++ { + wg.Add(1) + go func() { + value, loaded := mp.LoadAndDelete(samekey) + if loaded { + atomic.AddInt64(&added, 1) + if value != 555 { + panic("invalid") + } + } + wg.Done() + }() + } + wg.Wait() + if added != 1 { + t.Fatal("Only one LoadAndDelete can successfully get a value") + } + + // Correntness 5. (LoadOrStoreLazy) + mp = NewInt() + tmpmap = NewInt64() + samekey = 123 + added = 0 + var fcalled int64 + valuef := func() interface{} { + atomic.AddInt64(&fcalled, 1) + return fastrand.Int63() + } + for i := 1; i < 1000; i++ { + wg.Add(1) + go func() { + actual, loaded := mp.LoadOrStoreLazy(samekey, valuef) + if !loaded { + atomic.AddInt64(&added, 1) + } + tmpmap.Store(actual.(int64), nil) + wg.Done() + }() + } + wg.Wait() + if added != 1 || fcalled != 1 { + t.Fatal("only one LoadOrStoreLazy can successfully insert a key and value") + } + if tmpmap.Len() != 1 { + t.Fatal("only one value can be returned from LoadOrStoreLazy") + } +} + +func TestSkipMapDesc(t *testing.T) { + m := NewIntDesc() + cases := []int{10, 11, 12} + for _, v := range cases { + m.Store(v, nil) + } + i := len(cases) - 1 + m.Range(func(key int, _ interface{}) bool { + if key != cases[i] { + t.Fail() + } + i-- + return true + }) +} + +/* Test from sync.Map */ +func TestConcurrentRange(t *testing.T) { + const mapSize = 1 << 10 + + m := NewInt64() + for n := int64(1); n <= mapSize; n++ { + m.Store(n, int64(n)) + } + + done := make(chan struct{}) + var wg sync.WaitGroup + defer func() { + close(done) + wg.Wait() + }() + for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- { + r := rand.New(rand.NewSource(g)) + wg.Add(1) + go func(g int64) { + defer wg.Done() + for i := int64(0); ; i++ { + select { + case <-done: + return + default: + } + for n := int64(1); n < mapSize; n++ { + if r.Int63n(mapSize) == 0 { + m.Store(n, n*i*g) + } else { + m.Load(n) + } + } + } + }(g) + } + + iters := 1 << 10 + if testing.Short() { + iters = 16 + } + for n := iters; n > 0; n-- { + seen := make(map[int64]bool, mapSize) + + m.Range(func(ki int64, vi interface{}) bool { + k, v := ki, vi.(int64) + if v%k != 0 { + t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v) + } + if seen[k] { + t.Fatalf("Range visited key %v twice", k) + } + seen[k] = true + return true + }) + + if len(seen) != mapSize { + t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize) + } + } +} diff --git a/structure/skipmap/types.go b/structure/skipmap/types.go new file mode 100644 index 0000000..497c564 --- /dev/null +++ b/structure/skipmap/types.go @@ -0,0 +1,9244 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go run types_gen.go; DO NOT EDIT. +package skipmap + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// Float32Map represents a map based on skip list in ascending order. +type Float32Map struct { + header *float32Node + length int64 + highestLevel int64 // highest level for now +} + +type float32Node struct { + key float32 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*float32Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newFloat32Node(key float32, value interface{}, level int) *float32Node { + node := &float32Node{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *float32Node) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *float32Node) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *float32Node) loadNext(i int) *float32Node { + return (*float32Node)(n.next.load(i)) +} + +func (n *float32Node) storeNext(i int, node *float32Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *float32Node) atomicLoadNext(i int) *float32Node { + return (*float32Node)(n.next.atomicLoad(i)) +} + +func (n *float32Node) atomicStoreNext(i int, node *float32Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *float32Node) lessthan(key float32) bool { + return n.key < key +} + +func (n *float32Node) equal(key float32) bool { + return n.key == key +} + +// NewFloat32 return an empty float32 skipmap. +func NewFloat32() *Float32Map { + h := newFloat32Node(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Float32Map{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Float32Map) findNode(key float32, preds *[maxLevel]*float32Node, succs *[maxLevel]*float32Node) *float32Node { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Float32Map) findNodeDelete(key float32, preds *[maxLevel]*float32Node, succs *[maxLevel]*float32Node) int { + // lFound represents the index of the first layer at which it found a node. 
+	lFound, x := -1, s.header
+	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
+		succ := x.atomicLoadNext(i)
+		for succ != nil && succ.lessthan(key) {
+			x = succ
+			succ = x.atomicLoadNext(i)
+		}
+		preds[i] = x
+		succs[i] = succ
+
+		// Check if the key already in the skip list.
+		if lFound == -1 && succ != nil && succ.equal(key) {
+			lFound = i
+		}
+	}
+	return lFound
+}
+
+func unlockFloat32(preds [maxLevel]*float32Node, highestLevel int) {
+	var prevPred *float32Node
+	for i := highestLevel; i >= 0; i-- {
+		if preds[i] != prevPred { // the node could be unlocked by previous loop
+			preds[i].mu.Unlock()
+			prevPred = preds[i]
+		}
+	}
+}
+
+// Store sets the value for a key.
+func (s *Float32Map) Store(key float32, value interface{}) {
+	level := s.randomlevel()
+	var preds, succs [maxLevel]*float32Node
+	for {
+		nodeFound := s.findNode(key, &preds, &succs)
+		if nodeFound != nil { // indicating the key is already in the skip-list
+			if !nodeFound.flags.Get(marked) {
+				// We don't need to care about whether or not the node is fully linked,
+				// just replace the value.
+				nodeFound.storeVal(value)
+				return
+			}
+			// If the node is marked, represents some other goroutines is in the process of deleting this node,
+			// we need to add this node in next loop.
+			continue
+		}
+
+		// Add this node into skip list.
+		var (
+			highestLocked        = -1 // the highest level being locked by this process
+			valid                = true
+			pred, succ, prevPred *float32Node
+		)
+		for layer := 0; valid && layer < level; layer++ {
+			pred = preds[layer]   // target node's previous node
+			succ = succs[layer]   // target node's next node
+			if pred != prevPred { // the node in this layer could be locked by previous loop
+				pred.mu.Lock()
+				highestLocked = layer
+				prevPred = pred
+			}
+			// valid check if there is another node has inserted into the skip list in this layer during this process.
+			// It is valid if:
+			// 1. The previous node and next node both are not marked.
+			// 2. The previous node's next node is succ in this layer.
+			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
+		}
+		if !valid {
+			unlockFloat32(preds, highestLocked)
+			continue
+		}
+
+		nn := newFloat32Node(key, value, level)
+		for layer := 0; layer < level; layer++ {
+			nn.storeNext(layer, succs[layer])
+			preds[layer].atomicStoreNext(layer, nn)
+		}
+		nn.flags.SetTrue(fullyLinked)
+		unlockFloat32(preds, highestLocked)
+		atomic.AddInt64(&s.length, 1)
+		return
+	}
+}
+
+func (s *Float32Map) randomlevel() int {
+	// Generate random level.
+	level := randomLevel()
+	// Update highest level if possible.
+	for {
+		hl := atomic.LoadInt64(&s.highestLevel)
+		if int64(level) <= hl {
+			break
+		}
+		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
+			break
+		}
+	}
+	return level
+}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (s *Float32Map) Load(key float32) (value interface{}, ok bool) {
+	x := s.header
+	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
+		nex := x.atomicLoadNext(i)
+		for nex != nil && nex.lessthan(key) {
+			x = nex
+			nex = x.atomicLoadNext(i)
+		}
+
+		// Check if the key already in the skip list.
+		if nex != nil && nex.equal(key) {
+			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
+				return nex.loadVal(), true
+			}
+			return nil, false
+		}
+	}
+	return nil, false
+}
+
+// LoadAndDelete deletes the value for a key, returning the previous value if any.
+// The loaded result reports whether the key was present. +// (Modified from Delete) +func (s *Float32Map) LoadAndDelete(key float32) (value interface{}, loaded bool) { + var ( + nodeToDelete *float32Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*float32Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float32Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat32(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockFloat32(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Float32Map) LoadOrStore(key float32, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*float32Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float32Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat32(preds, highestLocked) + continue + } + + nn := newFloat32Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockFloat32(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Float32Map) LoadOrStoreLazy(key float32, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*float32Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float32Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockFloat32(preds, highestLocked) + continue + } + value := f() + nn := newFloat32Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockFloat32(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
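The Delete implementation for the ascending float32 map follows below. A usage note on LoadOrStoreLazy above: the value factory `f` is only invoked after the predecessors have been locked and validated, so it runs at most once per successful insert and not at all when the key already exists, which makes it suitable for expensive-to-build values. A minimal sketch, assuming it lives in a `_test.go` file inside the package:

```go
package skipmap

import "fmt"

// ExampleFloat32Map_LoadOrStoreLazy is an illustrative sketch (not part of the
// generated file): the factory runs only when the key is actually inserted.
func ExampleFloat32Map_LoadOrStoreLazy() {
	m := NewFloat32()
	calls := 0
	factory := func() interface{} {
		calls++ // stands in for an expensive construction
		return "expensive value"
	}
	v1, loaded1 := m.LoadOrStoreLazy(3.14, factory)
	v2, loaded2 := m.LoadOrStoreLazy(3.14, factory) // key exists, factory not called again
	fmt.Println(v1, loaded1, v2, loaded2, calls)
	// Output: expensive value false expensive value true 1
}
```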
+func (s *Float32Map) Delete(key float32) bool { + var ( + nodeToDelete *float32Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*float32Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float32Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockFloat32(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockFloat32(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Float32Map) Range(f func(key float32, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Float32Map) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Float32MapDesc represents a map based on skip list in descending order. 
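That completes the ascending Float32Map; the descending Float32MapDesc follows below. For orientation, a short end-to-end usage sketch of the API above, again assumed to sit in a `_test.go` file in the same package:

```go
package skipmap

import "fmt"

// ExampleFloat32Map walks through Store, Load, LoadAndDelete, Range and Len.
func ExampleFloat32Map() {
	m := NewFloat32()
	for _, k := range []float32{2.5, 0.5, 1.5} {
		m.Store(k, int(k*2)) // values are interface{}; any type works
	}
	if v, ok := m.Load(0.5); ok {
		fmt.Println("load:", v)
	}
	old, loaded := m.LoadAndDelete(1.5) // returns the previous value
	fmt.Println("deleted:", old, loaded)
	m.Range(func(key float32, value interface{}) bool {
		fmt.Println("range:", key, value) // keys come out in ascending order
		return true
	})
	fmt.Println("len:", m.Len())
	// Output:
	// load: 1
	// deleted: 3 true
	// range: 0.5 1
	// range: 2.5 5
	// len: 2
}
```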
+type Float32MapDesc struct { + header *float32NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type float32NodeDesc struct { + key float32 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*float32NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newFloat32NodeDesc(key float32, value interface{}, level int) *float32NodeDesc { + node := &float32NodeDesc{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *float32NodeDesc) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *float32NodeDesc) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *float32NodeDesc) loadNext(i int) *float32NodeDesc { + return (*float32NodeDesc)(n.next.load(i)) +} + +func (n *float32NodeDesc) storeNext(i int, node *float32NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *float32NodeDesc) atomicLoadNext(i int) *float32NodeDesc { + return (*float32NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *float32NodeDesc) atomicStoreNext(i int, node *float32NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *float32NodeDesc) lessthan(key float32) bool { + return n.key > key +} + +func (n *float32NodeDesc) equal(key float32) bool { + return n.key == key +} + +// NewFloat32Desc return an empty float32 skipmap. +func NewFloat32Desc() *Float32MapDesc { + h := newFloat32NodeDesc(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Float32MapDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Float32MapDesc) findNode(key float32, preds *[maxLevel]*float32NodeDesc, succs *[maxLevel]*float32NodeDesc) *float32NodeDesc { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Float32MapDesc) findNodeDelete(key float32, preds *[maxLevel]*float32NodeDesc, succs *[maxLevel]*float32NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockFloat32Desc(preds [maxLevel]*float32NodeDesc, highestLevel int) { + var prevPred *float32NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
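Store for the descending map follows below. Functionally, the only difference from the ascending variant is `lessthan`, which compares with `>` instead of `<`, so the level-0 list and therefore Range run from the largest key to the smallest. A quick sketch under the same in-package test-file assumption:

```go
package skipmap

import "fmt"

// ExampleFloat32MapDesc_Range shows the reversed iteration order.
func ExampleFloat32MapDesc_Range() {
	m := NewFloat32Desc()
	for _, k := range []float32{1, 3, 2} {
		m.Store(k, nil)
	}
	m.Range(func(key float32, _ interface{}) bool {
		fmt.Println(key) // descending: 3, 2, 1
		return true
	})
	// Output:
	// 3
	// 2
	// 1
}
```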
+func (s *Float32MapDesc) Store(key float32, value interface{}) {
+	level := s.randomlevel()
+	var preds, succs [maxLevel]*float32NodeDesc
+	for {
+		nodeFound := s.findNode(key, &preds, &succs)
+		if nodeFound != nil { // indicating the key is already in the skip-list
+			if !nodeFound.flags.Get(marked) {
+				// We don't need to care about whether or not the node is fully linked,
+				// just replace the value.
+				nodeFound.storeVal(value)
+				return
+			}
+			// If the node is marked, represents some other goroutines is in the process of deleting this node,
+			// we need to add this node in next loop.
+			continue
+		}
+
+		// Add this node into skip list.
+		var (
+			highestLocked        = -1 // the highest level being locked by this process
+			valid                = true
+			pred, succ, prevPred *float32NodeDesc
+		)
+		for layer := 0; valid && layer < level; layer++ {
+			pred = preds[layer]   // target node's previous node
+			succ = succs[layer]   // target node's next node
+			if pred != prevPred { // the node in this layer could be locked by previous loop
+				pred.mu.Lock()
+				highestLocked = layer
+				prevPred = pred
+			}
+			// valid check if there is another node has inserted into the skip list in this layer during this process.
+			// It is valid if:
+			// 1. The previous node and next node both are not marked.
+			// 2. The previous node's next node is succ in this layer.
+			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
+		}
+		if !valid {
+			unlockFloat32Desc(preds, highestLocked)
+			continue
+		}
+
+		nn := newFloat32NodeDesc(key, value, level)
+		for layer := 0; layer < level; layer++ {
+			nn.storeNext(layer, succs[layer])
+			preds[layer].atomicStoreNext(layer, nn)
+		}
+		nn.flags.SetTrue(fullyLinked)
+		unlockFloat32Desc(preds, highestLocked)
+		atomic.AddInt64(&s.length, 1)
+		return
+	}
+}
+
+func (s *Float32MapDesc) randomlevel() int {
+	// Generate random level.
+	level := randomLevel()
+	// Update highest level if possible.
+	for {
+		hl := atomic.LoadInt64(&s.highestLevel)
+		if int64(level) <= hl {
+			break
+		}
+		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
+			break
+		}
+	}
+	return level
+}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (s *Float32MapDesc) Load(key float32) (value interface{}, ok bool) {
+	x := s.header
+	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
+		nex := x.atomicLoadNext(i)
+		for nex != nil && nex.lessthan(key) {
+			x = nex
+			nex = x.atomicLoadNext(i)
+		}
+
+		// Check if the key already in the skip list.
+		if nex != nil && nex.equal(key) {
+			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
+				return nex.loadVal(), true
+			}
+			return nil, false
+		}
+	}
+	return nil, false
+}
+
+// LoadAndDelete deletes the value for a key, returning the previous value if any.
+// The loaded result reports whether the key was present.
+// (Modified from Delete) +func (s *Float32MapDesc) LoadAndDelete(key float32) (value interface{}, loaded bool) { + var ( + nodeToDelete *float32NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*float32NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float32NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat32Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockFloat32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Float32MapDesc) LoadOrStore(key float32, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*float32NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float32NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat32Desc(preds, highestLocked) + continue + } + + nn := newFloat32NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockFloat32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Float32MapDesc) LoadOrStoreLazy(key float32, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*float32NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float32NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockFloat32Desc(preds, highestLocked) + continue + } + value := f() + nn := newFloat32NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockFloat32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
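Delete for the descending map follows below. The LoadOrStore and LoadOrStoreLazy loops above guarantee that when many goroutines race on the same absent key, exactly one insert wins and every caller observes the winner's value, which is the property the map tests earlier in this diff exercise. A minimal race sketch, assumed to live in a `_test.go` file in the package:

```go
package skipmap

import (
	"sync"
	"sync/atomic"
	"testing"
)

// TestDescLoadOrStoreSingleWinner is an illustrative sketch: out of many
// concurrent LoadOrStore calls on one key, exactly one performs the insert.
func TestDescLoadOrStoreSingleWinner(t *testing.T) {
	m := NewFloat32Desc()
	var wins int64
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if _, loaded := m.LoadOrStore(1.0, i); !loaded {
				atomic.AddInt64(&wins, 1) // this goroutine's value was stored
			}
		}(i)
	}
	wg.Wait()
	if wins != 1 {
		t.Fatalf("expected exactly one winning insert, got %d", wins)
	}
}
```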
+func (s *Float32MapDesc) Delete(key float32) bool { + var ( + nodeToDelete *float32NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*float32NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float32NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockFloat32Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockFloat32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Float32MapDesc) Range(f func(key float32, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Float32MapDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Float64Map represents a map based on skip list in ascending order. 
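Float64Map below repeats the same machinery for float64 keys. The insertion protocol shared by every variant is: search for preds/succs without locks, lock each distinct pred from the bottom layer up, re-validate that neither pred nor succ has been marked and that pred still points at succ, and only then splice the new node in and set fullyLinked. The following is a deliberately simplified, single-level sketch of that validate-under-lock step; it is an illustration only, using the generics-based atomic types from modern Go rather than the unsafe.Pointer plumbing in this file:

```go
package sketch

import (
	"sync"
	"sync/atomic"
)

// node is a single-level stand-in for the skipmap's per-layer links.
type node struct {
	key    int
	next   atomic.Pointer[node]
	marked atomic.Bool // set by a (not shown) delete before unlinking
	mu     sync.Mutex
}

type list struct{ head *node } // head is a sentinel

func newList() *list { return &list{head: new(node)} }

// insert illustrates the lock-then-revalidate pattern used per layer.
func (l *list) insert(key int) bool {
	for {
		// 1. Optimistic, lock-free search for pred/succ.
		pred := l.head
		succ := pred.next.Load()
		for succ != nil && succ.key < key {
			pred, succ = succ, succ.next.Load()
		}
		if succ != nil && succ.key == key {
			return false // already present
		}
		// 2. Lock the predecessor and re-validate: it must not be marked and
		//    must still point at succ, otherwise another writer got in first.
		pred.mu.Lock()
		if pred.marked.Load() || pred.next.Load() != succ || (succ != nil && succ.marked.Load()) {
			pred.mu.Unlock()
			continue // retry from scratch
		}
		// 3. Safe to link the new node.
		nn := &node{key: key}
		nn.next.Store(succ)
		pred.next.Store(nn)
		pred.mu.Unlock()
		return true
	}
}
```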
+type Float64Map struct { + header *float64Node + length int64 + highestLevel int64 // highest level for now +} + +type float64Node struct { + key float64 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*float64Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newFloat64Node(key float64, value interface{}, level int) *float64Node { + node := &float64Node{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *float64Node) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *float64Node) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *float64Node) loadNext(i int) *float64Node { + return (*float64Node)(n.next.load(i)) +} + +func (n *float64Node) storeNext(i int, node *float64Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *float64Node) atomicLoadNext(i int) *float64Node { + return (*float64Node)(n.next.atomicLoad(i)) +} + +func (n *float64Node) atomicStoreNext(i int, node *float64Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *float64Node) lessthan(key float64) bool { + return n.key < key +} + +func (n *float64Node) equal(key float64) bool { + return n.key == key +} + +// NewFloat64 return an empty float64 skipmap. +func NewFloat64() *Float64Map { + h := newFloat64Node(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Float64Map{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Float64Map) findNode(key float64, preds *[maxLevel]*float64Node, succs *[maxLevel]*float64Node) *float64Node { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Float64Map) findNodeDelete(key float64, preds *[maxLevel]*float64Node, succs *[maxLevel]*float64Node) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockFloat64(preds [maxLevel]*float64Node, highestLevel int) { + var prevPred *float64Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
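Store for Float64Map follows below. The `flags` field used throughout is a small atomic bit set whose implementation lives elsewhere in the package and is not part of this diff; judging from the call sites, `SetTrue(fullyLinked)`, `Get(marked)`, and `MGet(fullyLinked|marked, fullyLinked)` (meaning "fully linked and not marked"), its semantics are presumably along these lines. This is an assumed reconstruction, not the package's actual code:

```go
package sketch

import "sync/atomic"

// bitflag is an assumed reconstruction of the node flag type: boolean flags
// packed into one atomically accessed word.
type bitflag struct {
	data uint32
}

// SetTrue sets the given flag bits.
func (f *bitflag) SetTrue(flags uint32) {
	for {
		old := atomic.LoadUint32(&f.data)
		if old&flags == flags {
			return // already set
		}
		if atomic.CompareAndSwapUint32(&f.data, old, old|flags) {
			return
		}
	}
}

// Get reports whether all of the given flag bits are set.
func (f *bitflag) Get(flags uint32) bool {
	return atomic.LoadUint32(&f.data)&flags == flags
}

// MGet masks the word with mask and compares against expect, so
// MGet(fullyLinked|marked, fullyLinked) means "fullyLinked set, marked clear".
func (f *bitflag) MGet(mask, expect uint32) bool {
	return atomic.LoadUint32(&f.data)&mask == expect
}

const (
	fullyLinked uint32 = 1 << iota
	marked
)
```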
+func (s *Float64Map) Store(key float64, value interface{}) {
+	level := s.randomlevel()
+	var preds, succs [maxLevel]*float64Node
+	for {
+		nodeFound := s.findNode(key, &preds, &succs)
+		if nodeFound != nil { // indicating the key is already in the skip-list
+			if !nodeFound.flags.Get(marked) {
+				// We don't need to care about whether or not the node is fully linked,
+				// just replace the value.
+				nodeFound.storeVal(value)
+				return
+			}
+			// If the node is marked, represents some other goroutines is in the process of deleting this node,
+			// we need to add this node in next loop.
+			continue
+		}
+
+		// Add this node into skip list.
+		var (
+			highestLocked        = -1 // the highest level being locked by this process
+			valid                = true
+			pred, succ, prevPred *float64Node
+		)
+		for layer := 0; valid && layer < level; layer++ {
+			pred = preds[layer]   // target node's previous node
+			succ = succs[layer]   // target node's next node
+			if pred != prevPred { // the node in this layer could be locked by previous loop
+				pred.mu.Lock()
+				highestLocked = layer
+				prevPred = pred
+			}
+			// valid check if there is another node has inserted into the skip list in this layer during this process.
+			// It is valid if:
+			// 1. The previous node and next node both are not marked.
+			// 2. The previous node's next node is succ in this layer.
+			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
+		}
+		if !valid {
+			unlockFloat64(preds, highestLocked)
+			continue
+		}
+
+		nn := newFloat64Node(key, value, level)
+		for layer := 0; layer < level; layer++ {
+			nn.storeNext(layer, succs[layer])
+			preds[layer].atomicStoreNext(layer, nn)
+		}
+		nn.flags.SetTrue(fullyLinked)
+		unlockFloat64(preds, highestLocked)
+		atomic.AddInt64(&s.length, 1)
+		return
+	}
+}
+
+func (s *Float64Map) randomlevel() int {
+	// Generate random level.
+	level := randomLevel()
+	// Update highest level if possible.
+	for {
+		hl := atomic.LoadInt64(&s.highestLevel)
+		if int64(level) <= hl {
+			break
+		}
+		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
+			break
+		}
+	}
+	return level
+}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (s *Float64Map) Load(key float64) (value interface{}, ok bool) {
+	x := s.header
+	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
+		nex := x.atomicLoadNext(i)
+		for nex != nil && nex.lessthan(key) {
+			x = nex
+			nex = x.atomicLoadNext(i)
+		}
+
+		// Check if the key already in the skip list.
+		if nex != nil && nex.equal(key) {
+			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
+				return nex.loadVal(), true
+			}
+			return nil, false
+		}
+	}
+	return nil, false
+}
+
+// LoadAndDelete deletes the value for a key, returning the previous value if any.
+// The loaded result reports whether the key was present.
+// (Modified from Delete) +func (s *Float64Map) LoadAndDelete(key float64) (value interface{}, loaded bool) { + var ( + nodeToDelete *float64Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*float64Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float64Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat64(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockFloat64(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Float64Map) LoadOrStore(key float64, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*float64Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float64Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat64(preds, highestLocked) + continue + } + + nn := newFloat64Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockFloat64(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Float64Map) LoadOrStoreLazy(key float64, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*float64Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float64Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockFloat64(preds, highestLocked) + continue + } + value := f() + nn := newFloat64Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockFloat64(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
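Delete and Range for Float64Map follow below. As the Range doc comment on every variant states, iteration is not a consistent snapshot: keys stored or deleted concurrently may or may not be observed, but no key is ever visited twice. A sketch of the kind of invariant callers can still rely on under concurrent writes, assumed to live in a `_test.go` file and written in the spirit of TestConcurrentRange earlier in this diff:

```go
package skipmap

import (
	"sync"
	"testing"
)

// TestFloat64RangeNoDuplicates is an illustrative sketch: even with a
// concurrent writer overwriting values, Range never yields a key twice.
func TestFloat64RangeNoDuplicates(t *testing.T) {
	m := NewFloat64()
	for i := 0; i < 128; i++ {
		m.Store(float64(i), i)
	}
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // concurrent writer: keep overwriting existing keys
		defer wg.Done()
		for i := 0; ; i++ {
			select {
			case <-done:
				return
			default:
				m.Store(float64(i%128), i)
			}
		}
	}()
	for iter := 0; iter < 64; iter++ {
		seen := make(map[float64]bool)
		m.Range(func(key float64, _ interface{}) bool {
			if seen[key] {
				t.Fatalf("Range visited key %v twice", key)
			}
			seen[key] = true
			return true
		})
	}
	close(done)
	wg.Wait()
}
```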
+func (s *Float64Map) Delete(key float64) bool { + var ( + nodeToDelete *float64Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*float64Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float64Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockFloat64(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockFloat64(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Float64Map) Range(f func(key float64, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Float64Map) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Float64MapDesc represents a map based on skip list in descending order. 
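Float64MapDesc below mirrors Float64Map with the comparison reversed. One detail worth noting from the node types: values are kept behind an unsafe.Pointer to an interface{}, so storeVal/loadVal can replace a value with a single atomic pointer swap and readers never need the node lock. A stand-alone sketch of that idiom, as an illustration rather than the generated code:

```go
package sketch

import (
	"sync/atomic"
	"unsafe"
)

// box holds an arbitrary value that can be replaced and read atomically,
// the same idiom the skipmap nodes use for their value field.
type box struct {
	p unsafe.Pointer // *interface{}
}

// store publishes a fresh *interface{}; readers see either the old pointer or
// the new one, never a half-written value. Like the skipmap node constructor,
// callers must store once before the first load.
func (b *box) store(v interface{}) {
	atomic.StorePointer(&b.p, unsafe.Pointer(&v))
}

func (b *box) load() interface{} {
	return *(*interface{})(atomic.LoadPointer(&b.p))
}
```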
+type Float64MapDesc struct { + header *float64NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type float64NodeDesc struct { + key float64 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*float64NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newFloat64NodeDesc(key float64, value interface{}, level int) *float64NodeDesc { + node := &float64NodeDesc{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *float64NodeDesc) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *float64NodeDesc) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *float64NodeDesc) loadNext(i int) *float64NodeDesc { + return (*float64NodeDesc)(n.next.load(i)) +} + +func (n *float64NodeDesc) storeNext(i int, node *float64NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *float64NodeDesc) atomicLoadNext(i int) *float64NodeDesc { + return (*float64NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *float64NodeDesc) atomicStoreNext(i int, node *float64NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *float64NodeDesc) lessthan(key float64) bool { + return n.key > key +} + +func (n *float64NodeDesc) equal(key float64) bool { + return n.key == key +} + +// NewFloat64Desc return an empty float64 skipmap. +func NewFloat64Desc() *Float64MapDesc { + h := newFloat64NodeDesc(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Float64MapDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Float64MapDesc) findNode(key float64, preds *[maxLevel]*float64NodeDesc, succs *[maxLevel]*float64NodeDesc) *float64NodeDesc { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Float64MapDesc) findNodeDelete(key float64, preds *[maxLevel]*float64NodeDesc, succs *[maxLevel]*float64NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockFloat64Desc(preds [maxLevel]*float64NodeDesc, highestLevel int) { + var prevPred *float64NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *Float64MapDesc) Store(key float64, value interface{}) {
+	level := s.randomlevel()
+	var preds, succs [maxLevel]*float64NodeDesc
+	for {
+		nodeFound := s.findNode(key, &preds, &succs)
+		if nodeFound != nil { // indicating the key is already in the skip-list
+			if !nodeFound.flags.Get(marked) {
+				// We don't need to care about whether or not the node is fully linked,
+				// just replace the value.
+				nodeFound.storeVal(value)
+				return
+			}
+			// If the node is marked, represents some other goroutines is in the process of deleting this node,
+			// we need to add this node in next loop.
+			continue
+		}
+
+		// Add this node into skip list.
+		var (
+			highestLocked        = -1 // the highest level being locked by this process
+			valid                = true
+			pred, succ, prevPred *float64NodeDesc
+		)
+		for layer := 0; valid && layer < level; layer++ {
+			pred = preds[layer]   // target node's previous node
+			succ = succs[layer]   // target node's next node
+			if pred != prevPred { // the node in this layer could be locked by previous loop
+				pred.mu.Lock()
+				highestLocked = layer
+				prevPred = pred
+			}
+			// valid check if there is another node has inserted into the skip list in this layer during this process.
+			// It is valid if:
+			// 1. The previous node and next node both are not marked.
+			// 2. The previous node's next node is succ in this layer.
+			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
+		}
+		if !valid {
+			unlockFloat64Desc(preds, highestLocked)
+			continue
+		}
+
+		nn := newFloat64NodeDesc(key, value, level)
+		for layer := 0; layer < level; layer++ {
+			nn.storeNext(layer, succs[layer])
+			preds[layer].atomicStoreNext(layer, nn)
+		}
+		nn.flags.SetTrue(fullyLinked)
+		unlockFloat64Desc(preds, highestLocked)
+		atomic.AddInt64(&s.length, 1)
+		return
+	}
+}
+
+func (s *Float64MapDesc) randomlevel() int {
+	// Generate random level.
+	level := randomLevel()
+	// Update highest level if possible.
+	for {
+		hl := atomic.LoadInt64(&s.highestLevel)
+		if int64(level) <= hl {
+			break
+		}
+		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
+			break
+		}
+	}
+	return level
+}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (s *Float64MapDesc) Load(key float64) (value interface{}, ok bool) {
+	x := s.header
+	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
+		nex := x.atomicLoadNext(i)
+		for nex != nil && nex.lessthan(key) {
+			x = nex
+			nex = x.atomicLoadNext(i)
+		}
+
+		// Check if the key already in the skip list.
+		if nex != nil && nex.equal(key) {
+			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
+				return nex.loadVal(), true
+			}
+			return nil, false
+		}
+	}
+	return nil, false
+}
+
+// LoadAndDelete deletes the value for a key, returning the previous value if any.
+// The loaded result reports whether the key was present.
+// (Modified from Delete) +func (s *Float64MapDesc) LoadAndDelete(key float64) (value interface{}, loaded bool) { + var ( + nodeToDelete *float64NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*float64NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float64NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat64Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockFloat64Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Float64MapDesc) LoadOrStore(key float64, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*float64NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float64NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat64Desc(preds, highestLocked) + continue + } + + nn := newFloat64NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockFloat64Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Float64MapDesc) LoadOrStoreLazy(key float64, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*float64NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float64NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockFloat64Desc(preds, highestLocked) + continue + } + value := f() + nn := newFloat64NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockFloat64Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
+func (s *Float64MapDesc) Delete(key float64) bool { + var ( + nodeToDelete *float64NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*float64NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float64NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockFloat64Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockFloat64Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Float64MapDesc) Range(f func(key float64, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Float64MapDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Int32Map represents a map based on skip list in ascending order. 
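Int32Map keeps its keys sorted ascending and exposes the same Store/Load/Delete/Range/Len surface as the other variants. A short usage sketch, assuming the package is imported as skipmap (placeholder import path):

package main

import (
	"fmt"

	"example.com/skipmap" // placeholder import path for the package defining these maps
)

func main() {
	m := skipmap.NewInt32()
	for _, k := range []int32{30, 10, 20} {
		m.Store(k, fmt.Sprintf("v%d", k))
	}

	if v, ok := m.Load(20); ok {
		fmt.Println("loaded:", v) // loaded: v20
	}

	// Range visits keys in ascending order: 10, 20, 30.
	m.Range(func(key int32, value interface{}) bool {
		fmt.Println(key, value)
		return true
	})
	fmt.Println("len:", m.Len()) // len: 3
}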
+type Int32Map struct { + header *int32Node + length int64 + highestLevel int64 // highest level for now +} + +type int32Node struct { + key int32 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*int32Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newInt32Node(key int32, value interface{}, level int) *int32Node { + node := &int32Node{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *int32Node) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *int32Node) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *int32Node) loadNext(i int) *int32Node { + return (*int32Node)(n.next.load(i)) +} + +func (n *int32Node) storeNext(i int, node *int32Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *int32Node) atomicLoadNext(i int) *int32Node { + return (*int32Node)(n.next.atomicLoad(i)) +} + +func (n *int32Node) atomicStoreNext(i int, node *int32Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *int32Node) lessthan(key int32) bool { + return n.key < key +} + +func (n *int32Node) equal(key int32) bool { + return n.key == key +} + +// NewInt32 return an empty int32 skipmap. +func NewInt32() *Int32Map { + h := newInt32Node(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Int32Map{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Int32Map) findNode(key int32, preds *[maxLevel]*int32Node, succs *[maxLevel]*int32Node) *int32Node { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Int32Map) findNodeDelete(key int32, preds *[maxLevel]*int32Node, succs *[maxLevel]*int32Node) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockInt32(preds [maxLevel]*int32Node, highestLevel int) { + var prevPred *int32Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
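When the key already exists, Store below swaps the value in place through storeVal: the interface{} is boxed into a fresh allocation and published with a single atomic pointer store, so readers on the lock-free path (loadVal) always see a complete old or new value, never a torn one. A standalone sketch of just this boxing pattern, illustrative only and separate from the node type itself:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// box keeps an interface{} value behind one atomically swapped pointer,
// mirroring the value field of the skipmap nodes.
type box struct {
	value unsafe.Pointer // *interface{}
}

func (b *box) store(v interface{}) {
	// &v escapes to the heap; a concurrent load observes either the old
	// pointer or the new one, never a half-written value.
	atomic.StorePointer(&b.value, unsafe.Pointer(&v))
}

func (b *box) load() interface{} {
	return *(*interface{})(atomic.LoadPointer(&b.value))
}

func main() {
	var b box
	b.store("first")
	b.store(42)
	fmt.Println(b.load()) // 42
}

Boxing trades one small allocation per update for wait-free reads, which suits read-heavy workloads.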
+func (s *Int32Map) Store(key int32, value interface{}) { + level := s.randomlevel() + var preds, succs [maxLevel]*int32Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt32(preds, highestLocked) + continue + } + + nn := newInt32Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt32(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + } +} + +func (s *Int32Map) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *Int32Map) Load(key int32) (value interface{}, ok bool) { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(key) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.equal(key) { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return nil, false + } + } + return nil, false +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
+// (Modified from Delete) +func (s *Int32Map) LoadAndDelete(key int32) (value interface{}, loaded bool) { + var ( + nodeToDelete *int32Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int32Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt32(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockInt32(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Int32Map) LoadOrStore(key int32, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*int32Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt32(preds, highestLocked) + continue + } + + nn := newInt32Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt32(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Int32Map) LoadOrStoreLazy(key int32, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*int32Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockInt32(preds, highestLocked) + continue + } + value := f() + nn := newInt32Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt32(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
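Delete below, like the insertion paths above, must hold the predecessor's lock at every affected level. Because one node is frequently the predecessor of several consecutive levels, the pred != prevPred check locks each distinct node only once, and unlockInt32 releases them with the same deduplication. A small standalone sketch of that idea, separate from the skipmap types:

package main

import (
	"fmt"
	"sync"
)

type node struct {
	name string
	mu   sync.Mutex
}

// lockDistinct locks each node once even when it repeats on consecutive
// levels, and returns a function that unlocks them in reverse order.
func lockDistinct(preds []*node) (unlock func()) {
	var prev *node
	var locked []*node
	for _, p := range preds {
		if p != prev { // the same node may guard several consecutive levels
			p.mu.Lock()
			locked = append(locked, p)
			prev = p
		}
	}
	return func() {
		for i := len(locked) - 1; i >= 0; i-- {
			locked[i].mu.Unlock()
		}
	}
}

func main() {
	a, b := &node{name: "a"}, &node{name: "b"}
	unlock := lockDistinct([]*node{a, a, b, b, b}) // a and b are each locked once
	fmt.Println("critical section")
	unlock()
}

Without the deduplication, a second Lock on the same mutex would deadlock, since sync.Mutex is not reentrant.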
+func (s *Int32Map) Delete(key int32) bool { + var ( + nodeToDelete *int32Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int32Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockInt32(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockInt32(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Int32Map) Range(f func(key int32, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Int32Map) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Int32MapDesc represents a map based on skip list in descending order. 
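It is the same generated structure with the comparison inverted (lessthan uses > instead of <), so Range walks keys from largest to smallest. A brief sketch contrasting the two orders, with the same placeholder import as before:

package main

import (
	"fmt"

	"example.com/skipmap" // placeholder import path
)

func main() {
	asc, desc := skipmap.NewInt32(), skipmap.NewInt32Desc()
	for _, k := range []int32{1, 3, 2} {
		asc.Store(k, struct{}{})
		desc.Store(k, struct{}{})
	}

	asc.Range(func(key int32, _ interface{}) bool {
		fmt.Print(key, " ") // 1 2 3
		return true
	})
	fmt.Println()

	desc.Range(func(key int32, _ interface{}) bool {
		fmt.Print(key, " ") // 3 2 1
		return true
	})
	fmt.Println()
}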
+type Int32MapDesc struct { + header *int32NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type int32NodeDesc struct { + key int32 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*int32NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newInt32NodeDesc(key int32, value interface{}, level int) *int32NodeDesc { + node := &int32NodeDesc{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *int32NodeDesc) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *int32NodeDesc) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *int32NodeDesc) loadNext(i int) *int32NodeDesc { + return (*int32NodeDesc)(n.next.load(i)) +} + +func (n *int32NodeDesc) storeNext(i int, node *int32NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *int32NodeDesc) atomicLoadNext(i int) *int32NodeDesc { + return (*int32NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *int32NodeDesc) atomicStoreNext(i int, node *int32NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *int32NodeDesc) lessthan(key int32) bool { + return n.key > key +} + +func (n *int32NodeDesc) equal(key int32) bool { + return n.key == key +} + +// NewInt32Desc return an empty int32 skipmap. +func NewInt32Desc() *Int32MapDesc { + h := newInt32NodeDesc(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Int32MapDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Int32MapDesc) findNode(key int32, preds *[maxLevel]*int32NodeDesc, succs *[maxLevel]*int32NodeDesc) *int32NodeDesc { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Int32MapDesc) findNodeDelete(key int32, preds *[maxLevel]*int32NodeDesc, succs *[maxLevel]*int32NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockInt32Desc(preds [maxLevel]*int32NodeDesc, highestLevel int) { + var prevPred *int32NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *Int32MapDesc) Store(key int32, value interface{}) { + level := s.randomlevel() + var preds, succs [maxLevel]*int32NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt32Desc(preds, highestLocked) + continue + } + + nn := newInt32NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + } +} + +func (s *Int32MapDesc) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *Int32MapDesc) Load(key int32) (value interface{}, ok bool) { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(key) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.equal(key) { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return nil, false + } + } + return nil, false +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
+// (Modified from Delete) +func (s *Int32MapDesc) LoadAndDelete(key int32) (value interface{}, loaded bool) { + var ( + nodeToDelete *int32NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int32NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt32Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockInt32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Int32MapDesc) LoadOrStore(key int32, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*int32NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt32Desc(preds, highestLocked) + continue + } + + nn := newInt32NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Int32MapDesc) LoadOrStoreLazy(key int32, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*int32NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockInt32Desc(preds, highestLocked) + continue + } + value := f() + nn := newInt32NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
+func (s *Int32MapDesc) Delete(key int32) bool { + var ( + nodeToDelete *int32NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int32NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockInt32Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockInt32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Int32MapDesc) Range(f func(key int32, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Int32MapDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Int16Map represents a map based on skip list in ascending order. 
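The int16 variant repeats the same template. LoadOrStoreLazy is the method to reach for when values are expensive to build: the constructor function runs only when the key is actually inserted, so a lookup that finds an existing entry never pays for construction. A sketch, again with the placeholder import:

package main

import (
	"fmt"

	"example.com/skipmap" // placeholder import path
)

func expensive() interface{} {
	fmt.Println("building value")
	return []byte("large payload")
}

func main() {
	m := skipmap.NewInt16()

	v, loaded := m.LoadOrStoreLazy(7, expensive) // prints "building value" and stores the result
	fmt.Printf("%s %v\n", v, loaded)             // large payload false

	v, loaded = m.LoadOrStoreLazy(7, expensive) // the key exists, expensive is not called
	fmt.Printf("%s %v\n", v, loaded)            // large payload true
}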
+type Int16Map struct { + header *int16Node + length int64 + highestLevel int64 // highest level for now +} + +type int16Node struct { + key int16 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*int16Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newInt16Node(key int16, value interface{}, level int) *int16Node { + node := &int16Node{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *int16Node) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *int16Node) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *int16Node) loadNext(i int) *int16Node { + return (*int16Node)(n.next.load(i)) +} + +func (n *int16Node) storeNext(i int, node *int16Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *int16Node) atomicLoadNext(i int) *int16Node { + return (*int16Node)(n.next.atomicLoad(i)) +} + +func (n *int16Node) atomicStoreNext(i int, node *int16Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *int16Node) lessthan(key int16) bool { + return n.key < key +} + +func (n *int16Node) equal(key int16) bool { + return n.key == key +} + +// NewInt16 return an empty int16 skipmap. +func NewInt16() *Int16Map { + h := newInt16Node(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Int16Map{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Int16Map) findNode(key int16, preds *[maxLevel]*int16Node, succs *[maxLevel]*int16Node) *int16Node { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Int16Map) findNodeDelete(key int16, preds *[maxLevel]*int16Node, succs *[maxLevel]*int16Node) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockInt16(preds [maxLevel]*int16Node, highestLevel int) { + var prevPred *int16Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *Int16Map) Store(key int16, value interface{}) { + level := s.randomlevel() + var preds, succs [maxLevel]*int16Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt16(preds, highestLocked) + continue + } + + nn := newInt16Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt16(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + } +} + +func (s *Int16Map) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *Int16Map) Load(key int16) (value interface{}, ok bool) { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(key) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.equal(key) { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return nil, false + } + } + return nil, false +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
+// (Modified from Delete) +func (s *Int16Map) LoadAndDelete(key int16) (value interface{}, loaded bool) { + var ( + nodeToDelete *int16Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int16Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt16(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockInt16(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Int16Map) LoadOrStore(key int16, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*int16Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt16(preds, highestLocked) + continue + } + + nn := newInt16Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt16(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Int16Map) LoadOrStoreLazy(key int16, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*int16Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockInt16(preds, highestLocked) + continue + } + value := f() + nn := newInt16Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt16(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
+func (s *Int16Map) Delete(key int16) bool { + var ( + nodeToDelete *int16Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int16Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockInt16(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockInt16(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Int16Map) Range(f func(key int16, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Int16Map) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Int16MapDesc represents a map based on skip list in descending order. 
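Like the other variants, Int16MapDesc needs no external locking: Store, Delete and the read paths may run from many goroutines at once, and the length is maintained with atomic adds so Len can be read at any time. A sketch of concurrent writers, with the placeholder import:

package main

import (
	"fmt"
	"sync"

	"example.com/skipmap" // placeholder import path
)

func main() {
	m := skipmap.NewInt16Desc()
	var wg sync.WaitGroup
	for g := 0; g < 4; g++ {
		wg.Add(1)
		go func(g int) {
			defer wg.Done()
			for i := 0; i < 100; i++ {
				m.Store(int16(g*100+i), g) // each goroutine writes a disjoint key range
			}
		}(g)
	}
	wg.Wait()
	fmt.Println(m.Len()) // 400
}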
+type Int16MapDesc struct { + header *int16NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type int16NodeDesc struct { + key int16 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*int16NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newInt16NodeDesc(key int16, value interface{}, level int) *int16NodeDesc { + node := &int16NodeDesc{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *int16NodeDesc) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *int16NodeDesc) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *int16NodeDesc) loadNext(i int) *int16NodeDesc { + return (*int16NodeDesc)(n.next.load(i)) +} + +func (n *int16NodeDesc) storeNext(i int, node *int16NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *int16NodeDesc) atomicLoadNext(i int) *int16NodeDesc { + return (*int16NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *int16NodeDesc) atomicStoreNext(i int, node *int16NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *int16NodeDesc) lessthan(key int16) bool { + return n.key > key +} + +func (n *int16NodeDesc) equal(key int16) bool { + return n.key == key +} + +// NewInt16Desc return an empty int16 skipmap. +func NewInt16Desc() *Int16MapDesc { + h := newInt16NodeDesc(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Int16MapDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Int16MapDesc) findNode(key int16, preds *[maxLevel]*int16NodeDesc, succs *[maxLevel]*int16NodeDesc) *int16NodeDesc { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Int16MapDesc) findNodeDelete(key int16, preds *[maxLevel]*int16NodeDesc, succs *[maxLevel]*int16NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockInt16Desc(preds [maxLevel]*int16NodeDesc, highestLevel int) { + var prevPred *int16NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *Int16MapDesc) Store(key int16, value interface{}) { + level := s.randomlevel() + var preds, succs [maxLevel]*int16NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt16Desc(preds, highestLocked) + continue + } + + nn := newInt16NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + } +} + +func (s *Int16MapDesc) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *Int16MapDesc) Load(key int16) (value interface{}, ok bool) { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(key) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.equal(key) { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return nil, false + } + } + return nil, false +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
+// (Modified from Delete) +func (s *Int16MapDesc) LoadAndDelete(key int16) (value interface{}, loaded bool) { + var ( + nodeToDelete *int16NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int16NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt16Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockInt16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Int16MapDesc) LoadOrStore(key int16, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*int16NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt16Desc(preds, highestLocked) + continue + } + + nn := newInt16NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Int16MapDesc) LoadOrStoreLazy(key int16, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*int16NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockInt16Desc(preds, highestLocked) + continue + } + value := f() + nn := newInt16NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
+func (s *Int16MapDesc) Delete(key int16) bool { + var ( + nodeToDelete *int16NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int16NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockInt16Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockInt16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Int16MapDesc) Range(f func(key int16, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Int16MapDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// IntMap represents a map based on skip list in ascending order. 
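Before IntMap is defined, a short usage sketch may help summarize the Int16MapDesc methods above. It is only a sketch: it assumes a NewInt16Desc constructor appears earlier in this file alongside the other New* helpers, that the snippet lives in the same package, and that fmt is imported; the function name is illustrative.

func exampleInt16DescLoadAndDelete() {
    m := NewInt16Desc()
    m.Store(7, "seven")

    // LoadAndDelete removes the key and hands back the stored value.
    if v, loaded := m.LoadAndDelete(7); loaded {
        fmt.Println(v) // "seven"
    }

    // A second call finds nothing: the key is already gone, and Delete
    // likewise reports that there was nothing to remove.
    _, loaded := m.LoadAndDelete(7)
    fmt.Println(loaded, m.Delete(7), m.Len()) // false false 0
}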
+type IntMap struct { + header *intNode + length int64 + highestLevel int64 // highest level for now +} + +type intNode struct { + key int + value unsafe.Pointer // *interface{} + next optionalArray // [level]*intNode + mu sync.Mutex + flags bitflag + level uint32 +} + +func newIntNode(key int, value interface{}, level int) *intNode { + node := &intNode{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *intNode) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *intNode) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *intNode) loadNext(i int) *intNode { + return (*intNode)(n.next.load(i)) +} + +func (n *intNode) storeNext(i int, node *intNode) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *intNode) atomicLoadNext(i int) *intNode { + return (*intNode)(n.next.atomicLoad(i)) +} + +func (n *intNode) atomicStoreNext(i int, node *intNode) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *intNode) lessthan(key int) bool { + return n.key < key +} + +func (n *intNode) equal(key int) bool { + return n.key == key +} + +// NewInt return an empty int skipmap. +func NewInt() *IntMap { + h := newIntNode(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &IntMap{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *IntMap) findNode(key int, preds *[maxLevel]*intNode, succs *[maxLevel]*intNode) *intNode { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *IntMap) findNodeDelete(key int, preds *[maxLevel]*intNode, succs *[maxLevel]*intNode) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockInt(preds [maxLevel]*intNode, highestLevel int) { + var prevPred *intNode + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *IntMap) Store(key int, value interface{}) { + level := s.randomlevel() + var preds, succs [maxLevel]*intNode + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNode + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt(preds, highestLocked) + continue + } + + nn := newIntNode(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + } +} + +func (s *IntMap) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *IntMap) Load(key int) (value interface{}, ok bool) { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(key) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.equal(key) { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return nil, false + } + } + return nil, false +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
+// (Modified from Delete) +func (s *IntMap) LoadAndDelete(key int) (value interface{}, loaded bool) { + var ( + nodeToDelete *intNode + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*intNode + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNode + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockInt(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *IntMap) LoadOrStore(key int, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*intNode + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNode + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. 
+ // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt(preds, highestLocked) + continue + } + + nn := newIntNode(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *IntMap) LoadOrStoreLazy(key int, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*intNode + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNode + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockInt(preds, highestLocked) + continue + } + value := f() + nn := newIntNode(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. +func (s *IntMap) Delete(key int) bool { + var ( + nodeToDelete *intNode + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*intNode + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. 
+ nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNode + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockInt(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockInt(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *IntMap) Range(f func(key int, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *IntMap) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// IntMapDesc represents a map based on skip list in descending order. 
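Before the descending IntMapDesc is laid out, here is a brief usage sketch for the ascending IntMap completed above. It assumes the snippet sits in the same package with fmt imported; the function name and printed values are illustrative only.

func exampleIntMapUsage() {
    m := NewInt()
    m.Store(2, "two")
    m.Store(1, "one")

    if v, ok := m.Load(1); ok {
        fmt.Println(v) // "one"
    }

    // LoadOrStore keeps the existing value when the key is present.
    actual, loaded := m.LoadOrStore(2, "TWO")
    fmt.Println(actual, loaded) // "two true"

    // Range visits keys in ascending order along the bottom level.
    m.Range(func(key int, value interface{}) bool {
        fmt.Println(key, value) // 1 one, then 2 two
        return true
    })

    m.Delete(1)
    fmt.Println(m.Len()) // 1
}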
+type IntMapDesc struct { + header *intNodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type intNodeDesc struct { + key int + value unsafe.Pointer // *interface{} + next optionalArray // [level]*intNodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newIntNodeDesc(key int, value interface{}, level int) *intNodeDesc { + node := &intNodeDesc{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *intNodeDesc) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *intNodeDesc) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *intNodeDesc) loadNext(i int) *intNodeDesc { + return (*intNodeDesc)(n.next.load(i)) +} + +func (n *intNodeDesc) storeNext(i int, node *intNodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *intNodeDesc) atomicLoadNext(i int) *intNodeDesc { + return (*intNodeDesc)(n.next.atomicLoad(i)) +} + +func (n *intNodeDesc) atomicStoreNext(i int, node *intNodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *intNodeDesc) lessthan(key int) bool { + return n.key > key +} + +func (n *intNodeDesc) equal(key int) bool { + return n.key == key +} + +// NewIntDesc return an empty int skipmap. +func NewIntDesc() *IntMapDesc { + h := newIntNodeDesc(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &IntMapDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *IntMapDesc) findNode(key int, preds *[maxLevel]*intNodeDesc, succs *[maxLevel]*intNodeDesc) *intNodeDesc { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *IntMapDesc) findNodeDelete(key int, preds *[maxLevel]*intNodeDesc, succs *[maxLevel]*intNodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockIntDesc(preds [maxLevel]*intNodeDesc, highestLevel int) { + var prevPred *intNodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *IntMapDesc) Store(key int, value interface{}) { + level := s.randomlevel() + var preds, succs [maxLevel]*intNodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockIntDesc(preds, highestLocked) + continue + } + + nn := newIntNodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockIntDesc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + } +} + +func (s *IntMapDesc) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *IntMapDesc) Load(key int) (value interface{}, ok bool) { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(key) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.equal(key) { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return nil, false + } + } + return nil, false +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
+// (Modified from Delete) +func (s *IntMapDesc) LoadAndDelete(key int) (value interface{}, loaded bool) { + var ( + nodeToDelete *intNodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*intNodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockIntDesc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockIntDesc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *IntMapDesc) LoadOrStore(key int, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*intNodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockIntDesc(preds, highestLocked) + continue + } + + nn := newIntNodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockIntDesc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *IntMapDesc) LoadOrStoreLazy(key int, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*intNodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockIntDesc(preds, highestLocked) + continue + } + value := f() + nn := newIntNodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockIntDesc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
+func (s *IntMapDesc) Delete(key int) bool { + var ( + nodeToDelete *intNodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*intNodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockIntDesc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockIntDesc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *IntMapDesc) Range(f func(key int, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *IntMapDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Uint64Map represents a map based on skip list in ascending order. 
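Before Uint64Map, a minimal sketch of how IntMapDesc differs from IntMap in practice: the two share their structure, and only lessthan (which compares with > rather than <) inverts the order, so Range visits keys from largest to smallest. Same assumptions as the earlier sketches.

func exampleIntMapDescOrder() {
    m := NewIntDesc()
    for _, k := range []int{3, 1, 2} {
        m.Store(k, k*10)
    }
    // The bottom level is linked in descending key order.
    m.Range(func(key int, value interface{}) bool {
        fmt.Println(key, value) // 3 30, then 2 20, then 1 10
        return true
    })
}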
+type Uint64Map struct { + header *uint64Node + length int64 + highestLevel int64 // highest level for now +} + +type uint64Node struct { + key uint64 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*uint64Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUuint64Node(key uint64, value interface{}, level int) *uint64Node { + node := &uint64Node{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uint64Node) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *uint64Node) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *uint64Node) loadNext(i int) *uint64Node { + return (*uint64Node)(n.next.load(i)) +} + +func (n *uint64Node) storeNext(i int, node *uint64Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uint64Node) atomicLoadNext(i int) *uint64Node { + return (*uint64Node)(n.next.atomicLoad(i)) +} + +func (n *uint64Node) atomicStoreNext(i int, node *uint64Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uint64Node) lessthan(key uint64) bool { + return n.key < key +} + +func (n *uint64Node) equal(key uint64) bool { + return n.key == key +} + +// NewUint64 return an empty uint64 skipmap. +func NewUint64() *Uint64Map { + h := newUuint64Node(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Uint64Map{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Uint64Map) findNode(key uint64, preds *[maxLevel]*uint64Node, succs *[maxLevel]*uint64Node) *uint64Node { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Uint64Map) findNodeDelete(key uint64, preds *[maxLevel]*uint64Node, succs *[maxLevel]*uint64Node) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockUint64(preds [maxLevel]*uint64Node, highestLevel int) { + var prevPred *uint64Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *Uint64Map) Store(key uint64, value interface{}) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint64Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint64Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint64(preds, highestLocked) + continue + } + + nn := newUuint64Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint64(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + } +} + +func (s *Uint64Map) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *Uint64Map) Load(key uint64) (value interface{}, ok bool) { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(key) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.equal(key) { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return nil, false + } + } + return nil, false +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
+// (Modified from Delete) +func (s *Uint64Map) LoadAndDelete(key uint64) (value interface{}, loaded bool) { + var ( + nodeToDelete *uint64Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint64Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint64Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint64(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint64(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Uint64Map) LoadOrStore(key uint64, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint64Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint64Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint64(preds, highestLocked) + continue + } + + nn := newUuint64Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint64(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Uint64Map) LoadOrStoreLazy(key uint64, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint64Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint64Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockUint64(preds, highestLocked) + continue + } + value := f() + nn := newUuint64Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint64(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
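Before Delete, a small sketch of what the lazy variant buys: LoadOrStoreLazy calls the supplied constructor only when this call actually inserts the key, so an expensive value is not built when the key is already present. Same assumptions as the earlier sketches.

func exampleUint64LazyStore() {
    m := NewUint64()
    calls := 0
    build := func() interface{} {
        calls++
        return make([]byte, 1<<20) // stand-in for an expensive value
    }

    _, loaded1 := m.LoadOrStoreLazy(42, build) // key absent: build runs and the value is stored
    _, loaded2 := m.LoadOrStoreLazy(42, build) // key present: build is skipped
    fmt.Println(loaded1, loaded2, calls)       // false true 1
}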
+func (s *Uint64Map) Delete(key uint64) bool { + var ( + nodeToDelete *uint64Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint64Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint64Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockUint64(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint64(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Uint64Map) Range(f func(key uint64, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Uint64Map) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Uint64MapDesc represents a map based on skip list in descending order. 
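Before the descending Uint64MapDesc, a concurrency sketch for the Uint64Map above: Store and Load are safe to call from many goroutines without external locking, which is what the per-node mutexes, the marked and fullyLinked flags, and the atomic next-pointer operations provide. The sketch assumes a sync import; counts and names are illustrative.

func exampleUint64Concurrent() {
    m := NewUint64()
    var wg sync.WaitGroup
    for g := uint64(0); g < 8; g++ {
        wg.Add(1)
        go func(base uint64) {
            defer wg.Done()
            // Each goroutine owns the keys congruent to base modulo 8,
            // so the 1000 keys below are all distinct.
            for k := base; k < 1000; k += 8 {
                m.Store(k, k)
            }
        }(g)
    }
    wg.Wait()
    // Once Wait returns, every stored node is fully linked and visible to Load.
    for k := uint64(0); k < 1000; k++ {
        if _, ok := m.Load(k); !ok {
            panic("key missing after concurrent Store")
        }
    }
}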
+type Uint64MapDesc struct { + header *uint64NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type uint64NodeDesc struct { + key uint64 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*uint64NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUuint64NodeDescDesc(key uint64, value interface{}, level int) *uint64NodeDesc { + node := &uint64NodeDesc{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uint64NodeDesc) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *uint64NodeDesc) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *uint64NodeDesc) loadNext(i int) *uint64NodeDesc { + return (*uint64NodeDesc)(n.next.load(i)) +} + +func (n *uint64NodeDesc) storeNext(i int, node *uint64NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uint64NodeDesc) atomicLoadNext(i int) *uint64NodeDesc { + return (*uint64NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *uint64NodeDesc) atomicStoreNext(i int, node *uint64NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uint64NodeDesc) lessthan(key uint64) bool { + return n.key > key +} + +func (n *uint64NodeDesc) equal(key uint64) bool { + return n.key == key +} + +// NewUint64Desc return an empty uint64 skipmap. +func NewUint64Desc() *Uint64MapDesc { + h := newUuint64NodeDescDesc(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Uint64MapDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Uint64MapDesc) findNode(key uint64, preds *[maxLevel]*uint64NodeDesc, succs *[maxLevel]*uint64NodeDesc) *uint64NodeDesc { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Uint64MapDesc) findNodeDelete(key uint64, preds *[maxLevel]*uint64NodeDesc, succs *[maxLevel]*uint64NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockUint64Desc(preds [maxLevel]*uint64NodeDesc, highestLevel int) { + var prevPred *uint64NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *Uint64MapDesc) Store(key uint64, value interface{}) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint64NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint64NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint64Desc(preds, highestLocked) + continue + } + + nn := newUuint64NodeDescDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint64Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + } +} + +func (s *Uint64MapDesc) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *Uint64MapDesc) Load(key uint64) (value interface{}, ok bool) { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(key) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.equal(key) { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return nil, false + } + } + return nil, false +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
+// (Modified from Delete) +func (s *Uint64MapDesc) LoadAndDelete(key uint64) (value interface{}, loaded bool) { + var ( + nodeToDelete *uint64NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint64NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint64NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint64Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint64Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Uint64MapDesc) LoadOrStore(key uint64, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint64NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
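+		// The steps below lock each distinct predecessor from the bottom layer up,
+		// re-validate that no concurrent insert or logical deletion has changed the
+		// [pred, succ] window on any layer, and only then link the new node.
+		// If validation fails, every held lock is released and the loop retries.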
+		var (
+			highestLocked        = -1 // the highest level being locked by this process
+			valid                = true
+			pred, succ, prevPred *uint64NodeDesc
+		)
+		for layer := 0; valid && layer < level; layer++ {
+			pred = preds[layer]   // target node's previous node
+			succ = succs[layer]   // target node's next node
+			if pred != prevPred { // the node in this layer could be locked by previous loop
+				pred.mu.Lock()
+				highestLocked = layer
+				prevPred = pred
+			}
+			// valid checks whether another node has been inserted into this layer during this process.
+			// It is valid if:
+			// 1. The previous node and next node both are not marked.
+			// 2. The previous node's next node is succ in this layer.
+			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
+		}
+		if !valid {
+			unlockUint64Desc(preds, highestLocked)
+			continue
+		}
+
+		nn := newUint64NodeDesc(key, value, level)
+		for layer := 0; layer < level; layer++ {
+			nn.storeNext(layer, succs[layer])
+			preds[layer].atomicStoreNext(layer, nn)
+		}
+		nn.flags.SetTrue(fullyLinked)
+		unlockUint64Desc(preds, highestLocked)
+		atomic.AddInt64(&s.length, 1)
+		return value, false
+	}
+}
+
+// LoadOrStoreLazy returns the existing value for the key if present.
+// Otherwise, it stores and returns the value produced by f; f is called only if the value is stored.
+// The loaded result is true if the value was loaded, false if stored.
+// (Modified from LoadOrStore)
+func (s *Uint64MapDesc) LoadOrStoreLazy(key uint64, f func() interface{}) (actual interface{}, loaded bool) {
+	level := s.randomlevel()
+	var preds, succs [maxLevel]*uint64NodeDesc
+	for {
+		nodeFound := s.findNode(key, &preds, &succs)
+		if nodeFound != nil { // indicating the key is already in the skip-list
+			if !nodeFound.flags.Get(marked) {
+				// We don't need to care about whether or not the node is fully linked,
+				// just return the value.
+				return nodeFound.loadVal(), true
+			}
+			// If the node is marked, another goroutine is in the process of deleting it;
+			// retry the insertion in the next loop.
+			continue
+		}
+
+		// Add this node into skip list.
+		var (
+			highestLocked        = -1 // the highest level being locked by this process
+			valid                = true
+			pred, succ, prevPred *uint64NodeDesc
+		)
+		for layer := 0; valid && layer < level; layer++ {
+			pred = preds[layer]   // target node's previous node
+			succ = succs[layer]   // target node's next node
+			if pred != prevPred { // the node in this layer could be locked by previous loop
+				pred.mu.Lock()
+				highestLocked = layer
+				prevPred = pred
+			}
+			// valid checks whether another node has been inserted into this layer during this process.
+			// It is valid if:
+			// 1. The previous node and next node both are not marked.
+			// 2. The previous node's next node is succ in this layer.
+			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
+		}
+		if !valid {
+			unlockUint64Desc(preds, highestLocked)
+			continue
+		}
+		value := f()
+		nn := newUint64NodeDesc(key, value, level)
+		for layer := 0; layer < level; layer++ {
+			nn.storeNext(layer, succs[layer])
+			preds[layer].atomicStoreNext(layer, nn)
+		}
+		nn.flags.SetTrue(fullyLinked)
+		unlockUint64Desc(preds, highestLocked)
+		atomic.AddInt64(&s.length, 1)
+		return value, false
+	}
+}
+
+// Delete deletes the value for a key.
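+// It returns true only when this call finds the key and completes the removal,
+// for example (illustrative):
+//
+//	if m.Delete(42) {
+//		// key 42 was present and has been removed by this goroutine
+//	}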
+func (s *Uint64MapDesc) Delete(key uint64) bool { + var ( + nodeToDelete *uint64NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint64NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint64NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockUint64Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint64Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Uint64MapDesc) Range(f func(key uint64, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Uint64MapDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Uint32Map represents a map based on skip list in ascending order. 
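+// A short illustrative sketch (assuming the package is imported as skipmap):
+//
+//	m := skipmap.NewUint32()
+//	for _, k := range []uint32{3, 1, 2} {
+//		m.Store(k, int(k)*10)
+//	}
+//	m.Range(func(key uint32, value interface{}) bool {
+//		fmt.Println(key, value) // keys are visited in ascending order: 1, 2, 3
+//		return true
+//	})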
+type Uint32Map struct { + header *uint32Node + length int64 + highestLevel int64 // highest level for now +} + +type uint32Node struct { + key uint32 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*uint32Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUint32Node(key uint32, value interface{}, level int) *uint32Node { + node := &uint32Node{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uint32Node) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *uint32Node) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *uint32Node) loadNext(i int) *uint32Node { + return (*uint32Node)(n.next.load(i)) +} + +func (n *uint32Node) storeNext(i int, node *uint32Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uint32Node) atomicLoadNext(i int) *uint32Node { + return (*uint32Node)(n.next.atomicLoad(i)) +} + +func (n *uint32Node) atomicStoreNext(i int, node *uint32Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uint32Node) lessthan(key uint32) bool { + return n.key < key +} + +func (n *uint32Node) equal(key uint32) bool { + return n.key == key +} + +// NewUint32 return an empty uint32 skipmap. +func NewUint32() *Uint32Map { + h := newUint32Node(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Uint32Map{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Uint32Map) findNode(key uint32, preds *[maxLevel]*uint32Node, succs *[maxLevel]*uint32Node) *uint32Node { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Uint32Map) findNodeDelete(key uint32, preds *[maxLevel]*uint32Node, succs *[maxLevel]*uint32Node) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockUint32(preds [maxLevel]*uint32Node, highestLevel int) { + var prevPred *uint32Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *Uint32Map) Store(key uint32, value interface{}) {
+	level := s.randomlevel()
+	var preds, succs [maxLevel]*uint32Node
+	for {
+		nodeFound := s.findNode(key, &preds, &succs)
+		if nodeFound != nil { // indicating the key is already in the skip-list
+			if !nodeFound.flags.Get(marked) {
+				// We don't need to care about whether or not the node is fully linked,
+				// just replace the value.
+				nodeFound.storeVal(value)
+				return
+			}
+			// If the node is marked, another goroutine is in the process of deleting it;
+			// retry the insertion in the next loop.
+			continue
+		}
+
+		// Add this node into skip list.
+		var (
+			highestLocked        = -1 // the highest level being locked by this process
+			valid                = true
+			pred, succ, prevPred *uint32Node
+		)
+		for layer := 0; valid && layer < level; layer++ {
+			pred = preds[layer]   // target node's previous node
+			succ = succs[layer]   // target node's next node
+			if pred != prevPred { // the node in this layer could be locked by previous loop
+				pred.mu.Lock()
+				highestLocked = layer
+				prevPred = pred
+			}
+			// valid checks whether another node has been inserted into this layer during this process.
+			// It is valid if:
+			// 1. The previous node and next node both are not marked.
+			// 2. The previous node's next node is succ in this layer.
+			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
+		}
+		if !valid {
+			unlockUint32(preds, highestLocked)
+			continue
+		}
+
+		nn := newUint32Node(key, value, level)
+		for layer := 0; layer < level; layer++ {
+			nn.storeNext(layer, succs[layer])
+			preds[layer].atomicStoreNext(layer, nn)
+		}
+		nn.flags.SetTrue(fullyLinked)
+		unlockUint32(preds, highestLocked)
+		atomic.AddInt64(&s.length, 1)
+		return
+	}
+}
+
+func (s *Uint32Map) randomlevel() int {
+	// Generate random level.
+	level := randomLevel()
+	// Update highest level if possible.
+	for {
+		hl := atomic.LoadInt64(&s.highestLevel)
+		if int64(level) <= hl {
+			break
+		}
+		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
+			break
+		}
+	}
+	return level
+}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (s *Uint32Map) Load(key uint32) (value interface{}, ok bool) {
+	x := s.header
+	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
+		nex := x.atomicLoadNext(i)
+		for nex != nil && nex.lessthan(key) {
+			x = nex
+			nex = x.atomicLoadNext(i)
+		}
+
+		// Check if the key is already in the skip list.
+		if nex != nil && nex.equal(key) {
+			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
+				return nex.loadVal(), true
+			}
+			return nil, false
+		}
+	}
+	return nil, false
+}
+
+// LoadAndDelete deletes the value for a key, returning the previous value if any.
+// The loaded result reports whether the key was present.
+// (Modified from Delete) +func (s *Uint32Map) LoadAndDelete(key uint32) (value interface{}, loaded bool) { + var ( + nodeToDelete *uint32Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint32Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint32Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint32(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint32(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Uint32Map) LoadOrStore(key uint32, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint32Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint32Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint32(preds, highestLocked) + continue + } + + nn := newUint32Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint32(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Uint32Map) LoadOrStoreLazy(key uint32, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint32Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint32Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockUint32(preds, highestLocked) + continue + } + value := f() + nn := newUint32Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint32(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
+func (s *Uint32Map) Delete(key uint32) bool { + var ( + nodeToDelete *uint32Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint32Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint32Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockUint32(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint32(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Uint32Map) Range(f func(key uint32, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Uint32Map) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Uint32MapDesc represents a map based on skip list in descending order. 
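+// Range visits keys from largest to smallest. Illustrative sketch:
+//
+//	m := skipmap.NewUint32Desc()
+//	m.Store(1, "a")
+//	m.Store(2, "b")
+//	m.Range(func(key uint32, value interface{}) bool {
+//		// first callback sees key == 2, then key == 1
+//		return true
+//	})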
+type Uint32MapDesc struct { + header *uint32NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type uint32NodeDesc struct { + key uint32 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*uint32NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUint32NodeDesc(key uint32, value interface{}, level int) *uint32NodeDesc { + node := &uint32NodeDesc{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uint32NodeDesc) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *uint32NodeDesc) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *uint32NodeDesc) loadNext(i int) *uint32NodeDesc { + return (*uint32NodeDesc)(n.next.load(i)) +} + +func (n *uint32NodeDesc) storeNext(i int, node *uint32NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uint32NodeDesc) atomicLoadNext(i int) *uint32NodeDesc { + return (*uint32NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *uint32NodeDesc) atomicStoreNext(i int, node *uint32NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uint32NodeDesc) lessthan(key uint32) bool { + return n.key > key +} + +func (n *uint32NodeDesc) equal(key uint32) bool { + return n.key == key +} + +// NewUint32Desc return an empty uint32 skipmap. +func NewUint32Desc() *Uint32MapDesc { + h := newUint32NodeDesc(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Uint32MapDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Uint32MapDesc) findNode(key uint32, preds *[maxLevel]*uint32NodeDesc, succs *[maxLevel]*uint32NodeDesc) *uint32NodeDesc { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Uint32MapDesc) findNodeDelete(key uint32, preds *[maxLevel]*uint32NodeDesc, succs *[maxLevel]*uint32NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockUint32Desc(preds [maxLevel]*uint32NodeDesc, highestLevel int) { + var prevPred *uint32NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *Uint32MapDesc) Store(key uint32, value interface{}) {
+	level := s.randomlevel()
+	var preds, succs [maxLevel]*uint32NodeDesc
+	for {
+		nodeFound := s.findNode(key, &preds, &succs)
+		if nodeFound != nil { // indicating the key is already in the skip-list
+			if !nodeFound.flags.Get(marked) {
+				// We don't need to care about whether or not the node is fully linked,
+				// just replace the value.
+				nodeFound.storeVal(value)
+				return
+			}
+			// If the node is marked, another goroutine is in the process of deleting it;
+			// retry the insertion in the next loop.
+			continue
+		}
+
+		// Add this node into skip list.
+		var (
+			highestLocked        = -1 // the highest level being locked by this process
+			valid                = true
+			pred, succ, prevPred *uint32NodeDesc
+		)
+		for layer := 0; valid && layer < level; layer++ {
+			pred = preds[layer]   // target node's previous node
+			succ = succs[layer]   // target node's next node
+			if pred != prevPred { // the node in this layer could be locked by previous loop
+				pred.mu.Lock()
+				highestLocked = layer
+				prevPred = pred
+			}
+			// valid checks whether another node has been inserted into this layer during this process.
+			// It is valid if:
+			// 1. The previous node and next node both are not marked.
+			// 2. The previous node's next node is succ in this layer.
+			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
+		}
+		if !valid {
+			unlockUint32Desc(preds, highestLocked)
+			continue
+		}
+
+		nn := newUint32NodeDesc(key, value, level)
+		for layer := 0; layer < level; layer++ {
+			nn.storeNext(layer, succs[layer])
+			preds[layer].atomicStoreNext(layer, nn)
+		}
+		nn.flags.SetTrue(fullyLinked)
+		unlockUint32Desc(preds, highestLocked)
+		atomic.AddInt64(&s.length, 1)
+		return
+	}
+}
+
+func (s *Uint32MapDesc) randomlevel() int {
+	// Generate random level.
+	level := randomLevel()
+	// Update highest level if possible.
+	for {
+		hl := atomic.LoadInt64(&s.highestLevel)
+		if int64(level) <= hl {
+			break
+		}
+		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
+			break
+		}
+	}
+	return level
+}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (s *Uint32MapDesc) Load(key uint32) (value interface{}, ok bool) {
+	x := s.header
+	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
+		nex := x.atomicLoadNext(i)
+		for nex != nil && nex.lessthan(key) {
+			x = nex
+			nex = x.atomicLoadNext(i)
+		}
+
+		// Check if the key is already in the skip list.
+		if nex != nil && nex.equal(key) {
+			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
+				return nex.loadVal(), true
+			}
+			return nil, false
+		}
+	}
+	return nil, false
+}
+
+// LoadAndDelete deletes the value for a key, returning the previous value if any.
+// The loaded result reports whether the key was present.
+// (Modified from Delete) +func (s *Uint32MapDesc) LoadAndDelete(key uint32) (value interface{}, loaded bool) { + var ( + nodeToDelete *uint32NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint32NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint32NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint32Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Uint32MapDesc) LoadOrStore(key uint32, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint32NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint32NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint32Desc(preds, highestLocked) + continue + } + + nn := newUint32NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Uint32MapDesc) LoadOrStoreLazy(key uint32, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint32NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint32NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockUint32Desc(preds, highestLocked) + continue + } + value := f() + nn := newUint32NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
+func (s *Uint32MapDesc) Delete(key uint32) bool { + var ( + nodeToDelete *uint32NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint32NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint32NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockUint32Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Uint32MapDesc) Range(f func(key uint32, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Uint32MapDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Uint16Map represents a map based on skip list in ascending order. 
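+// LoadOrStoreLazy can defer building an expensive value until the key is known to be absent.
+// Illustrative sketch (buildExpensiveValue is a hypothetical helper):
+//
+//	m := skipmap.NewUint16()
+//	v, loaded := m.LoadOrStoreLazy(7, func() interface{} {
+//		return buildExpensiveValue() // called only when key 7 is not present
+//	})
+//	_, _ = v, loaded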
+type Uint16Map struct { + header *uint16Node + length int64 + highestLevel int64 // highest level for now +} + +type uint16Node struct { + key uint16 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*uint16Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUint16Node(key uint16, value interface{}, level int) *uint16Node { + node := &uint16Node{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uint16Node) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *uint16Node) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *uint16Node) loadNext(i int) *uint16Node { + return (*uint16Node)(n.next.load(i)) +} + +func (n *uint16Node) storeNext(i int, node *uint16Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uint16Node) atomicLoadNext(i int) *uint16Node { + return (*uint16Node)(n.next.atomicLoad(i)) +} + +func (n *uint16Node) atomicStoreNext(i int, node *uint16Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uint16Node) lessthan(key uint16) bool { + return n.key < key +} + +func (n *uint16Node) equal(key uint16) bool { + return n.key == key +} + +// NewUint16 return an empty uint16 skipmap. +func NewUint16() *Uint16Map { + h := newUint16Node(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Uint16Map{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Uint16Map) findNode(key uint16, preds *[maxLevel]*uint16Node, succs *[maxLevel]*uint16Node) *uint16Node { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Uint16Map) findNodeDelete(key uint16, preds *[maxLevel]*uint16Node, succs *[maxLevel]*uint16Node) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockUint16(preds [maxLevel]*uint16Node, highestLevel int) { + var prevPred *uint16Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *Uint16Map) Store(key uint16, value interface{}) {
+	level := s.randomlevel()
+	var preds, succs [maxLevel]*uint16Node
+	for {
+		nodeFound := s.findNode(key, &preds, &succs)
+		if nodeFound != nil { // indicating the key is already in the skip-list
+			if !nodeFound.flags.Get(marked) {
+				// We don't need to care about whether or not the node is fully linked,
+				// just replace the value.
+				nodeFound.storeVal(value)
+				return
+			}
+			// If the node is marked, another goroutine is in the process of deleting it;
+			// retry the insertion in the next loop.
+			continue
+		}
+
+		// Add this node into skip list.
+		var (
+			highestLocked        = -1 // the highest level being locked by this process
+			valid                = true
+			pred, succ, prevPred *uint16Node
+		)
+		for layer := 0; valid && layer < level; layer++ {
+			pred = preds[layer]   // target node's previous node
+			succ = succs[layer]   // target node's next node
+			if pred != prevPred { // the node in this layer could be locked by previous loop
+				pred.mu.Lock()
+				highestLocked = layer
+				prevPred = pred
+			}
+			// valid checks whether another node has been inserted into this layer during this process.
+			// It is valid if:
+			// 1. The previous node and next node both are not marked.
+			// 2. The previous node's next node is succ in this layer.
+			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
+		}
+		if !valid {
+			unlockUint16(preds, highestLocked)
+			continue
+		}
+
+		nn := newUint16Node(key, value, level)
+		for layer := 0; layer < level; layer++ {
+			nn.storeNext(layer, succs[layer])
+			preds[layer].atomicStoreNext(layer, nn)
+		}
+		nn.flags.SetTrue(fullyLinked)
+		unlockUint16(preds, highestLocked)
+		atomic.AddInt64(&s.length, 1)
+		return
+	}
+}
+
+func (s *Uint16Map) randomlevel() int {
+	// Generate random level.
+	level := randomLevel()
+	// Update highest level if possible.
+	for {
+		hl := atomic.LoadInt64(&s.highestLevel)
+		if int64(level) <= hl {
+			break
+		}
+		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
+			break
+		}
+	}
+	return level
+}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (s *Uint16Map) Load(key uint16) (value interface{}, ok bool) {
+	x := s.header
+	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
+		nex := x.atomicLoadNext(i)
+		for nex != nil && nex.lessthan(key) {
+			x = nex
+			nex = x.atomicLoadNext(i)
+		}
+
+		// Check if the key is already in the skip list.
+		if nex != nil && nex.equal(key) {
+			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
+				return nex.loadVal(), true
+			}
+			return nil, false
+		}
+	}
+	return nil, false
+}
+
+// LoadAndDelete deletes the value for a key, returning the previous value if any.
+// The loaded result reports whether the key was present.
+// (Modified from Delete) +func (s *Uint16Map) LoadAndDelete(key uint16) (value interface{}, loaded bool) { + var ( + nodeToDelete *uint16Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint16Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint16Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint16(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint16(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Uint16Map) LoadOrStore(key uint16, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint16Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint16Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint16(preds, highestLocked) + continue + } + + nn := newUint16Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint16(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Uint16Map) LoadOrStoreLazy(key uint16, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint16Node + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint16Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockUint16(preds, highestLocked) + continue + } + value := f() + nn := newUint16Node(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint16(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
+func (s *Uint16Map) Delete(key uint16) bool { + var ( + nodeToDelete *uint16Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint16Node + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint16Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockUint16(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint16(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Uint16Map) Range(f func(key uint16, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Uint16Map) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Uint16MapDesc represents a map based on skip list in descending order. 
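+// Like the other skipmap variants, it is intended for concurrent use by multiple
+// goroutines without additional locking. Illustrative sketch:
+//
+//	m := skipmap.NewUint16Desc()
+//	var wg sync.WaitGroup
+//	for i := uint16(0); i < 4; i++ {
+//		wg.Add(1)
+//		go func(k uint16) {
+//			defer wg.Done()
+//			m.Store(k, nil)
+//		}(i)
+//	}
+//	wg.Wait()
+//	_ = m.Len() // 4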
+type Uint16MapDesc struct { + header *uint16NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type uint16NodeDesc struct { + key uint16 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*uint16NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUint16NodeDesc(key uint16, value interface{}, level int) *uint16NodeDesc { + node := &uint16NodeDesc{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uint16NodeDesc) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *uint16NodeDesc) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *uint16NodeDesc) loadNext(i int) *uint16NodeDesc { + return (*uint16NodeDesc)(n.next.load(i)) +} + +func (n *uint16NodeDesc) storeNext(i int, node *uint16NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uint16NodeDesc) atomicLoadNext(i int) *uint16NodeDesc { + return (*uint16NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *uint16NodeDesc) atomicStoreNext(i int, node *uint16NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uint16NodeDesc) lessthan(key uint16) bool { + return n.key > key +} + +func (n *uint16NodeDesc) equal(key uint16) bool { + return n.key == key +} + +// NewUint16Desc return an empty uint16 skipmap. +func NewUint16Desc() *Uint16MapDesc { + h := newUint16NodeDesc(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &Uint16MapDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *Uint16MapDesc) findNode(key uint16, preds *[maxLevel]*uint16NodeDesc, succs *[maxLevel]*uint16NodeDesc) *uint16NodeDesc { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *Uint16MapDesc) findNodeDelete(key uint16, preds *[maxLevel]*uint16NodeDesc, succs *[maxLevel]*uint16NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockUint16Desc(preds [maxLevel]*uint16NodeDesc, highestLevel int) { + var prevPred *uint16NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *Uint16MapDesc) Store(key uint16, value interface{}) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint16NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint16NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint16Desc(preds, highestLocked) + continue + } + + nn := newUint16NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + } +} + +func (s *Uint16MapDesc) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *Uint16MapDesc) Load(key uint16) (value interface{}, ok bool) { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(key) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.equal(key) { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return nil, false + } + } + return nil, false +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
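Load and Range report a key as present only when its node is both fully linked and not logically deleted. The bitflag helper is defined elsewhere in this package; the sketch below assumes MGet(mask, expect) means flags&mask == expect, and the constant values are illustrative rather than the package's real ones:

package main

import "fmt"

const (
	fullyLinked = 1 << 0 // node is reachable on all of its layers
	marked      = 1 << 1 // node is logically deleted
)

// visible spells out flags.MGet(fullyLinked|marked, fullyLinked).
func visible(flags uint32) bool {
	return flags&(fullyLinked|marked) == fullyLinked
}

func main() {
	fmt.Println(visible(fullyLinked))          // true: linked and not deleted
	fmt.Println(visible(fullyLinked | marked)) // false: delete in progress
	fmt.Println(visible(0))                    // false: insert not finished yet
}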
+// (Modified from Delete) +func (s *Uint16MapDesc) LoadAndDelete(key uint16) (value interface{}, loaded bool) { + var ( + nodeToDelete *uint16NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint16NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint16NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint16Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *Uint16MapDesc) LoadOrStore(key uint16, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint16NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint16NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint16Desc(preds, highestLocked) + continue + } + + nn := newUint16NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *Uint16MapDesc) LoadOrStoreLazy(key uint16, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uint16NodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint16NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockUint16Desc(preds, highestLocked) + continue + } + value := f() + nn := newUint16NodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
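LoadOrStore always has its value argument in hand, while LoadOrStoreLazy defers building the value until the key is known to be absent; the callback runs at most once, while the insertion locks are held. A sketch, written as if inside this package:

package skipmap

import "fmt"

// lazySketch is illustrative only; it is not part of the diff.
func lazySketch() {
	m := NewUint16Desc()
	m.Store(1, "cached")

	v, loaded := m.LoadOrStoreLazy(1, func() interface{} {
		return "never built" // not reached: key 1 already exists
	})
	fmt.Println(v, loaded) // cached true

	v, loaded = m.LoadOrStoreLazy(2, func() interface{} {
		return "expensive" // computed once, only for the missing key
	})
	fmt.Println(v, loaded) // expensive false
}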
+func (s *Uint16MapDesc) Delete(key uint16) bool { + var ( + nodeToDelete *uint16NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint16NodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint16NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockUint16Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *Uint16MapDesc) Range(f func(key uint16, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *Uint16MapDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// UintMap represents a map based on skip list in ascending order. 
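Deletion is split into a logical phase (setting marked under the node's own mutex) and a physical unlink; because only the goroutine that wins the marking step proceeds, exactly one of several racing Delete calls reports true. A sketch, written as if inside this package:

package skipmap

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// deleteOnceSketch is illustrative only; it is not part of the diff.
func deleteOnceSketch() {
	m := NewUint16Desc()
	m.Store(42, "value")

	var wg sync.WaitGroup
	var wins int64
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if m.Delete(42) {
				atomic.AddInt64(&wins, 1)
			}
		}()
	}
	wg.Wait()
	fmt.Println(wins, m.Len()) // 1 0
}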
+type UintMap struct { + header *uintNode + length int64 + highestLevel int64 // highest level for now +} + +type uintNode struct { + key uint + value unsafe.Pointer // *interface{} + next optionalArray // [level]*uintNode + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUintNode(key uint, value interface{}, level int) *uintNode { + node := &uintNode{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uintNode) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *uintNode) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *uintNode) loadNext(i int) *uintNode { + return (*uintNode)(n.next.load(i)) +} + +func (n *uintNode) storeNext(i int, node *uintNode) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uintNode) atomicLoadNext(i int) *uintNode { + return (*uintNode)(n.next.atomicLoad(i)) +} + +func (n *uintNode) atomicStoreNext(i int, node *uintNode) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uintNode) lessthan(key uint) bool { + return n.key < key +} + +func (n *uintNode) equal(key uint) bool { + return n.key == key +} + +// NewUint return an empty uint skipmap. +func NewUint() *UintMap { + h := newUintNode(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &UintMap{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *UintMap) findNode(key uint, preds *[maxLevel]*uintNode, succs *[maxLevel]*uintNode) *uintNode { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *UintMap) findNodeDelete(key uint, preds *[maxLevel]*uintNode, succs *[maxLevel]*uintNode) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockUint(preds [maxLevel]*uintNode, highestLevel int) { + var prevPred *uintNode + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *UintMap) Store(key uint, value interface{}) { + level := s.randomlevel() + var preds, succs [maxLevel]*uintNode + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNode + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint(preds, highestLocked) + continue + } + + nn := newUintNode(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + } +} + +func (s *UintMap) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *UintMap) Load(key uint) (value interface{}, ok bool) { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(key) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.equal(key) { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return nil, false + } + } + return nil, false +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
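randomlevel needs highestLevel to only ever grow, even when many goroutines insert tall nodes at once, so it uses a load-compare-CAS loop rather than a plain store. The same pattern in isolation (the helper name is illustrative, not part of the diff):

package skipmap

import "sync/atomic"

// raiseToAtLeast lifts *addr to at least level, never lowering it.
// A failed CompareAndSwap only means another goroutine changed the value,
// so the loop re-reads and re-checks.
func raiseToAtLeast(addr *int64, level int64) {
	for {
		cur := atomic.LoadInt64(addr)
		if level <= cur {
			return // already high enough
		}
		if atomic.CompareAndSwapInt64(addr, cur, level) {
			return
		}
	}
}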
+// (Modified from Delete) +func (s *UintMap) LoadAndDelete(key uint) (value interface{}, loaded bool) { + var ( + nodeToDelete *uintNode + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uintNode + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNode + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *UintMap) LoadOrStore(key uint, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uintNode + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNode + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. 
+ // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint(preds, highestLocked) + continue + } + + nn := newUintNode(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *UintMap) LoadOrStoreLazy(key uint, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uintNode + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNode + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockUint(preds, highestLocked) + continue + } + value := f() + nn := newUintNode(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. +func (s *UintMap) Delete(key uint) bool { + var ( + nodeToDelete *uintNode + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uintNode + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. 
+ nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNode + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockUint(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUint(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *UintMap) Range(f func(key uint, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *UintMap) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// UintMapDesc represents a map based on skip list in descending order. 
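As the Range documentation above notes, iteration is not a snapshot: it simply walks the bottom layer, skipping nodes that are not fully linked or already marked. Mutating the map from inside the callback is safe, and whether a concurrent change is observed depends on where it lands relative to the cursor. A single-goroutine sketch, written as if inside this package:

package skipmap

import "fmt"

// rangeSketch is illustrative only; it is not part of the diff.
func rangeSketch() {
	m := NewUint()
	for k := uint(1); k <= 3; k++ {
		m.Store(k, k)
	}
	m.Range(func(k uint, _ interface{}) bool {
		if k == 1 {
			m.Store(10, "added mid-iteration") // ahead of the cursor: visited
			m.Delete(3)                        // ahead of the cursor: skipped
		}
		fmt.Print(k, " ")
		return true
	})
	fmt.Println() // prints: 1 2 10
}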
+type UintMapDesc struct { + header *uintNodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type uintNodeDesc struct { + key uint + value unsafe.Pointer // *interface{} + next optionalArray // [level]*uintNodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUintNodeDesc(key uint, value interface{}, level int) *uintNodeDesc { + node := &uintNodeDesc{ + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uintNodeDesc) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *uintNodeDesc) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *uintNodeDesc) loadNext(i int) *uintNodeDesc { + return (*uintNodeDesc)(n.next.load(i)) +} + +func (n *uintNodeDesc) storeNext(i int, node *uintNodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uintNodeDesc) atomicLoadNext(i int) *uintNodeDesc { + return (*uintNodeDesc)(n.next.atomicLoad(i)) +} + +func (n *uintNodeDesc) atomicStoreNext(i int, node *uintNodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uintNodeDesc) lessthan(key uint) bool { + return n.key > key +} + +func (n *uintNodeDesc) equal(key uint) bool { + return n.key == key +} + +// NewUintDesc return an empty uint skipmap. +func NewUintDesc() *UintMapDesc { + h := newUintNodeDesc(0, "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &UintMapDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *UintMapDesc) findNode(key uint, preds *[maxLevel]*uintNodeDesc, succs *[maxLevel]*uintNodeDesc) *uintNodeDesc { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.equal(key) { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *UintMapDesc) findNodeDelete(key uint, preds *[maxLevel]*uintNodeDesc, succs *[maxLevel]*uintNodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(key) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.equal(key) { + lFound = i + } + } + return lFound +} + +func unlockUintDesc(preds [maxLevel]*uintNodeDesc, highestLevel int) { + var prevPred *uintNodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *UintMapDesc) Store(key uint, value interface{}) { + level := s.randomlevel() + var preds, succs [maxLevel]*uintNodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUintDesc(preds, highestLocked) + continue + } + + nn := newUintNodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUintDesc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + } +} + +func (s *UintMapDesc) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *UintMapDesc) Load(key uint) (value interface{}, ok bool) { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(key) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.equal(key) { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return nil, false + } + } + return nil, false +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
+// (Modified from Delete) +func (s *UintMapDesc) LoadAndDelete(key uint) (value interface{}, loaded bool) { + var ( + nodeToDelete *uintNodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uintNodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUintDesc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUintDesc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *UintMapDesc) LoadOrStore(key uint, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uintNodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUintDesc(preds, highestLocked) + continue + } + + nn := newUintNodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUintDesc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *UintMapDesc) LoadOrStoreLazy(key uint, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*uintNodeDesc + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockUintDesc(preds, highestLocked) + continue + } + value := f() + nn := newUintNodeDesc(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUintDesc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
+func (s *UintMapDesc) Delete(key uint) bool { + var ( + nodeToDelete *uintNodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uintNodeDesc + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockUintDesc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockUintDesc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *UintMapDesc) Range(f func(key uint, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *UintMapDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// StringMap represents a map based on skip list. 
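When Store finds the key already present it does not touch the index layers at all; it only swaps the node's value pointer via storeVal, so the length is unchanged and concurrent readers switch atomically from the old value to the new one. A sketch, written as if inside this package:

package skipmap

import "fmt"

// overwriteSketch is illustrative only; it is not part of the diff.
func overwriteSketch() {
	m := NewUintDesc()
	m.Store(7, "first")
	m.Store(7, "second") // same key: value pointer swapped, no relinking
	v, ok := m.Load(7)
	fmt.Println(v, ok, m.Len()) // second true 1
}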
+type StringMap struct { + header *stringNode + length int64 + highestLevel int64 // highest level for now +} + +type stringNode struct { + key string + score uint64 + value unsafe.Pointer // *interface{} + next optionalArray // [level]*stringNode + mu sync.Mutex + flags bitflag + level uint32 +} + +func newStringNode(key string, value interface{}, level int) *stringNode { + node := &stringNode{ + score: hash(key), + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *stringNode) storeVal(value interface{}) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *stringNode) loadVal() interface{} { + return *(*interface{})(atomic.LoadPointer(&n.value)) +} + +func (n *stringNode) loadNext(i int) *stringNode { + return (*stringNode)(n.next.load(i)) +} + +func (n *stringNode) storeNext(i int, node *stringNode) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *stringNode) atomicLoadNext(i int) *stringNode { + return (*stringNode)(n.next.atomicLoad(i)) +} + +func (n *stringNode) atomicStoreNext(i int, node *stringNode) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +// NewString return an empty int64 skipmap. +func NewString() *StringMap { + h := newStringNode("", "", maxLevel) + h.flags.SetTrue(fullyLinked) + return &StringMap{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *StringMap) findNode(key string, preds *[maxLevel]*stringNode, succs *[maxLevel]*stringNode) *stringNode { + score := hash(key) + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.cmp(score, key) < 0 { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.cmp(score, key) == 0 { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *StringMap) findNodeDelete(key string, preds *[maxLevel]*stringNode, succs *[maxLevel]*stringNode) int { + score := hash(key) + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.cmp(score, key) < 0 { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.cmp(score, key) == 0 { + lFound = i + } + } + return lFound +} + +func unlockString(preds [maxLevel]*stringNode, highestLevel int) { + var prevPred *stringNode + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
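StringMap keys are positioned by a 64-bit hash score computed once per node, with a plain string comparison breaking ties only when scores collide, so Range visits keys in hash order rather than lexicographic order. A sketch of the comparison rule, assuming cmpstring behaves like strings.Compare:

package skipmap

// sortsBeforeSketch reports whether node a precedes node b in the list,
// mirroring stringNode.cmp: score first, raw key bytes only on a tie.
// Illustrative only; it is not part of the diff.
func sortsBeforeSketch(a, b *stringNode) bool {
	if a.score != b.score {
		return a.score < b.score
	}
	return a.key < b.key // tie-break, e.g. two keys with colliding hashes
}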
+func (s *StringMap) Store(key string, value interface{}) { + level := s.randomlevel() + var preds, succs [maxLevel]*stringNode + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringNode + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockString(preds, highestLocked) + continue + } + + nn := newStringNode(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockString(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + } +} + +func (s *StringMap) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *StringMap) Load(key string) (value interface{}, ok bool) { + score := hash(key) + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.cmp(score, key) < 0 { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.cmp(score, key) == 0 { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return nil, false + } + } + return nil, false +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
+// (Modified from Delete) +func (s *StringMap) LoadAndDelete(key string) (value interface{}, loaded bool) { + var ( + nodeToDelete *stringNode + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*stringNode + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return nil, false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringNode + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockString(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockString(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return nil, false + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *StringMap) LoadOrStore(key string, value interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*stringNode + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringNode + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockString(preds, highestLocked) + continue + } + + nn := newStringNode(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockString(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *StringMap) LoadOrStoreLazy(key string, f func() interface{}) (actual interface{}, loaded bool) { + level := s.randomlevel() + var preds, succs [maxLevel]*stringNode + for { + nodeFound := s.findNode(key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringNode + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockString(preds, highestLocked) + continue + } + value := f() + nn := newStringNode(key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockString(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
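LoadAndDelete combines the lookup and the marking step, so the previous value is handed to exactly one caller even when several goroutines try to claim the same key. A sketch using the StringMap, written as if inside this package:

package skipmap

import "fmt"

// claimSketch is illustrative only; it is not part of the diff.
func claimSketch() {
	pending := NewString()
	pending.Store("job-1", "payload")

	if v, loaded := pending.LoadAndDelete("job-1"); loaded {
		fmt.Println("claimed:", v) // claimed: payload
	}
	if _, loaded := pending.LoadAndDelete("job-1"); !loaded {
		fmt.Println("nothing left to claim")
	}
}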
+func (s *StringMap) Delete(key string) bool { + var ( + nodeToDelete *stringNode + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*stringNode + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringNode + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockString(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. + // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockString(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *StringMap) Range(f func(key string, value interface{}) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skipmap. +func (s *StringMap) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Return 1 if n is bigger, 0 if equal, else -1. +func (n *stringNode) cmp(score uint64, key string) int { + if n.score > score { + return 1 + } else if n.score == score { + return cmpstring(n.key, key) + } + return -1 +} diff --git a/structure/skipmap/types_gen.go b/structure/skipmap/types_gen.go new file mode 100644 index 0000000..8773545 --- /dev/null +++ b/structure/skipmap/types_gen.go @@ -0,0 +1,218 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build ignore +// +build ignore + +package main + +import ( + "bytes" + "go/format" + "io/ioutil" + "os" + "strings" +) + +func main() { + f, err := os.Open("skipmap.go") + if err != nil { + panic(err) + } + filedata, err := ioutil.ReadAll(f) + if err != nil { + panic(err) + } + + w := new(bytes.Buffer) + w.WriteString(`// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +`) + // Step 1. Add file header + w.WriteString(`// Code generated by go run types_gen.go; DO NOT EDIT.` + "\r\n") + // Step 2. Add imports and package statement + w.WriteString(string(filedata)[strings.Index(string(filedata), "package skipmap") : strings.Index(string(filedata), ")\n")+1]) + // Step 3. Generate code for basic types + ts := []string{"Float32", "Float64", "Int32", "Int16", "Int", "Uint64", "Uint32", "Uint16", "Uint"} // all types need to be converted + for _, upper := range ts { + data := string(filedata) + // Step 4-1. Remove all string before import + data = data[strings.Index(data, ")\n")+1:] + // Step 4-2. Replace all cases + dataDesc := replace(data, upper, true) + dataAsc := replace(data, upper, false) + w.WriteString(dataAsc) + w.WriteString("\r\n") + w.WriteString(dataDesc) + w.WriteString("\r\n") + } + // Step 5. Generate string map + data := string(filedata) + data = data[strings.Index(data, ")\n")+1:] + datastring := replaceString(data) + w.WriteString(datastring) + w.WriteString("\r\n") + + out, err := format.Source(w.Bytes()) + if err != nil { + panic(err) + } + + if err := ioutil.WriteFile("types.go", out, 0660); err != nil { + panic(err) + } +} + +func replace(data string, upper string, desc bool) string { + lower := strings.ToLower(upper) + + var descstr string + if desc { + descstr = "Desc" + } + data = strings.Replace(data, "NewInt64", "New"+upper+descstr, -1) + data = strings.Replace(data, "newInt64Node", "new"+upper+"Node"+descstr, -1) + data = strings.Replace(data, "unlockInt64", "unlock"+upper+descstr, -1) + data = strings.Replace(data, "Int64Map", upper+"Map"+descstr, -1) + data = strings.Replace(data, "int64Node", lower+"Node"+descstr, -1) + data = strings.Replace(data, "key int64", "key "+lower, -1) + data = strings.Replace(data, "key int64", "key "+lower, -1) + data = strings.Replace(data, "key int64", "key "+lower, -1) + data = strings.Replace(data, "int64 skipmap", lower+" skipmap", -1) // comment + + if desc { + // Special cases for DESC. 
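+ // Only the comparison direction has to change for a descending set; the locking,
+ // linking and flag handling are identical, so the same source text is reused.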
+ data = strings.Replace(data, "ascending", "descending", -1) + data = strings.Replace(data, "return n.key < key", "return n.key > key", -1) + } + return data +} + +func replaceString(data string) string { + const ( + upper = "String" + lower = "string" + ) + + // Add `score uint64` field. + data = strings.Replace(data, + `type int64Node struct { + key int64`, + `type int64Node struct { + key int64 + score uint64`, -1) + + data = strings.Replace(data, + `&int64Node{`, + `&int64Node{ + score: hash(key),`, -1) + + // Refactor comparsion. + data = data + "\n" + data += `// Return 1 if n is bigger, 0 if equal, else -1. +func (n *stringNode) cmp(score uint64, key string) int { + if n.score > score { + return 1 + } else if n.score == score { + return cmpstring(n.key, key) + } + return -1 +}` + + data = strings.Replace(data, + `.lessthan(key)`, + `.cmp(score, key) < 0`, -1) + data = strings.Replace(data, + `.equal(key)`, + `.cmp(score, key) == 0`, -1) + + // Remove `lessthan` and `equal` + data = strings.Replace(data, + `func (n *int64Node) lessthan(key int64) bool { + return n.key < key +}`, "", -1) + data = strings.Replace(data, + `func (n *int64Node) equal(key int64) bool { + return n.key == key +}`, "", -1) + + // Add "score := hash(key)" + data = addLineAfter(data, "func (s *Int64Map) findNodeDelete", "score := hash(key)") + data = addLineAfter(data, "func (s *Int64Map) findNode", "score := hash(key)") + data = addLineAfter(data, "func (s *Int64Map) Load", "score := hash(key)") + + // Update new key "newInt64Node(0" + data = strings.Replace(data, + "newInt64Node(0", `newInt64Node(""`, -1) + + data = strings.Replace(data, "NewInt64", "New"+upper, -1) + data = strings.Replace(data, "newInt64Node", "new"+upper+"Node", -1) + data = strings.Replace(data, "unlockInt64", "unlock"+upper, -1) + data = strings.Replace(data, "Int64Map", upper+"Map", -1) + data = strings.Replace(data, "int64Node", lower+"Node", -1) + data = strings.Replace(data, "key int64", "key "+lower, -1) + data = strings.Replace(data, "key int64", "key "+lower, -1) + data = strings.Replace(data, "key int64", "key "+lower, -1) + data = strings.Replace(data, "int64 skip map", lower+" skip map", -1) // comment + data = strings.Replace(data, " in ascending order", "", -1) // comment + + return data +} + +func lowerSlice(s []string) []string { + n := make([]string, len(s)) + for i, v := range s { + n[i] = strings.ToLower(v) + } + return n +} + +func inSlice(s []string, val string) bool { + for _, v := range s { + if v == val { + return true + } + } + return false +} + +func addLineAfter(src string, after string, added string) string { + all := strings.Split(string(src), "\n") + for i, v := range all { + if strings.Index(v, after) != -1 { + res := make([]string, len(all)+1) + for j := 0; j <= i; j++ { + res[j] = all[j] + } + res[i+1] = added + for j := i + 1; j < len(all); j++ { + res[j+1] = all[j] + } + return strings.Join(res, "\n") + } + } + panic("can not find:" + after) +} diff --git a/structure/skipmap/util.go b/structure/skipmap/util.go new file mode 100644 index 0000000..17f50d2 --- /dev/null +++ b/structure/skipmap/util.go @@ -0,0 +1,45 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package skipmap + +import ( + "github.com/songzhibin97/gkit/internal/wyhash" + "github.com/songzhibin97/gkit/sys/fastrand" + _ "unsafe" // for linkname +) + +const ( + maxLevel = 16 + p = 0.25 + defaultHighestLevel = 3 +) + +func hash(s string) uint64 { + return wyhash.Sum64String(s) +} + +//go:linkname cmpstring runtime.cmpstring +func cmpstring(a, b string) int + +func randomLevel() int { + level := 1 + for fastrand.Uint32n(1/p) == 0 { + level++ + } + if level > maxLevel { + return maxLevel + } + return level +} diff --git a/structure/skipset/README.md b/structure/skipset/README.md new file mode 100644 index 0000000..f512a64 --- /dev/null +++ b/structure/skipset/README.md @@ -0,0 +1,150 @@ +## Introduction + +skipset is a high-performance concurrent set based on a skip list. In a typical pattern (100000 operations, 90% CONTAINS, 9% ADD, +1% REMOVE), the skipset is up to 3x ~ 15x faster than the built-in sync.Map. + +The main idea behind the skipset +is [A Simple Optimistic Skiplist Algorithm](). + +Unlike sync.Map, the items in the skipset are always sorted, and the `Contains` and `Range` operations are +wait-free (a goroutine is guaranteed to complete an operation as long as it keeps taking steps, regardless of the +activity of other goroutines). + +## Features + +- Concurrent-safe API with high performance. +- Wait-free Contains and Range operations. +- Sorted items. + +## When should you use skipset + +In these situations, `skipset` is better: + +- **Sorted elements are needed**. +- **Concurrent calls to multiple operations**, such as using both `Contains` and `Add` at the same time. +- **Memory intensive**. The skipset saves at least 50% memory in the benchmark. + +In these situations, `sync.Map` is better: + +- Only one goroutine accesses the set for most of the time, such as inserting a batch of elements and then using + only `Contains` (the built-in map is even better for that). + +## QuickStart + +```go +package main + +import ( + "fmt" + + "github.com/songzhibin97/gkit/structure/skipset" +) + +func main() { + l := skipset.NewInt() + + for _, v := range []int{10, 12, 15} { + if l.Add(v) { + fmt.Println("skipset add", v) + } + } + + if l.Contains(10) { + fmt.Println("skipset contains 10") + } + + l.Range(func(value int) bool { + fmt.Println("skipset range found ", value) + return true + }) + + l.Remove(15) + fmt.Printf("skipset contains %d items\r\n", l.Len()) +} + +``` + +## Benchmark + +Go version: go1.16.2 linux/amd64 + +CPU: AMD 3700x(8C16T), running at 3.6GHz + +OS: ubuntu 18.04 + +MEMORY: 16G x 2 (3200MHz) + +![benchmark](https://raw.githubusercontent.com/zhangyunhao116/public-data/master/skipset-benchmark.png) + +```shell +$ go test -run=NOTEST -bench=.
-benchtime=100000x -benchmem -count=20 -timeout=60m > x.txt +$ benchstat x.txt +``` + +``` +name time/op +Int64/Add/skipset-16 86.6ns ±11% +Int64/Add/sync.Map-16 674ns ± 6% +Int64/Contains50Hits/skipset-16 9.85ns ±12% +Int64/Contains50Hits/sync.Map-16 14.7ns ±30% +Int64/30Add70Contains/skipset-16 38.8ns ±18% +Int64/30Add70Contains/sync.Map-16 586ns ± 5% +Int64/1Remove9Add90Contains/skipset-16 24.9ns ±17% +Int64/1Remove9Add90Contains/sync.Map-16 493ns ± 5% +Int64/1Range9Remove90Add900Contains/skipset-16 25.9ns ±16% +Int64/1Range9Remove90Add900Contains/sync.Map-16 1.00µs ±12% +String/Add/skipset-16 130ns ±14% +String/Add/sync.Map-16 878ns ± 4% +String/Contains50Hits/skipset-16 18.3ns ± 9% +String/Contains50Hits/sync.Map-16 19.2ns ±18% +String/30Add70Contains/skipset-16 61.0ns ±15% +String/30Add70Contains/sync.Map-16 756ns ± 7% +String/1Remove9Add90Contains/skipset-16 31.3ns ±13% +String/1Remove9Add90Contains/sync.Map-16 614ns ± 6% +String/1Range9Remove90Add900Contains/skipset-16 36.2ns ±18% +String/1Range9Remove90Add900Contains/sync.Map-16 1.20µs ±17% + +name alloc/op +Int64/Add/skipset-16 65.0B ± 0% +Int64/Add/sync.Map-16 128B ± 1% +Int64/Contains50Hits/skipset-16 0.00B +Int64/Contains50Hits/sync.Map-16 0.00B +Int64/30Add70Contains/skipset-16 19.0B ± 0% +Int64/30Add70Contains/sync.Map-16 77.7B ±16% +Int64/1Remove9Add90Contains/skipset-16 5.00B ± 0% +Int64/1Remove9Add90Contains/sync.Map-16 57.5B ± 4% +Int64/1Range9Remove90Add900Contains/skipset-16 5.00B ± 0% +Int64/1Range9Remove90Add900Contains/sync.Map-16 255B ±22% +String/Add/skipset-16 97.0B ± 0% +String/Add/sync.Map-16 152B ± 0% +String/Contains50Hits/skipset-16 15.0B ± 0% +String/Contains50Hits/sync.Map-16 15.0B ± 0% +String/30Add70Contains/skipset-16 40.0B ± 0% +String/30Add70Contains/sync.Map-16 98.2B ±11% +String/1Remove9Add90Contains/skipset-16 23.0B ± 0% +String/1Remove9Add90Contains/sync.Map-16 73.9B ± 4% +String/1Range9Remove90Add900Contains/skipset-16 23.0B ± 0% +String/1Range9Remove90Add900Contains/sync.Map-16 261B ±18% + +name allocs/op +Int64/Add/skipset-16 1.00 ± 0% +Int64/Add/sync.Map-16 4.00 ± 0% +Int64/Contains50Hits/skipset-16 0.00 +Int64/Contains50Hits/sync.Map-16 0.00 +Int64/30Add70Contains/skipset-16 0.00 +Int64/30Add70Contains/sync.Map-16 1.00 ± 0% +Int64/1Remove9Add90Contains/skipset-16 0.00 +Int64/1Remove9Add90Contains/sync.Map-16 0.00 +Int64/1Range9Remove90Add900Contains/skipset-16 0.00 +Int64/1Range9Remove90Add900Contains/sync.Map-16 0.00 +String/Add/skipset-16 2.00 ± 0% +String/Add/sync.Map-16 5.00 ± 0% +String/Contains50Hits/skipset-16 1.00 ± 0% +String/Contains50Hits/sync.Map-16 1.00 ± 0% +String/30Add70Contains/skipset-16 1.00 ± 0% +String/30Add70Contains/sync.Map-16 2.00 ± 0% +String/1Remove9Add90Contains/skipset-16 1.00 ± 0% +String/1Remove9Add90Contains/sync.Map-16 1.00 ± 0% +String/1Range9Remove90Add900Contains/skipset-16 1.00 ± 0% +String/1Range9Remove90Add900Contains/sync.Map-16 1.00 ± 0% +``` \ No newline at end of file diff --git a/structure/skipset/asm.s b/structure/skipset/asm.s new file mode 100644 index 0000000..527a8a3 --- /dev/null +++ b/structure/skipset/asm.s @@ -0,0 +1,19 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The runtime package uses //go:linkname to push a few functions into this +// package but we still need a .s file so the Go tool does not pass -complete +// to the go tool compile so the latter does not complain about Go functions +// with no bodies. +// See https://github.com/golang/go/issues/23311 \ No newline at end of file diff --git a/structure/skipset/bench.sh b/structure/skipset/bench.sh new file mode 100644 index 0000000..478e825 --- /dev/null +++ b/structure/skipset/bench.sh @@ -0,0 +1 @@ +go test -run=NOTEST -bench=. -benchtime=100000x -benchmem -count=10 -timeout=60m > x.txt && benchstat x.txt \ No newline at end of file diff --git a/structure/skipset/flag.go b/structure/skipset/flag.go new file mode 100644 index 0000000..c9417a3 --- /dev/null +++ b/structure/skipset/flag.go @@ -0,0 +1,63 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package skipset + +import "sync/atomic" + +const ( + fullyLinked = 1 << iota + marked +) + +type bitflag struct { + data uint32 +} + +func (f *bitflag) SetTrue(flags uint32) { + for { + old := atomic.LoadUint32(&f.data) + if old&flags != flags { + // Flag is 0, need set it to 1. + n := old | flags + if !atomic.CompareAndSwapUint32(&f.data, old, n) { + continue + } + } + return + } +} + +func (f *bitflag) SetFalse(flags uint32) { + for { + old := atomic.LoadUint32(&f.data) + check := old & flags + if check != 0 { + // Flag is 1, need set it to 0. + n := old ^ check + if !atomic.CompareAndSwapUint32(&f.data, old, n) { + continue + } + } + return + } +} + +func (f *bitflag) Get(flag uint32) bool { + return (atomic.LoadUint32(&f.data) & flag) != 0 +} + +func (f *bitflag) MGet(check, expect uint32) bool { + return (atomic.LoadUint32(&f.data) & check) == expect +} diff --git a/structure/skipset/flag_test.go b/structure/skipset/flag_test.go new file mode 100644 index 0000000..41a4862 --- /dev/null +++ b/structure/skipset/flag_test.go @@ -0,0 +1,53 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
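The two bits above are almost always read together: a node is visible to readers only when it is fully linked and not yet marked for deletion, which is exactly what `MGet(fullyLinked|marked, fullyLinked)` expresses. A minimal sketch of that idiom, written as if it sat inside package skipset:

```go
package skipset

import "fmt"

// visible reports whether a node carrying these flags should be observed by
// readers: fullyLinked must be set and marked must be clear.
func visible(f *bitflag) bool {
	return f.MGet(fullyLinked|marked, fullyLinked)
}

func demoFlags() {
	var f bitflag
	fmt.Println(visible(&f)) // false: the node is not fully linked yet

	f.SetTrue(fullyLinked)   // Add finished wiring the node into every layer
	fmt.Println(visible(&f)) // true: readers may report it as present

	f.SetTrue(marked)        // Remove/Delete logically deleted the node
	fmt.Println(visible(&f)) // false again: readers skip it
}
```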
+ +package skipset + +import ( + "testing" +) + +func TestFlag(t *testing.T) { + // Correctness. + const ( + f0 = 1 << iota + f1 + f2 + f3 + f4 + f5 + f6 + f7 + ) + x := &bitflag{} + + x.SetTrue(f1 | f3) + if x.Get(f0) || !x.Get(f1) || x.Get(f2) || !x.Get(f3) || !x.MGet(f0|f1|f2|f3, f1|f3) { + t.Fatal("invalid") + } + x.SetTrue(f1) + x.SetTrue(f1 | f3) + if x.data != f1+f3 { + t.Fatal("invalid") + } + + x.SetFalse(f1 | f2) + if x.Get(f0) || x.Get(f1) || x.Get(f2) || !x.Get(f3) || !x.MGet(f0|f1|f2|f3, f3) { + t.Fatal("invalid") + } + x.SetFalse(f1 | f2) + if x.data != f3 { + t.Fatal("invalid") + } +} diff --git a/structure/skipset/oparry.go b/structure/skipset/oparry.go new file mode 100644 index 0000000..9b3ad90 --- /dev/null +++ b/structure/skipset/oparry.go @@ -0,0 +1,60 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package skipset + +import ( + "sync/atomic" + "unsafe" +) + +const ( + op1 = 4 + op2 = maxLevel - op1 +) + +type optionalArray struct { + base [op1]unsafe.Pointer + extra *([op2]unsafe.Pointer) +} + +func (a *optionalArray) load(i int) unsafe.Pointer { + if i < op1 { + return a.base[i] + } + return a.extra[i-op1] +} + +func (a *optionalArray) store(i int, p unsafe.Pointer) { + if i < op1 { + a.base[i] = p + return + } + a.extra[i-op1] = p +} + +func (a *optionalArray) atomicLoad(i int) unsafe.Pointer { + if i < op1 { + return atomic.LoadPointer(&a.base[i]) + } + return atomic.LoadPointer(&a.extra[i-op1]) +} + +func (a *optionalArray) atomicStore(i int, p unsafe.Pointer) { + if i < op1 { + atomic.StorePointer(&a.base[i], p) + return + } + atomic.StorePointer(&a.extra[i-op1], p) +} diff --git a/structure/skipset/oparry_test.go b/structure/skipset/oparry_test.go new file mode 100644 index 0000000..6088c8c --- /dev/null +++ b/structure/skipset/oparry_test.go @@ -0,0 +1,63 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
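`optionalArray` keeps the first `op1` (4) next-pointers inline in the node and allocates the extra `[op2]unsafe.Pointer` block only for nodes taller than four levels; with `p = 0.25` a node exceeds four levels with probability 0.25^4, roughly 0.4%, so almost every node gets away with a single allocation. A minimal sketch of the two-tier indexing, written as if it sat inside package skipset:

```go
package skipset

import "unsafe"

// nextArray mirrors what newInt64Node does: the extra block is allocated only
// when the node's level spills past the four inline slots in base.
func nextArray(level int) *optionalArray {
	arr := new(optionalArray)
	if level > op1 {
		arr.extra = new([op2]unsafe.Pointer)
	}
	return arr
}

func demoOptionalArray() {
	short := nextArray(3) // indexes 0..2 live in arr.base, no extra allocation
	tall := nextArray(10) // indexes 4..9 live in arr.extra

	p := unsafe.Pointer(new(int))
	short.store(2, p)      // i < op1: stored in base[2]
	tall.store(7, p)       // i >= op1: stored in extra[7-op1], i.e. extra[3]
	_ = short.load(2)      // plain load, used while the node is locked or owned
	_ = tall.atomicLoad(7) // atomic load, used on the lock-free read paths
}
```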
+ +package skipset + +import ( + "github.com/songzhibin97/gkit/sys/fastrand" + "testing" + "unsafe" +) + +type dummy struct { + data optionalArray +} + +func TestOpArray(t *testing.T) { + n := new(dummy) + n.data.extra = new([op2]unsafe.Pointer) + + var array [maxLevel]unsafe.Pointer + for i := 0; i < maxLevel; i++ { + value := unsafe.Pointer(&dummy{}) + array[i] = value + n.data.store(i, value) + } + + for i := 0; i < maxLevel; i++ { + if array[i] != n.data.load(i) || array[i] != n.data.atomicLoad(i) { + t.Fatal(i, array[i], n.data.load(i)) + } + } + + for i := 0; i < 1000; i++ { + r := int(fastrand.Uint32n(maxLevel)) + value := unsafe.Pointer(&dummy{}) + if i%100 == 0 { + value = nil + } + array[r] = value + if fastrand.Uint32n(2) == 0 { + n.data.store(r, value) + } else { + n.data.atomicStore(r, value) + } + } + + for i := 0; i < maxLevel; i++ { + if array[i] != n.data.load(i) || array[i] != n.data.atomicLoad(i) { + t.Fatal(i, array[i], n.data.load(i)) + } + } +} diff --git a/structure/skipset/skipset.go b/structure/skipset/skipset.go new file mode 100644 index 0000000..fb0fdb3 --- /dev/null +++ b/structure/skipset/skipset.go @@ -0,0 +1,313 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package skipset is a high-performance, scalable, concurrent-safe set based on skip-list. +// In the typical pattern(100000 operations, 90%CONTAINS 9%Add 1%Remove, 8C16T), the skipset +// up to 15x faster than the built-in sync.Map. +package skipset + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// Int64Set represents a set based on skip list in ascending order. +type Int64Set struct { + header *int64Node + length int64 + highestLevel int64 // highest level for now +} + +type int64Node struct { + value int64 + next optionalArray // [level]*int64Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newInt64Node(value int64, level int) *int64Node { + node := &int64Node{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *int64Node) loadNext(i int) *int64Node { + return (*int64Node)(n.next.load(i)) +} + +func (n *int64Node) storeNext(i int, node *int64Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *int64Node) atomicLoadNext(i int) *int64Node { + return (*int64Node)(n.next.atomicLoad(i)) +} + +func (n *int64Node) atomicStoreNext(i int, node *int64Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *int64Node) lessthan(value int64) bool { + return n.value < value +} + +func (n *int64Node) equal(value int64) bool { + return n.value == value +} + +// NewInt64 return an empty int64 skip set in ascending order. +func NewInt64() *Int64Set { + h := newInt64Node(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Int64Set{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. 
+// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Int64Set) findNodeRemove(value int64, preds *[maxLevel]*int64Node, succs *[maxLevel]*int64Node) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Int64Set) findNodeAdd(value int64, preds *[maxLevel]*int64Node, succs *[maxLevel]*int64Node) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockInt64(preds [maxLevel]*int64Node, highestLevel int) { + var prevPred *int64Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. +func (s *Int64Set) Add(value int64) bool { + level := s.randomLevel() + var preds, succs [maxLevel]*int64Node + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int64Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. 
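+ // preds and succs were collected by findNodeAdd without holding any locks, so
+ // another goroutine may have changed this layer since then; if the re-check
+ // below fails, every acquired lock is released and the insert starts over.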
+ valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt64(preds, highestLocked) + continue + } + + nn := newInt64Node(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt64(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Int64Set) randomLevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Int64Set) Contains(value int64) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *Int64Set) Remove(value int64) bool { + var ( + nodeToRemove *int64Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int64Node + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int64Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt64(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockInt64(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. 
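Because `Contains` and `Range` only follow atomically loaded pointers and check the flag bits, readers never block writers and writers never block readers. A minimal usage sketch of that pattern with `Int64Set`:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/songzhibin97/gkit/structure/skipset"
)

func main() {
	s := skipset.NewInt64()
	var wg sync.WaitGroup

	// Writers: four goroutines insert disjoint ranges concurrently.
	for g := 0; g < 4; g++ {
		wg.Add(1)
		go func(base int64) {
			defer wg.Done()
			for i := int64(0); i < 1000; i++ {
				s.Add(base*1000 + i)
			}
		}(int64(g))
	}

	// Reader: Range skips nodes that are not fully linked yet or already marked
	// for deletion, and may observe values added while it runs.
	wg.Add(1)
	go func() {
		defer wg.Done()
		seen := 0
		s.Range(func(value int64) bool {
			seen++
			return true
		})
		fmt.Println("saw", seen, "values during concurrent inserts")
	}()

	wg.Wait()
	fmt.Println("final length:", s.Len())
}
```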
+func (s *Int64Set) Range(f func(value int64) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Int64Set) Len() int { + return int(atomic.LoadInt64(&s.length)) +} diff --git a/structure/skipset/skipset_bench_test.go b/structure/skipset/skipset_bench_test.go new file mode 100644 index 0000000..7078e75 --- /dev/null +++ b/structure/skipset/skipset_bench_test.go @@ -0,0 +1,469 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package skipset + +import ( + "github.com/songzhibin97/gkit/sys/fastrand" + "math" + "strconv" + "sync" + "testing" +) + +const initsize = 1 << 10 // for `contains` `1Remove9Add90Contains` `1Range9Remove90Add900Contains` +const randN = math.MaxUint32 + +func BenchmarkAdd(b *testing.B) { + b.Run("skipset", func(b *testing.B) { + l := NewInt64() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + l.Add(int64(fastrand.Uint32n(randN))) + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } + }) + }) +} + +func BenchmarkContains100Hits(b *testing.B) { + b.Run("skipset", func(b *testing.B) { + l := NewInt64() + for i := 0; i < initsize; i++ { + l.Add(int64(i)) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _ = l.Contains(int64(fastrand.Uint32n(initsize))) + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + for i := 0; i < initsize; i++ { + l.Store(int64(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, _ = l.Load(int64(fastrand.Uint32n(initsize))) + } + }) + }) +} + +func BenchmarkContains50Hits(b *testing.B) { + const rate = 2 + b.Run("skipset", func(b *testing.B) { + l := NewInt64() + for i := 0; i < initsize*rate; i++ { + if fastrand.Uint32n(rate) == 0 { + l.Add(int64(i)) + } + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _ = l.Contains(int64(fastrand.Uint32n(initsize * rate))) + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + for i := 0; i < initsize*rate; i++ { + if fastrand.Uint32n(rate) == 0 { + l.Store(int64(i), nil) + } + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, _ = l.Load(int64(fastrand.Uint32n(initsize * rate))) + } + }) + }) +} + +func BenchmarkContainsNoHits(b *testing.B) { + b.Run("skipset", func(b *testing.B) { + l := NewInt64() + invalid := make([]int64, 0, initsize) + for i := 0; i < initsize*2; i++ { + if i%2 == 0 { + l.Add(int64(i)) + } else { + invalid = append(invalid, int64(i)) + } + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _ = l.Contains(invalid[fastrand.Uint32n(uint32(len(invalid)))]) + } + }) + }) 
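+ // The sync.Map counterpart below follows the same shape. Both draw their random
+ // keys from fastrand rather than math/rand, whose default source takes a global
+ // lock and could easily become the real bottleneck inside RunParallel loops.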
+ b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + invalid := make([]int64, 0, initsize) + for i := 0; i < initsize*2; i++ { + if i%2 == 0 { + l.Store(int64(i), nil) + } else { + invalid = append(invalid, int64(i)) + } + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, _ = l.Load(invalid[fastrand.Uint32n(uint32(len(invalid)))]) + } + }) + }) +} + +func Benchmark50Add50Contains(b *testing.B) { + b.Run("skipset", func(b *testing.B) { + l := NewInt64() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(10) + if u < 5 { + l.Add(int64(fastrand.Uint32n(randN))) + } else { + l.Contains(int64(fastrand.Uint32n(randN))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(10) + if u < 5 { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } else { + l.Load(int64(fastrand.Uint32n(randN))) + } + } + }) + }) +} + +func Benchmark30Add70Contains(b *testing.B) { + b.Run("skipset", func(b *testing.B) { + l := NewInt64() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(10) + if u < 3 { + l.Add(int64(fastrand.Uint32n(randN))) + } else { + l.Contains(int64(fastrand.Uint32n(randN))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(10) + if u < 3 { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } else { + l.Load(int64(fastrand.Uint32n(randN))) + } + } + }) + }) +} + +func Benchmark1Remove9Add90Contains(b *testing.B) { + b.Run("skipset", func(b *testing.B) { + l := NewInt64() + for i := 0; i < initsize; i++ { + l.Add(int64(i)) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(100) + if u < 9 { + l.Add(int64(fastrand.Uint32n(randN))) + } else if u == 10 { + l.Remove(int64(fastrand.Uint32n(randN))) + } else { + l.Contains(int64(fastrand.Uint32n(randN))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + for i := 0; i < initsize; i++ { + l.Store(int64(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(100) + if u < 9 { + l.Store(int64(fastrand.Uint32n(randN)), nil) + } else if u == 10 { + l.Delete(int64(fastrand.Uint32n(randN))) + } else { + l.Load(int64(fastrand.Uint32n(randN))) + } + } + }) + }) +} + +func Benchmark1Range9Remove90Add900Contains(b *testing.B) { + b.Run("skipset", func(b *testing.B) { + l := NewInt64() + for i := 0; i < initsize; i++ { + l.Add(int64(i)) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(1000) + if u == 0 { + l.Range(func(score int64) bool { + return true + }) + } else if u > 10 && u < 20 { + l.Remove(int64(fastrand.Uint32n(randN))) + } else if u >= 100 && u < 190 { + l.Add(int64(fastrand.Uint32n(randN))) + } else { + l.Contains(int64(fastrand.Uint32n(randN))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + for i := 0; i < initsize; i++ { + l.Store(int64(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(1000) + if u == 0 { + l.Range(func(key, value interface{}) bool { + return true + }) + } else if u > 10 && u < 20 { + l.Delete(fastrand.Uint32n(randN)) + } else if u >= 100 && u < 190 { + l.Store(fastrand.Uint32n(randN), nil) + } 
else { + l.Load(fastrand.Uint32n(randN)) + } + } + }) + }) +} + +func BenchmarkStringAdd(b *testing.B) { + b.Run("skipset", func(b *testing.B) { + l := NewString() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + l.Add(strconv.Itoa(int(fastrand.Uint32()))) + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + l.Store(strconv.Itoa(int(fastrand.Uint32())), nil) + } + }) + }) +} + +func BenchmarkStringContains50Hits(b *testing.B) { + const rate = 2 + b.Run("skipset", func(b *testing.B) { + l := NewString() + for i := 0; i < initsize*rate; i++ { + if fastrand.Uint32n(rate) == 0 { + l.Add(strconv.Itoa(i)) + } + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _ = l.Contains(strconv.Itoa(int(fastrand.Uint32n(initsize * rate)))) + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + for i := 0; i < initsize*rate; i++ { + if fastrand.Uint32n(rate) == 0 { + l.Store(strconv.Itoa(i), nil) + } + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, _ = l.Load(strconv.Itoa(int(fastrand.Uint32n(initsize * rate)))) + } + }) + }) +} + +func BenchmarkString30Add70Contains(b *testing.B) { + b.Run("skipset", func(b *testing.B) { + l := NewString() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(10) + if u < 3 { + l.Add(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } else { + l.Contains(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(10) + if u < 3 { + l.Store(strconv.Itoa(int(fastrand.Uint32n(randN))), nil) + } else { + l.Load(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } + } + }) + }) +} + +func BenchmarkString1Remove9Add90Contains(b *testing.B) { + b.Run("skipset", func(b *testing.B) { + l := NewString() + for i := 0; i < initsize; i++ { + l.Add(strconv.Itoa(i)) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(100) + if u < 9 { + l.Add(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } else if u == 10 { + l.Remove(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } else { + l.Contains(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { + var l sync.Map + for i := 0; i < initsize; i++ { + l.Store(strconv.Itoa(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(100) + if u < 9 { + l.Store(strconv.Itoa(int(fastrand.Uint32n(randN))), nil) + } else if u == 10 { + l.Delete(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } else { + l.Load(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } + } + }) + }) +} + +func BenchmarkString1Range9Remove90Add900Contains(b *testing.B) { + b.Run("skipset", func(b *testing.B) { + l := NewString() + for i := 0; i < initsize; i++ { + l.Add(strconv.Itoa(i)) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(1000) + if u == 0 { + l.Range(func(score string) bool { + return true + }) + } else if u > 10 && u < 20 { + l.Remove(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } else if u >= 100 && u < 190 { + l.Add(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } else { + l.Contains(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } + } + }) + }) + b.Run("sync.Map", func(b *testing.B) { 
+ var l sync.Map + for i := 0; i < initsize; i++ { + l.Store(strconv.Itoa(i), nil) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + u := fastrand.Uint32n(1000) + if u == 0 { + l.Range(func(key, value interface{}) bool { + return true + }) + } else if u > 10 && u < 20 { + l.Delete(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } else if u >= 100 && u < 190 { + l.Store(strconv.Itoa(int(fastrand.Uint32n(randN))), nil) + } else { + l.Load(strconv.Itoa(int(fastrand.Uint32n(randN)))) + } + } + }) + }) +} diff --git a/structure/skipset/skipset_test.go b/structure/skipset/skipset_test.go new file mode 100644 index 0000000..adc527e --- /dev/null +++ b/structure/skipset/skipset_test.go @@ -0,0 +1,338 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package skipset + +import ( + "fmt" + "github.com/songzhibin97/gkit/sys/fastrand" + "math" + "sort" + "strconv" + "sync" + "sync/atomic" + "testing" +) + +func Example() { + l := NewInt() + + for _, v := range []int{10, 12, 15} { + if l.Add(v) { + fmt.Println("skipset add", v) + } + } + + if l.Contains(10) { + fmt.Println("skipset contains 10") + } + + l.Range(func(value int) bool { + fmt.Println("skipset range found ", value) + return true + }) + + l.Remove(15) + fmt.Printf("skipset contains %d items\r\n", l.Len()) +} + +func TestIntSet(t *testing.T) { + // Correctness. + l := NewInt() + if l.length != 0 { + t.Fatal("invalid length") + } + if l.Contains(0) { + t.Fatal("invalid contains") + } + + if !l.Add(0) || l.length != 1 { + t.Fatal("invalid add") + } + if !l.Contains(0) { + t.Fatal("invalid contains") + } + if !l.Remove(0) || l.length != 0 { + t.Fatal("invalid remove") + } + + if !l.Add(20) || l.length != 1 { + t.Fatal("invalid add") + } + if !l.Add(22) || l.length != 2 { + t.Fatal("invalid add") + } + if !l.Add(21) || l.length != 3 { + t.Fatal("invalid add") + } + + var i int + l.Range(func(score int) bool { + if i == 0 && score != 20 { + t.Fatal("invalid range") + } + if i == 1 && score != 21 { + t.Fatal("invalid range") + } + if i == 2 && score != 22 { + t.Fatal("invalid range") + } + i++ + return true + }) + + if !l.Remove(21) || l.length != 2 { + t.Fatal("invalid remove") + } + + i = 0 + l.Range(func(score int) bool { + if i == 0 && score != 20 { + t.Fatal("invalid range") + } + if i == 1 && score != 22 { + t.Fatal("invalid range") + } + i++ + return true + }) + + const num = math.MaxInt16 + // Make rand shuffle array. + // The testArray contains [1,num] + testArray := make([]int, num) + testArray[0] = num + 1 + for i := 1; i < num; i++ { + // We left 0, because it is the default score for head and tail. + // If we check the skipset contains 0, there must be something wrong. + testArray[i] = int(i) + } + for i := len(testArray) - 1; i > 0; i-- { // Fisher–Yates shuffle + j := fastrand.Uint32n(uint32(i + 1)) + testArray[i], testArray[j] = testArray[j], testArray[i] + } + + // Concurrent add. 
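+ // num goroutines add the shuffled values concurrently; afterwards the set length
+ // must be exactly num, and 0 (the default value of the header node) must still
+ // be absent from the set.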
+ var wg sync.WaitGroup + for i := 0; i < num; i++ { + i := i + wg.Add(1) + go func() { + l.Add(testArray[i]) + wg.Done() + }() + } + wg.Wait() + if l.length != int64(num) { + t.Fatalf("invalid length expected %d, got %d", num, l.length) + } + + // Don't contains 0 after concurrent addion. + if l.Contains(0) { + t.Fatal("contains 0 after concurrent addion") + } + + // Concurrent contains. + for i := 0; i < num; i++ { + i := i + wg.Add(1) + go func() { + if !l.Contains(testArray[i]) { + wg.Done() + panic(fmt.Sprintf("add doesn't contains %d", i)) + } + wg.Done() + }() + } + wg.Wait() + + // Concurrent remove. + for i := 0; i < num; i++ { + i := i + wg.Add(1) + go func() { + if !l.Remove(testArray[i]) { + wg.Done() + panic(fmt.Sprintf("can't remove %d", i)) + } + wg.Done() + }() + } + wg.Wait() + if l.length != 0 { + t.Fatalf("invalid length expected %d, got %d", 0, l.length) + } + + // Test all methods. + const smallRndN = 1 << 8 + for i := 0; i < 1<<16; i++ { + wg.Add(1) + go func() { + r := fastrand.Uint32n(num) + if r < 333 { + l.Add(int(fastrand.Uint32n(smallRndN)) + 1) + } else if r < 666 { + l.Contains(int(fastrand.Uint32n(smallRndN)) + 1) + } else if r != 999 { + l.Remove(int(fastrand.Uint32n(smallRndN)) + 1) + } else { + var pre int + l.Range(func(score int) bool { + if score <= pre { // 0 is the default value for header and tail score + panic("invalid content") + } + pre = score + return true + }) + } + wg.Done() + }() + } + wg.Wait() + + // Correctness 2. + var ( + x = NewInt() + y = NewInt() + count = 10000 + ) + + for i := 0; i < count; i++ { + x.Add(i) + } + + for i := 0; i < 16; i++ { + wg.Add(1) + go func() { + x.Range(func(score int) bool { + if x.Remove(score) { + if !y.Add(score) { + panic("invalid add") + } + } + return true + }) + wg.Done() + }() + } + wg.Wait() + if x.Len() != 0 || y.Len() != count { + t.Fatal("invalid length") + } + + // Concurrent Add and Remove in small zone. 
+ x = NewInt() + var ( + addcount uint64 = 0 + removecount uint64 = 0 + ) + + for i := 0; i < 16; i++ { + wg.Add(1) + go func() { + for i := 0; i < 1000; i++ { + if fastrand.Uint32n(2) == 0 { + if x.Remove(int(fastrand.Uint32n(10))) { + atomic.AddUint64(&removecount, 1) + } + } else { + if x.Add(int(fastrand.Uint32n(10))) { + atomic.AddUint64(&addcount, 1) + } + } + } + wg.Done() + }() + } + wg.Wait() + if addcount < removecount { + panic("invalid count") + } + if addcount-removecount != uint64(x.Len()) { + panic("invalid count") + } + + pre := -1 + x.Range(func(score int) bool { + if score <= pre { + panic("invalid content") + } + pre = score + return true + }) +} + +func TestIntSetDesc(t *testing.T) { + s := NewIntDesc() + nums := []int{-1, 0, 5, 12} + for _, v := range nums { + s.Add(v) + } + i := len(nums) - 1 + s.Range(func(value int) bool { + if nums[i] != value { + t.Fatal("error") + } + i-- + return true + }) +} + +func TestStringSet(t *testing.T) { + x := NewString() + if !x.Add("111") || x.Len() != 1 { + t.Fatal("invalid") + } + if !x.Add("222") || x.Len() != 2 { + t.Fatal("invalid") + } + if x.Add("111") || x.Len() != 2 { + t.Fatal("invalid") + } + if !x.Contains("111") || !x.Contains("222") { + t.Fatal("invalid") + } + if !x.Remove("111") || x.Len() != 1 { + t.Fatal("invalid") + } + if !x.Remove("222") || x.Len() != 0 { + t.Fatal("invalid") + } + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + i := i + go func() { + if !x.Add(strconv.Itoa(i)) { + panic("invalid") + } + wg.Done() + }() + } + wg.Wait() + + tmp := make([]int, 0, 100) + x.Range(func(val string) bool { + res, _ := strconv.Atoi(val) + tmp = append(tmp, res) + return true + }) + sort.Ints(tmp) + for i := 0; i < 100; i++ { + if i != tmp[i] { + t.Fatal("invalid") + } + } +} diff --git a/structure/skipset/types.go b/structure/skipset/types.go new file mode 100644 index 0000000..4c6cb9c --- /dev/null +++ b/structure/skipset/types.go @@ -0,0 +1,5520 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go run types_gen.go; DO NOT EDIT. +package skipset + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// Float32Set represents a set based on skip list in ascending order. 
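Every generated type also has a `Desc` twin further down in this file with the comparison inverted, so its `Range` visits values from largest to smallest. A short sketch contrasting the two float32 flavors:

```go
package main

import (
	"fmt"

	"github.com/songzhibin97/gkit/structure/skipset"
)

func main() {
	asc := skipset.NewFloat32()
	desc := skipset.NewFloat32Desc()
	for _, v := range []float32{3.5, 1.25, 2.75} {
		asc.Add(v)
		desc.Add(v)
	}

	asc.Range(func(v float32) bool { fmt.Print(v, " "); return true })
	fmt.Println() // 1.25 2.75 3.5

	desc.Range(func(v float32) bool { fmt.Print(v, " "); return true })
	fmt.Println() // 3.5 2.75 1.25
}
```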
+type Float32Set struct { + header *float32Node + length int64 + highestLevel int64 // highest level for now +} + +type float32Node struct { + value float32 + next optionalArray // [level]*float32Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newFloat32Node(value float32, level int) *float32Node { + node := &float32Node{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *float32Node) loadNext(i int) *float32Node { + return (*float32Node)(n.next.load(i)) +} + +func (n *float32Node) storeNext(i int, node *float32Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *float32Node) atomicLoadNext(i int) *float32Node { + return (*float32Node)(n.next.atomicLoad(i)) +} + +func (n *float32Node) atomicStoreNext(i int, node *float32Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *float32Node) lessthan(value float32) bool { + return n.value < value +} + +func (n *float32Node) equal(value float32) bool { + return n.value == value +} + +// NewFloat32 return an empty float32 skip set in ascending order. +func NewFloat32() *Float32Set { + h := newFloat32Node(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Float32Set{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Float32Set) findNodeRemove(value float32, preds *[maxLevel]*float32Node, succs *[maxLevel]*float32Node) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Float32Set) findNodeAdd(value float32, preds *[maxLevel]*float32Node, succs *[maxLevel]*float32Node) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockFloat32(preds [maxLevel]*float32Node, highestLevel int) { + var prevPred *float32Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. 
+func (s *Float32Set) Add(value float32) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*float32Node + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float32Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat32(preds, highestLocked) + continue + } + + nn := newFloat32Node(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockFloat32(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Float32Set) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Float32Set) Contains(value float32) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *Float32Set) Remove(value float32) bool { + var ( + nodeToRemove *float32Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*float32Node + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. 
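+ // At this point the node is logically deleted (marked), so no new operation will
+ // treat it as present. The code below locks the node's predecessors on every
+ // layer it occupies, re-validates them, and then unlinks the node top-down.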
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float32Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat32(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockFloat32(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *Float32Set) Range(f func(value float32) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Float32Set) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Float32SetDesc represents a set based on skip list in descending order. +type Float32SetDesc struct { + header *float32NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type float32NodeDesc struct { + value float32 + next optionalArray // [level]*float32NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newFloat32NodeDesc(value float32, level int) *float32NodeDesc { + node := &float32NodeDesc{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *float32NodeDesc) loadNext(i int) *float32NodeDesc { + return (*float32NodeDesc)(n.next.load(i)) +} + +func (n *float32NodeDesc) storeNext(i int, node *float32NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *float32NodeDesc) atomicLoadNext(i int) *float32NodeDesc { + return (*float32NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *float32NodeDesc) atomicStoreNext(i int, node *float32NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *float32NodeDesc) lessthan(value float32) bool { + return n.value > value +} + +func (n *float32NodeDesc) equal(value float32) bool { + return n.value == value +} + +// NewFloat32Desc return an empty float32 skip set in descending order. +func NewFloat32Desc() *Float32SetDesc { + h := newFloat32NodeDesc(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Float32SetDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. 
+func (s *Float32SetDesc) findNodeRemove(value float32, preds *[maxLevel]*float32NodeDesc, succs *[maxLevel]*float32NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Float32SetDesc) findNodeAdd(value float32, preds *[maxLevel]*float32NodeDesc, succs *[maxLevel]*float32NodeDesc) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockFloat32Desc(preds [maxLevel]*float32NodeDesc, highestLevel int) { + var prevPred *float32NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. +func (s *Float32SetDesc) Add(value float32) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*float32NodeDesc + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float32NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. 
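+ // pred.mu is held at this point, and every writer swings a node's next pointers only while
+ // holding that node's lock, so the plain loadNext below is stable here.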
+ valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat32Desc(preds, highestLocked) + continue + } + + nn := newFloat32NodeDesc(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockFloat32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Float32SetDesc) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Float32SetDesc) Contains(value float32) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *Float32SetDesc) Remove(value float32) bool { + var ( + nodeToRemove *float32NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*float32NodeDesc + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float32NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat32Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockFloat32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. 
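+// Values concurrently added or removed while Range is running may or may not be visited.
+// An illustrative sketch (editor-added), using only the API declared in this file:
+//
+//	s := NewFloat32Desc()
+//	s.Add(1.0)
+//	s.Add(2.0)
+//	s.Range(func(v float32) bool {
+//		fmt.Println(v) // prints 2 then 1 (descending order)
+//		return true
+//	})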
+func (s *Float32SetDesc) Range(f func(value float32) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Float32SetDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Float64Set represents a set based on skip list in ascending order. +type Float64Set struct { + header *float64Node + length int64 + highestLevel int64 // highest level for now +} + +type float64Node struct { + value float64 + next optionalArray // [level]*float64Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newFloat64Node(value float64, level int) *float64Node { + node := &float64Node{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *float64Node) loadNext(i int) *float64Node { + return (*float64Node)(n.next.load(i)) +} + +func (n *float64Node) storeNext(i int, node *float64Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *float64Node) atomicLoadNext(i int) *float64Node { + return (*float64Node)(n.next.atomicLoad(i)) +} + +func (n *float64Node) atomicStoreNext(i int, node *float64Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *float64Node) lessthan(value float64) bool { + return n.value < value +} + +func (n *float64Node) equal(value float64) bool { + return n.value == value +} + +// NewFloat64 return an empty float64 skip set in ascending order. +func NewFloat64() *Float64Set { + h := newFloat64Node(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Float64Set{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Float64Set) findNodeRemove(value float64, preds *[maxLevel]*float64Node, succs *[maxLevel]*float64Node) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Float64Set) findNodeAdd(value float64, preds *[maxLevel]*float64Node, succs *[maxLevel]*float64Node) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. 
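+ // Unlike findNodeRemove, this search may stop at the first (topmost) layer where the value
+ // is found: Add only consults succs[lFound] in that case, so the lower layers need not be filled.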
+ if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockFloat64(preds [maxLevel]*float64Node, highestLevel int) { + var prevPred *float64Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. +func (s *Float64Set) Add(value float64) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*float64Node + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float64Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat64(preds, highestLocked) + continue + } + + nn := newFloat64Node(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockFloat64(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Float64Set) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Float64Set) Contains(value float64) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. 
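+// Only the caller whose Remove actually marks the node returns true; a concurrent Remove of
+// the same value returns false, so exactly one goroutine observes a successful deletion.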
+func (s *Float64Set) Remove(value float64) bool { + var ( + nodeToRemove *float64Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*float64Node + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float64Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat64(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockFloat64(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *Float64Set) Range(f func(value float64) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Float64Set) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Float64SetDesc represents a set based on skip list in descending order. 
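+// It exposes the same API as Float64Set; only the comparison in lessthan is reversed, so the
+// links (and therefore Range) run from the largest value to the smallest.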
+type Float64SetDesc struct { + header *float64NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type float64NodeDesc struct { + value float64 + next optionalArray // [level]*float64NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newFloat64NodeDesc(value float64, level int) *float64NodeDesc { + node := &float64NodeDesc{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *float64NodeDesc) loadNext(i int) *float64NodeDesc { + return (*float64NodeDesc)(n.next.load(i)) +} + +func (n *float64NodeDesc) storeNext(i int, node *float64NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *float64NodeDesc) atomicLoadNext(i int) *float64NodeDesc { + return (*float64NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *float64NodeDesc) atomicStoreNext(i int, node *float64NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *float64NodeDesc) lessthan(value float64) bool { + return n.value > value +} + +func (n *float64NodeDesc) equal(value float64) bool { + return n.value == value +} + +// NewFloat64Desc return an empty float64 skip set in descending order. +func NewFloat64Desc() *Float64SetDesc { + h := newFloat64NodeDesc(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Float64SetDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Float64SetDesc) findNodeRemove(value float64, preds *[maxLevel]*float64NodeDesc, succs *[maxLevel]*float64NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Float64SetDesc) findNodeAdd(value float64, preds *[maxLevel]*float64NodeDesc, succs *[maxLevel]*float64NodeDesc) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockFloat64Desc(preds [maxLevel]*float64NodeDesc, highestLevel int) { + var prevPred *float64NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. 
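+//
+// An illustrative sketch (editor-added) of the Add/Contains contract:
+//
+//	s := NewFloat64Desc()
+//	first := s.Add(3.14) // true: this call inserted the value
+//	again := s.Add(3.14) // false: the value is already present
+//	_ = first && s.Contains(3.14) && !again // true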
+func (s *Float64SetDesc) Add(value float64) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*float64NodeDesc + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float64NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat64Desc(preds, highestLocked) + continue + } + + nn := newFloat64NodeDesc(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockFloat64Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Float64SetDesc) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Float64SetDesc) Contains(value float64) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *Float64SetDesc) Remove(value float64) bool { + var ( + nodeToRemove *float64NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*float64NodeDesc + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. 
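+ // This caller lost the race: the goroutine that marked the node owns the unlink and is
+ // the one that will report success.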
+ nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *float64NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockFloat64Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockFloat64Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *Float64SetDesc) Range(f func(value float64) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Float64SetDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Int32Set represents a set based on skip list in ascending order. +type Int32Set struct { + header *int32Node + length int64 + highestLevel int64 // highest level for now +} + +type int32Node struct { + value int32 + next optionalArray // [level]*int32Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newInt32Node(value int32, level int) *int32Node { + node := &int32Node{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *int32Node) loadNext(i int) *int32Node { + return (*int32Node)(n.next.load(i)) +} + +func (n *int32Node) storeNext(i int, node *int32Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *int32Node) atomicLoadNext(i int) *int32Node { + return (*int32Node)(n.next.atomicLoad(i)) +} + +func (n *int32Node) atomicStoreNext(i int, node *int32Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *int32Node) lessthan(value int32) bool { + return n.value < value +} + +func (n *int32Node) equal(value int32) bool { + return n.value == value +} + +// NewInt32 return an empty int32 skip set in ascending order. +func NewInt32() *Int32Set { + h := newInt32Node(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Int32Set{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Int32Set) findNodeRemove(value int32, preds *[maxLevel]*int32Node, succs *[maxLevel]*int32Node) int { + // lFound represents the index of the first layer at which it found a node. 
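+ // Since layers are scanned from the highest level down, lFound is the node's topmost
+ // layer; -1 means the value is not present.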
+ lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Int32Set) findNodeAdd(value int32, preds *[maxLevel]*int32Node, succs *[maxLevel]*int32Node) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockInt32(preds [maxLevel]*int32Node, highestLevel int) { + var prevPred *int32Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. +func (s *Int32Set) Add(value int32) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*int32Node + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt32(preds, highestLocked) + continue + } + + nn := newInt32Node(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt32(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Int32Set) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. 
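+ // A small CAS loop: if another goroutine raises highestLevel past ours first we can stop;
+ // otherwise retry until this level has been published.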
+ for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Int32Set) Contains(value int32) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *Int32Set) Remove(value int32) bool { + var ( + nodeToRemove *int32Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int32Node + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt32(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockInt32(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *Int32Set) Range(f func(value int32) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Int32Set) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Int32SetDesc represents a set based on skip list in descending order. 
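+// The zero value is not ready for use; obtain an initialized set from NewInt32Desc.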
+type Int32SetDesc struct { + header *int32NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type int32NodeDesc struct { + value int32 + next optionalArray // [level]*int32NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newInt32NodeDesc(value int32, level int) *int32NodeDesc { + node := &int32NodeDesc{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *int32NodeDesc) loadNext(i int) *int32NodeDesc { + return (*int32NodeDesc)(n.next.load(i)) +} + +func (n *int32NodeDesc) storeNext(i int, node *int32NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *int32NodeDesc) atomicLoadNext(i int) *int32NodeDesc { + return (*int32NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *int32NodeDesc) atomicStoreNext(i int, node *int32NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *int32NodeDesc) lessthan(value int32) bool { + return n.value > value +} + +func (n *int32NodeDesc) equal(value int32) bool { + return n.value == value +} + +// NewInt32Desc return an empty int32 skip set in descending order. +func NewInt32Desc() *Int32SetDesc { + h := newInt32NodeDesc(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Int32SetDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Int32SetDesc) findNodeRemove(value int32, preds *[maxLevel]*int32NodeDesc, succs *[maxLevel]*int32NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Int32SetDesc) findNodeAdd(value int32, preds *[maxLevel]*int32NodeDesc, succs *[maxLevel]*int32NodeDesc) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockInt32Desc(preds [maxLevel]*int32NodeDesc, highestLevel int) { + var prevPred *int32NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. 
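+// That wait is a spin loop and is normally very short, because the inserting goroutine sets
+// fullyLinked immediately after linking the node at every layer.
+//
+// An illustrative sketch (editor-added):
+//
+//	s := NewInt32Desc()
+//	s.Add(7) // true
+//	s.Add(7) // false: already present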
+func (s *Int32SetDesc) Add(value int32) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*int32NodeDesc + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt32Desc(preds, highestLocked) + continue + } + + nn := newInt32NodeDesc(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Int32SetDesc) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Int32SetDesc) Contains(value int32) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *Int32SetDesc) Remove(value int32) bool { + var ( + nodeToRemove *int32NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int32NodeDesc + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. 
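+ // The unlink loop further down walks from the node's top layer to layer 0, so the node
+ // leaves the index layers first and the base list last, staying reachable to readers until
+ // the final store.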
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int32NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt32Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockInt32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *Int32SetDesc) Range(f func(value int32) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Int32SetDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Int16Set represents a set based on skip list in ascending order. +type Int16Set struct { + header *int16Node + length int64 + highestLevel int64 // highest level for now +} + +type int16Node struct { + value int16 + next optionalArray // [level]*int16Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newInt16Node(value int16, level int) *int16Node { + node := &int16Node{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *int16Node) loadNext(i int) *int16Node { + return (*int16Node)(n.next.load(i)) +} + +func (n *int16Node) storeNext(i int, node *int16Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *int16Node) atomicLoadNext(i int) *int16Node { + return (*int16Node)(n.next.atomicLoad(i)) +} + +func (n *int16Node) atomicStoreNext(i int, node *int16Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *int16Node) lessthan(value int16) bool { + return n.value < value +} + +func (n *int16Node) equal(value int16) bool { + return n.value == value +} + +// NewInt16 return an empty int16 skip set in ascending order. +func NewInt16() *Int16Set { + h := newInt16Node(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Int16Set{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Int16Set) findNodeRemove(value int16, preds *[maxLevel]*int16Node, succs *[maxLevel]*int16Node) int { + // lFound represents the index of the first layer at which it found a node. 
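+ // Unlike findNodeAdd, the search keeps descending after a match so that preds[i] and
+ // succs[i] are recorded at every layer; Remove needs all of them to relink.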
+ lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Int16Set) findNodeAdd(value int16, preds *[maxLevel]*int16Node, succs *[maxLevel]*int16Node) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockInt16(preds [maxLevel]*int16Node, highestLevel int) { + var prevPred *int16Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. +func (s *Int16Set) Add(value int16) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*int16Node + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt16(preds, highestLocked) + continue + } + + nn := newInt16Node(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt16(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Int16Set) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. 
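+ // randomLevel is a package-level helper shared by every set here; only the highestLevel
+ // bookkeeping below is per-set.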
+ for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Int16Set) Contains(value int16) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *Int16Set) Remove(value int16) bool { + var ( + nodeToRemove *int16Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int16Node + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt16(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockInt16(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *Int16Set) Range(f func(value int16) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Int16Set) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Int16SetDesc represents a set based on skip list in descending order. 
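+// As with the other sets, the length is kept in an atomic counter, so Len can momentarily
+// disagree with what Range observes while concurrent Adds and Removes are in flight.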
+type Int16SetDesc struct { + header *int16NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type int16NodeDesc struct { + value int16 + next optionalArray // [level]*int16NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newInt16NodeDesc(value int16, level int) *int16NodeDesc { + node := &int16NodeDesc{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *int16NodeDesc) loadNext(i int) *int16NodeDesc { + return (*int16NodeDesc)(n.next.load(i)) +} + +func (n *int16NodeDesc) storeNext(i int, node *int16NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *int16NodeDesc) atomicLoadNext(i int) *int16NodeDesc { + return (*int16NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *int16NodeDesc) atomicStoreNext(i int, node *int16NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *int16NodeDesc) lessthan(value int16) bool { + return n.value > value +} + +func (n *int16NodeDesc) equal(value int16) bool { + return n.value == value +} + +// NewInt16Desc return an empty int16 skip set in descending order. +func NewInt16Desc() *Int16SetDesc { + h := newInt16NodeDesc(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Int16SetDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Int16SetDesc) findNodeRemove(value int16, preds *[maxLevel]*int16NodeDesc, succs *[maxLevel]*int16NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Int16SetDesc) findNodeAdd(value int16, preds *[maxLevel]*int16NodeDesc, succs *[maxLevel]*int16NodeDesc) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockInt16Desc(preds [maxLevel]*int16NodeDesc, highestLevel int) { + var prevPred *int16NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. 
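+// Readers are never blocked while this happens: Contains and Range use only atomic loads and
+// never take node locks.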
+func (s *Int16SetDesc) Add(value int16) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*int16NodeDesc + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt16Desc(preds, highestLocked) + continue + } + + nn := newInt16NodeDesc(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Int16SetDesc) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Int16SetDesc) Contains(value int16) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *Int16SetDesc) Remove(value int16) bool { + var ( + nodeToRemove *int16NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*int16NodeDesc + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. 
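+ // highestLocked records how far the locking loop below got, so unlockInt16Desc releases
+ // exactly the mutexes that were acquired, even when validation fails.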
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *int16NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt16Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockInt16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *Int16SetDesc) Range(f func(value int16) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Int16SetDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// IntSet represents a set based on skip list in ascending order. +type IntSet struct { + header *intNode + length int64 + highestLevel int64 // highest level for now +} + +type intNode struct { + value int + next optionalArray // [level]*intNode + mu sync.Mutex + flags bitflag + level uint32 +} + +func newIntNode(value int, level int) *intNode { + node := &intNode{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *intNode) loadNext(i int) *intNode { + return (*intNode)(n.next.load(i)) +} + +func (n *intNode) storeNext(i int, node *intNode) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *intNode) atomicLoadNext(i int) *intNode { + return (*intNode)(n.next.atomicLoad(i)) +} + +func (n *intNode) atomicStoreNext(i int, node *intNode) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *intNode) lessthan(value int) bool { + return n.value < value +} + +func (n *intNode) equal(value int) bool { + return n.value == value +} + +// NewInt return an empty int skip set in ascending order. +func NewInt() *IntSet { + h := newIntNode(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &IntSet{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *IntSet) findNodeRemove(value int, preds *[maxLevel]*intNode, succs *[maxLevel]*intNode) int { + // lFound represents the index of the first layer at which it found a node. 
+ lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *IntSet) findNodeAdd(value int, preds *[maxLevel]*intNode, succs *[maxLevel]*intNode) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockInt(preds [maxLevel]*intNode, highestLevel int) { + var prevPred *intNode + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. +func (s *IntSet) Add(value int) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*intNode + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNode + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt(preds, highestLocked) + continue + } + + nn := newIntNode(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockInt(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *IntSet) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. 
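+	// The CAS loop below lifts s.highestLevel up to the freshly generated level: it exits as soon as the stored level is already at least `level` or the CAS succeeds, and retries only when another goroutine changed highestLevel between the load and the CAS.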
+ for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *IntSet) Contains(value int) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *IntSet) Remove(value int) bool { + var ( + nodeToRemove *intNode + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*intNode + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNode + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockInt(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockInt(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *IntSet) Range(f func(value int) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *IntSet) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// IntSetDesc represents a set based on skip list in descending order. 
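Before the descending IntSetDesc variant defined next, here is a minimal usage sketch of the ascending IntSet completed above. It is illustrative only and assumes the generated file's package is named skipset, which is not visible in this part of the diff; it uses nothing beyond the NewInt/Add/Contains/Remove/Range/Len API shown here.

// A hypothetical example in the same (assumed) package, so no import path has to be guessed.
package skipset

import "fmt"

func ExampleIntSet() {
	s := NewInt()
	for _, v := range []int{3, 1, 2, 2} {
		s.Add(v) // the second Add(2) returns false: the value is already present
	}
	fmt.Println(s.Contains(2)) // true
	fmt.Println(s.Len())       // 3
	s.Remove(1)
	s.Range(func(v int) bool {
		fmt.Print(v, " ") // remaining values in ascending order: 2 3
		return true       // returning false would stop the iteration early
	})
}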
+type IntSetDesc struct { + header *intNodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type intNodeDesc struct { + value int + next optionalArray // [level]*intNodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newIntNodeDesc(value int, level int) *intNodeDesc { + node := &intNodeDesc{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *intNodeDesc) loadNext(i int) *intNodeDesc { + return (*intNodeDesc)(n.next.load(i)) +} + +func (n *intNodeDesc) storeNext(i int, node *intNodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *intNodeDesc) atomicLoadNext(i int) *intNodeDesc { + return (*intNodeDesc)(n.next.atomicLoad(i)) +} + +func (n *intNodeDesc) atomicStoreNext(i int, node *intNodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *intNodeDesc) lessthan(value int) bool { + return n.value > value +} + +func (n *intNodeDesc) equal(value int) bool { + return n.value == value +} + +// NewIntDesc return an empty int skip set in descending order. +func NewIntDesc() *IntSetDesc { + h := newIntNodeDesc(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &IntSetDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *IntSetDesc) findNodeRemove(value int, preds *[maxLevel]*intNodeDesc, succs *[maxLevel]*intNodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *IntSetDesc) findNodeAdd(value int, preds *[maxLevel]*intNodeDesc, succs *[maxLevel]*intNodeDesc) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockIntDesc(preds [maxLevel]*intNodeDesc, highestLevel int) { + var prevPred *intNodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. 
+func (s *IntSetDesc) Add(value int) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*intNodeDesc + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockIntDesc(preds, highestLocked) + continue + } + + nn := newIntNodeDesc(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockIntDesc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *IntSetDesc) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *IntSetDesc) Contains(value int) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *IntSetDesc) Remove(value int) bool { + var ( + nodeToRemove *intNodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*intNodeDesc + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *intNodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockIntDesc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockIntDesc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *IntSetDesc) Range(f func(value int) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *IntSetDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Uint64Set represents a set based on skip list in ascending order. +type Uint64Set struct { + header *uint64Node + length int64 + highestLevel int64 // highest level for now +} + +type uint64Node struct { + value uint64 + next optionalArray // [level]*uint64Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUuint64Node(value uint64, level int) *uint64Node { + node := &uint64Node{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uint64Node) loadNext(i int) *uint64Node { + return (*uint64Node)(n.next.load(i)) +} + +func (n *uint64Node) storeNext(i int, node *uint64Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uint64Node) atomicLoadNext(i int) *uint64Node { + return (*uint64Node)(n.next.atomicLoad(i)) +} + +func (n *uint64Node) atomicStoreNext(i int, node *uint64Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uint64Node) lessthan(value uint64) bool { + return n.value < value +} + +func (n *uint64Node) equal(value uint64) bool { + return n.value == value +} + +// NewUint64 return an empty uint64 skip set in ascending order. +func NewUint64() *Uint64Set { + h := newUuint64Node(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Uint64Set{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Uint64Set) findNodeRemove(value uint64, preds *[maxLevel]*uint64Node, succs *[maxLevel]*uint64Node) int { + // lFound represents the index of the first layer at which it found a node. 
+ lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Uint64Set) findNodeAdd(value uint64, preds *[maxLevel]*uint64Node, succs *[maxLevel]*uint64Node) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockUint64(preds [maxLevel]*uint64Node, highestLevel int) { + var prevPred *uint64Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. +func (s *Uint64Set) Add(value uint64) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*uint64Node + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint64Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint64(preds, highestLocked) + continue + } + + nn := newUuint64Node(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint64(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Uint64Set) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. 
+ for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Uint64Set) Contains(value uint64) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *Uint64Set) Remove(value uint64) bool { + var ( + nodeToRemove *uint64Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint64Node + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint64Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint64(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockUint64(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *Uint64Set) Range(f func(value uint64) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Uint64Set) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Uint64SetDesc represents a set based on skip list in descending order. 
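Before the descending Uint64SetDesc that follows, a short concurrency sketch for the ascending Uint64Set above, since concurrent mutation is what the fine-grained locking is for. The helper name concurrentFill and the package name skipset are illustrative assumptions, not part of the patch.

package skipset // assumed package name, as in the earlier sketch

import "sync"

// concurrentFill has several goroutines Add the same n distinct values.
// Concurrent duplicate Adds are safe and exactly one Add per value returns
// true, so s.Len() equals n once every goroutine has finished.
func concurrentFill(n int) *Uint64Set {
	s := NewUint64()
	var wg sync.WaitGroup
	for g := 0; g < 4; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < n; i++ {
				s.Add(uint64(i))
			}
		}()
	}
	wg.Wait()
	return s
}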
+type Uint64SetDesc struct { + header *uint64NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type uint64NodeDesc struct { + value uint64 + next optionalArray // [level]*uint64NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUuint64NodeDescDesc(value uint64, level int) *uint64NodeDesc { + node := &uint64NodeDesc{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uint64NodeDesc) loadNext(i int) *uint64NodeDesc { + return (*uint64NodeDesc)(n.next.load(i)) +} + +func (n *uint64NodeDesc) storeNext(i int, node *uint64NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uint64NodeDesc) atomicLoadNext(i int) *uint64NodeDesc { + return (*uint64NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *uint64NodeDesc) atomicStoreNext(i int, node *uint64NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uint64NodeDesc) lessthan(value uint64) bool { + return n.value > value +} + +func (n *uint64NodeDesc) equal(value uint64) bool { + return n.value == value +} + +// NewUint64Desc return an empty uint64 skip set in descending order. +func NewUint64Desc() *Uint64SetDesc { + h := newUuint64NodeDescDesc(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Uint64SetDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Uint64SetDesc) findNodeRemove(value uint64, preds *[maxLevel]*uint64NodeDesc, succs *[maxLevel]*uint64NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Uint64SetDesc) findNodeAdd(value uint64, preds *[maxLevel]*uint64NodeDesc, succs *[maxLevel]*uint64NodeDesc) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockUint64Desc(preds [maxLevel]*uint64NodeDesc, highestLevel int) { + var prevPred *uint64NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. 
+func (s *Uint64SetDesc) Add(value uint64) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*uint64NodeDesc + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint64NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint64Desc(preds, highestLocked) + continue + } + + nn := newUuint64NodeDescDesc(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint64Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Uint64SetDesc) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Uint64SetDesc) Contains(value uint64) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *Uint64SetDesc) Remove(value uint64) bool { + var ( + nodeToRemove *uint64NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint64NodeDesc + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. 
+ nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint64NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint64Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockUint64Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *Uint64SetDesc) Range(f func(value uint64) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Uint64SetDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Uint32Set represents a set based on skip list in ascending order. +type Uint32Set struct { + header *uint32Node + length int64 + highestLevel int64 // highest level for now +} + +type uint32Node struct { + value uint32 + next optionalArray // [level]*uint32Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUint32Node(value uint32, level int) *uint32Node { + node := &uint32Node{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uint32Node) loadNext(i int) *uint32Node { + return (*uint32Node)(n.next.load(i)) +} + +func (n *uint32Node) storeNext(i int, node *uint32Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uint32Node) atomicLoadNext(i int) *uint32Node { + return (*uint32Node)(n.next.atomicLoad(i)) +} + +func (n *uint32Node) atomicStoreNext(i int, node *uint32Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uint32Node) lessthan(value uint32) bool { + return n.value < value +} + +func (n *uint32Node) equal(value uint32) bool { + return n.value == value +} + +// NewUint32 return an empty uint32 skip set in ascending order. +func NewUint32() *Uint32Set { + h := newUint32Node(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Uint32Set{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. 
+func (s *Uint32Set) findNodeRemove(value uint32, preds *[maxLevel]*uint32Node, succs *[maxLevel]*uint32Node) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Uint32Set) findNodeAdd(value uint32, preds *[maxLevel]*uint32Node, succs *[maxLevel]*uint32Node) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockUint32(preds [maxLevel]*uint32Node, highestLevel int) { + var prevPred *uint32Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. +func (s *Uint32Set) Add(value uint32) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*uint32Node + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint32Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. 
+ valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint32(preds, highestLocked) + continue + } + + nn := newUint32Node(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint32(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Uint32Set) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Uint32Set) Contains(value uint32) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *Uint32Set) Remove(value uint32) bool { + var ( + nodeToRemove *uint32Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint32Node + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint32Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint32(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockUint32(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. 
+func (s *Uint32Set) Range(f func(value uint32) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Uint32Set) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Uint32SetDesc represents a set based on skip list in descending order. +type Uint32SetDesc struct { + header *uint32NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type uint32NodeDesc struct { + value uint32 + next optionalArray // [level]*uint32NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUint32NodeDesc(value uint32, level int) *uint32NodeDesc { + node := &uint32NodeDesc{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uint32NodeDesc) loadNext(i int) *uint32NodeDesc { + return (*uint32NodeDesc)(n.next.load(i)) +} + +func (n *uint32NodeDesc) storeNext(i int, node *uint32NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uint32NodeDesc) atomicLoadNext(i int) *uint32NodeDesc { + return (*uint32NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *uint32NodeDesc) atomicStoreNext(i int, node *uint32NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uint32NodeDesc) lessthan(value uint32) bool { + return n.value > value +} + +func (n *uint32NodeDesc) equal(value uint32) bool { + return n.value == value +} + +// NewUint32Desc return an empty uint32 skip set in descending order. +func NewUint32Desc() *Uint32SetDesc { + h := newUint32NodeDesc(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Uint32SetDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Uint32SetDesc) findNodeRemove(value uint32, preds *[maxLevel]*uint32NodeDesc, succs *[maxLevel]*uint32NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Uint32SetDesc) findNodeAdd(value uint32, preds *[maxLevel]*uint32NodeDesc, succs *[maxLevel]*uint32NodeDesc) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. 
+ if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockUint32Desc(preds [maxLevel]*uint32NodeDesc, highestLevel int) { + var prevPred *uint32NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. +func (s *Uint32SetDesc) Add(value uint32) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*uint32NodeDesc + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint32NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint32Desc(preds, highestLocked) + continue + } + + nn := newUint32NodeDesc(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Uint32SetDesc) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Uint32SetDesc) Contains(value uint32) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. 
+func (s *Uint32SetDesc) Remove(value uint32) bool { + var ( + nodeToRemove *uint32NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint32NodeDesc + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint32NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint32Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockUint32Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *Uint32SetDesc) Range(f func(value uint32) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Uint32SetDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Uint16Set represents a set based on skip list in ascending order. 
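The descending variants differ from the ascending ones only in lessthan, which flips the comparison, so Range visits values from largest to smallest and an early false return from f stops the walk. A brief sketch with the Uint32SetDesc just defined, under the same assumed package name as the earlier examples; the ascending Uint16Set follows below.

package skipset // assumed package name

import "fmt"

func ExampleUint32SetDesc() {
	s := NewUint32Desc()
	for _, v := range []uint32{1, 7, 4} {
		s.Add(v)
	}
	s.Range(func(v uint32) bool {
		fmt.Print(v, " ") // descending order, so 7 is visited first
		return v != 4     // stop once 4 has been visited; only "7 4 " is printed
	})
}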
+type Uint16Set struct { + header *uint16Node + length int64 + highestLevel int64 // highest level for now +} + +type uint16Node struct { + value uint16 + next optionalArray // [level]*uint16Node + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUint16Node(value uint16, level int) *uint16Node { + node := &uint16Node{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uint16Node) loadNext(i int) *uint16Node { + return (*uint16Node)(n.next.load(i)) +} + +func (n *uint16Node) storeNext(i int, node *uint16Node) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uint16Node) atomicLoadNext(i int) *uint16Node { + return (*uint16Node)(n.next.atomicLoad(i)) +} + +func (n *uint16Node) atomicStoreNext(i int, node *uint16Node) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uint16Node) lessthan(value uint16) bool { + return n.value < value +} + +func (n *uint16Node) equal(value uint16) bool { + return n.value == value +} + +// NewUint16 return an empty uint16 skip set in ascending order. +func NewUint16() *Uint16Set { + h := newUint16Node(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Uint16Set{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Uint16Set) findNodeRemove(value uint16, preds *[maxLevel]*uint16Node, succs *[maxLevel]*uint16Node) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Uint16Set) findNodeAdd(value uint16, preds *[maxLevel]*uint16Node, succs *[maxLevel]*uint16Node) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockUint16(preds [maxLevel]*uint16Node, highestLevel int) { + var prevPred *uint16Node + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. 
+func (s *Uint16Set) Add(value uint16) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*uint16Node + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint16Node + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint16(preds, highestLocked) + continue + } + + nn := newUint16Node(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint16(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Uint16Set) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Uint16Set) Contains(value uint16) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *Uint16Set) Remove(value uint16) bool { + var ( + nodeToRemove *uint16Node + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint16Node + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint16Node + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint16(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockUint16(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *Uint16Set) Range(f func(value uint16) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Uint16Set) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Uint16SetDesc represents a set based on skip list in descending order. +type Uint16SetDesc struct { + header *uint16NodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type uint16NodeDesc struct { + value uint16 + next optionalArray // [level]*uint16NodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUint16NodeDesc(value uint16, level int) *uint16NodeDesc { + node := &uint16NodeDesc{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uint16NodeDesc) loadNext(i int) *uint16NodeDesc { + return (*uint16NodeDesc)(n.next.load(i)) +} + +func (n *uint16NodeDesc) storeNext(i int, node *uint16NodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uint16NodeDesc) atomicLoadNext(i int) *uint16NodeDesc { + return (*uint16NodeDesc)(n.next.atomicLoad(i)) +} + +func (n *uint16NodeDesc) atomicStoreNext(i int, node *uint16NodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uint16NodeDesc) lessthan(value uint16) bool { + return n.value > value +} + +func (n *uint16NodeDesc) equal(value uint16) bool { + return n.value == value +} + +// NewUint16Desc return an empty uint16 skip set in descending order. +func NewUint16Desc() *Uint16SetDesc { + h := newUint16NodeDesc(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &Uint16SetDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Uint16SetDesc) findNodeRemove(value uint16, preds *[maxLevel]*uint16NodeDesc, succs *[maxLevel]*uint16NodeDesc) int { + // lFound represents the index of the first layer at which it found a node. 
+ lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *Uint16SetDesc) findNodeAdd(value uint16, preds *[maxLevel]*uint16NodeDesc, succs *[maxLevel]*uint16NodeDesc) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockUint16Desc(preds [maxLevel]*uint16NodeDesc, highestLevel int) { + var prevPred *uint16NodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. +func (s *Uint16SetDesc) Add(value uint16) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*uint16NodeDesc + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint16NodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint16Desc(preds, highestLocked) + continue + } + + nn := newUint16NodeDesc(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *Uint16SetDesc) randomlevel() int { + // Generate random level. 
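+	// (Each additional level is kept with probability p = 0.25 and capped at maxLevel = 16; see util.go.)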
+ level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *Uint16SetDesc) Contains(value uint16) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *Uint16SetDesc) Remove(value uint16) bool { + var ( + nodeToRemove *uint16NodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uint16NodeDesc + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uint16NodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint16Desc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockUint16Desc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *Uint16SetDesc) Range(f func(value uint16) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *Uint16SetDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// UintSet represents a set based on skip list in ascending order. 
+type UintSet struct { + header *uintNode + length int64 + highestLevel int64 // highest level for now +} + +type uintNode struct { + value uint + next optionalArray // [level]*uintNode + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUintNode(value uint, level int) *uintNode { + node := &uintNode{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uintNode) loadNext(i int) *uintNode { + return (*uintNode)(n.next.load(i)) +} + +func (n *uintNode) storeNext(i int, node *uintNode) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uintNode) atomicLoadNext(i int) *uintNode { + return (*uintNode)(n.next.atomicLoad(i)) +} + +func (n *uintNode) atomicStoreNext(i int, node *uintNode) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uintNode) lessthan(value uint) bool { + return n.value < value +} + +func (n *uintNode) equal(value uint) bool { + return n.value == value +} + +// NewUint return an empty uint skip set in ascending order. +func NewUint() *UintSet { + h := newUintNode(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &UintSet{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *UintSet) findNodeRemove(value uint, preds *[maxLevel]*uintNode, succs *[maxLevel]*uintNode) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *UintSet) findNodeAdd(value uint, preds *[maxLevel]*uintNode, succs *[maxLevel]*uintNode) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockUint(preds [maxLevel]*uintNode, highestLevel int) { + var prevPred *uintNode + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. 
+func (s *UintSet) Add(value uint) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*uintNode + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNode + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint(preds, highestLocked) + continue + } + + nn := newUintNode(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUint(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *UintSet) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *UintSet) Contains(value uint) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *UintSet) Remove(value uint) bool { + var ( + nodeToRemove *uintNode + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uintNode + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNode + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUint(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockUint(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *UintSet) Range(f func(value uint) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *UintSet) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// UintSetDesc represents a set based on skip list in descending order. +type UintSetDesc struct { + header *uintNodeDesc + length int64 + highestLevel int64 // highest level for now +} + +type uintNodeDesc struct { + value uint + next optionalArray // [level]*uintNodeDesc + mu sync.Mutex + flags bitflag + level uint32 +} + +func newUintNodeDesc(value uint, level int) *uintNodeDesc { + node := &uintNodeDesc{ + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *uintNodeDesc) loadNext(i int) *uintNodeDesc { + return (*uintNodeDesc)(n.next.load(i)) +} + +func (n *uintNodeDesc) storeNext(i int, node *uintNodeDesc) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *uintNodeDesc) atomicLoadNext(i int) *uintNodeDesc { + return (*uintNodeDesc)(n.next.atomicLoad(i)) +} + +func (n *uintNodeDesc) atomicStoreNext(i int, node *uintNodeDesc) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +func (n *uintNodeDesc) lessthan(value uint) bool { + return n.value > value +} + +func (n *uintNodeDesc) equal(value uint) bool { + return n.value == value +} + +// NewUintDesc return an empty uint skip set in descending order. +func NewUintDesc() *UintSetDesc { + h := newUintNodeDesc(0, maxLevel) + h.flags.SetTrue(fullyLinked) + return &UintSetDesc{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *UintSetDesc) findNodeRemove(value uint, preds *[maxLevel]*uintNodeDesc, succs *[maxLevel]*uintNodeDesc) int { + // lFound represents the index of the first layer at which it found a node. 
+ lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.equal(value) { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *UintSetDesc) findNodeAdd(value uint, preds *[maxLevel]*uintNodeDesc, succs *[maxLevel]*uintNodeDesc) int { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.lessthan(value) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.equal(value) { + return i + } + } + return -1 +} + +func unlockUintDesc(preds [maxLevel]*uintNodeDesc, highestLevel int) { + var prevPred *uintNodeDesc + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. +func (s *UintSetDesc) Add(value uint) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*uintNodeDesc + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNodeDesc + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockUintDesc(preds, highestLocked) + continue + } + + nn := newUintNodeDesc(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockUintDesc(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *UintSetDesc) randomlevel() int { + // Generate random level. 
+ level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *UintSetDesc) Contains(value uint) bool { + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.lessthan(value) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.equal(value) { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *UintSetDesc) Remove(value uint) bool { + var ( + nodeToRemove *uintNodeDesc + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*uintNodeDesc + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *uintNodeDesc + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockUintDesc(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockUintDesc(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *UintSetDesc) Range(f func(value uint) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *UintSetDesc) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// StringSet represents a set based on skip list. 
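+// Note that string values are ordered by their wyhash score first and only then by
+// the raw string (via runtime cmpstring), so iteration order is deterministic but
+// not lexicographic.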
+type StringSet struct { + header *stringNode + length int64 + highestLevel int64 // highest level for now +} + +type stringNode struct { + value string + score uint64 + next optionalArray // [level]*stringNode + mu sync.Mutex + flags bitflag + level uint32 +} + +func newStringNode(value string, level int) *stringNode { + node := &stringNode{ + score: hash(value), + value: value, + level: uint32(level), + } + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *stringNode) loadNext(i int) *stringNode { + return (*stringNode)(n.next.load(i)) +} + +func (n *stringNode) storeNext(i int, node *stringNode) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *stringNode) atomicLoadNext(i int) *stringNode { + return (*stringNode)(n.next.atomicLoad(i)) +} + +func (n *stringNode) atomicStoreNext(i int, node *stringNode) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +// NewString return an empty string skip set. +func NewString() *StringSet { + h := newStringNode("", maxLevel) + h.flags.SetTrue(fullyLinked) + return &StringSet{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *StringSet) findNodeRemove(value string, preds *[maxLevel]*stringNode, succs *[maxLevel]*stringNode) int { + score := hash(value) + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.cmp(score, value) < 0 { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if lFound == -1 && succ != nil && succ.cmp(score, value) == 0 { + lFound = i + } + } + return lFound +} + +// findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set. +// The returned preds and succs always satisfy preds[i] > value >= succs[i]. +func (s *StringSet) findNodeAdd(value string, preds *[maxLevel]*stringNode, succs *[maxLevel]*stringNode) int { + score := hash(value) + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && succ.cmp(score, value) < 0 { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the value already in the skip list. + if succ != nil && succ.cmp(score, value) == 0 { + return i + } + } + return -1 +} + +func unlockString(preds [maxLevel]*stringNode, highestLevel int) { + var prevPred *stringNode + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Add add the value into skip set, return true if this process insert the value into skip set, +// return false if this process can't insert this value, because another process has insert the same value. +// +// If the value is in the skip set but not fully linked, this process will wait until it is. 
+func (s *StringSet) Add(value string) bool { + level := s.randomlevel() + var preds, succs [maxLevel]*stringNode + for { + lFound := s.findNodeAdd(value, &preds, &succs) + if lFound != -1 { // indicating the value is already in the skip-list + nodeFound := succs[lFound] + if !nodeFound.flags.Get(marked) { + for !nodeFound.flags.Get(fullyLinked) { + // The node is not yet fully linked, just waits until it is. + } + return false + } + // If the node is marked, represents some other thread is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringNode + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockString(preds, highestLocked) + continue + } + + nn := newStringNode(value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockString(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return true + } +} + +func (s *StringSet) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadInt64(&s.highestLevel) + if int64(level) <= hl { + break + } + if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) { + break + } + } + return level +} + +// Contains check if the value is in the skip set. +func (s *StringSet) Contains(value string) bool { + score := hash(value) + x := s.header + for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && nex.cmp(score, value) < 0 { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the value already in the skip list. + if nex != nil && nex.cmp(score, value) == 0 { + return nex.flags.MGet(fullyLinked|marked, fullyLinked) + } + } + return false +} + +// Remove a node from the skip set. +func (s *StringSet) Remove(value string) bool { + var ( + nodeToRemove *stringNode + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*stringNode + ) + for { + lFound := s.findNodeRemove(value, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToRemove = succs[lFound] + topLayer = lFound + nodeToRemove.mu.Lock() + if nodeToRemove.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToRemove.mu.Unlock() + return false + } + nodeToRemove.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. 
+ var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringNode + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is removed by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockString(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToRemove`, no other goroutine will modify it. + // So we don't need `nodeToRemove.loadNext` + preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i)) + } + nodeToRemove.mu.Unlock() + unlockString(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each value present in the skip set. +// If f returns false, range stops the iteration. +func (s *StringSet) Range(f func(value string) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.value) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len return the length of this skip set. +func (s *StringSet) Len() int { + return int(atomic.LoadInt64(&s.length)) +} + +// Return 1 if n is bigger, 0 if equal, else -1. +func (n *stringNode) cmp(score uint64, value string) int { + if n.score > score { + return 1 + } else if n.score == score { + return cmpstring(n.value, value) + } + return -1 +} diff --git a/structure/skipset/types_gen.go b/structure/skipset/types_gen.go new file mode 100644 index 0000000..a059864 --- /dev/null +++ b/structure/skipset/types_gen.go @@ -0,0 +1,214 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build ignore +// +build ignore + +package main + +import ( + "bytes" + "go/format" + "io/ioutil" + "os" + "strings" +) + +func main() { + f, err := os.Open("skipset.go") + if err != nil { + panic(err) + } + filedata, err := ioutil.ReadAll(f) + if err != nil { + panic(err) + } + + w := new(bytes.Buffer) + w.WriteString(`// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +`) + // Step 1. Add file header + w.WriteString(`// Code generated by go run types_gen.go; DO NOT EDIT.` + "\r\n") + // Step 2. Add imports and package statement + w.WriteString(string(filedata)[strings.Index(string(filedata), "package skipset") : strings.Index(string(filedata), ")\n")+1]) + // Step 3. Generate code for all basic types + ts := []string{"Float32", "Float64", "Int32", "Int16", "Int", "Uint64", "Uint32", "Uint16", "Uint"} // all types need to be converted + for _, upper := range ts { + data := string(filedata) + // Step 4-1. Remove all string before import + data = data[strings.Index(data, ")\n")+1:] + // Step 4-2. Replace all cases + dataDesc := replace(data, upper, true) + dataAsc := replace(data, upper, false) + w.WriteString(dataAsc) + w.WriteString("\r\n") + w.WriteString(dataDesc) + w.WriteString("\r\n") + } + // Step 5. Generate string set + data := string(filedata) + data = data[strings.Index(data, ")\n")+1:] + datastring := replaceString(data) + w.WriteString(datastring) + w.WriteString("\r\n") + + out, err := format.Source(w.Bytes()) + if err != nil { + panic(err) + } + + if err := ioutil.WriteFile("types.go", out, 0660); err != nil { + panic(err) + } +} + +func replace(data string, upper string, desc bool) string { + lower := strings.ToLower(upper) + + var descstr string + if desc { + descstr = "Desc" + } + data = strings.Replace(data, "NewInt64", "New"+upper+descstr, -1) + data = strings.Replace(data, "newInt64Node", "new"+upper+"Node"+descstr, -1) + data = strings.Replace(data, "unlockInt64", "unlock"+upper+descstr, -1) + data = strings.Replace(data, "Int64Set", upper+"Set"+descstr, -1) + data = strings.Replace(data, "int64Node", lower+"Node"+descstr, -1) + data = strings.Replace(data, "value int64", "value "+lower, -1) + data = strings.Replace(data, "int64 skip set", lower+" skip set", -1) // comment + + if desc { + // Special cases for DESC. + data = strings.Replace(data, "ascending", "descending", -1) + data = strings.Replace(data, "return n.value < value", "return n.value > value", -1) + } + return data +} + +func replaceString(data string) string { + const ( + upper = "String" + lower = "string" + ) + + // Add `score uint64` field. + data = strings.Replace(data, + `type int64Node struct { + value int64`, + `type int64Node struct { + value int64 + score uint64`, -1) + + data = strings.Replace(data, + `&int64Node{`, + `&int64Node{ + score: hash(value),`, -1) + + // Refactor comparsion. + data = data + "\n" + data += `// Return 1 if n is bigger, 0 if equal, else -1. 
+func (n *stringNode) cmp(score uint64, value string) int { + if n.score > score { + return 1 + } else if n.score == score { + return cmpstring(n.value, value) + } + return -1 +}` + + data = strings.Replace(data, + `.lessthan(value)`, + `.cmp(score, value) < 0`, -1) + data = strings.Replace(data, + `.equal(value)`, + `.cmp(score, value) == 0`, -1) + + // Remove `lessthan` and `equal` + data = strings.Replace(data, + `func (n *int64Node) lessthan(value int64) bool { + return n.value < value +}`, "", -1) + data = strings.Replace(data, + `func (n *int64Node) equal(value int64) bool { + return n.value == value +}`, "", -1) + + // Add "score := hash(value)" + data = addLineAfter(data, "func (s *Int64Set) findNodeRemove", "score := hash(value)") + data = addLineAfter(data, "func (s *Int64Set) findNodeAdd", "score := hash(value)") + data = addLineAfter(data, "func (s *Int64Set) Contains", "score := hash(value)") + + // Update new value "newInt64Node(0" + data = strings.Replace(data, + "newInt64Node(0", `newInt64Node(""`, -1) + + data = strings.Replace(data, "NewInt64", "New"+upper, -1) + data = strings.Replace(data, "newInt64Node", "new"+upper+"Node", -1) + data = strings.Replace(data, "unlockInt64", "unlock"+upper, -1) + data = strings.Replace(data, "Int64Set", upper+"Set", -1) + data = strings.Replace(data, "int64Node", lower+"Node", -1) + data = strings.Replace(data, "value int64", "value "+lower, -1) + data = strings.Replace(data, "int64 skip set", lower+" skip set", -1) // comment + data = strings.Replace(data, " in ascending order", "", -1) // comment + + return data +} + +func lowerSlice(s []string) []string { + n := make([]string, len(s)) + for i, v := range s { + n[i] = strings.ToLower(v) + } + return n +} + +func inSlice(s []string, val string) bool { + for _, v := range s { + if v == val { + return true + } + } + return false +} + +func addLineAfter(src string, after string, added string) string { + all := strings.Split(string(src), "\n") + for i, v := range all { + if strings.Index(v, after) != -1 { + res := make([]string, len(all)+1) + for j := 0; j <= i; j++ { + res[j] = all[j] + } + res[i+1] = added + for j := i + 1; j < len(all); j++ { + res[j+1] = all[j] + } + return strings.Join(res, "\n") + } + } + panic("can not find:" + after) +} diff --git a/structure/skipset/util.go b/structure/skipset/util.go new file mode 100644 index 0000000..9111bb8 --- /dev/null +++ b/structure/skipset/util.go @@ -0,0 +1,45 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package skipset
+
+import (
+	"github.com/songzhibin97/gkit/internal/wyhash"
+	"github.com/songzhibin97/gkit/sys/fastrand"
+	_ "unsafe" // for linkname
+)
+
+const (
+	maxLevel            = 16
+	p                   = 0.25
+	defaultHighestLevel = 3
+)
+
+func hash(s string) uint64 {
+	return wyhash.Sum64String(s)
+}
+
+//go:linkname cmpstring runtime.cmpstring
+func cmpstring(a, b string) int
+
+func randomLevel() int {
+	level := 1
+	for fastrand.Uint32n(1/p) == 0 {
+		level++
+	}
+	if level > maxLevel {
+		return maxLevel
+	}
+	return level
+}
diff --git a/structure/zset/README.md b/structure/zset/README.md
new file mode 100644
index 0000000..294827e
--- /dev/null
+++ b/structure/zset/README.md
@@ -0,0 +1,113 @@
+# zset
+
+## Introduction
+
+zset provides a concurrency-safe sorted set that can be used as a local replacement
+of [Redis' zset](https://redis.com/ebook/part-2-core-concepts/chapter-3-commands-in-redis/3-5-sorted-sets/).
+
+The main difference from other sets is that every value in the set is associated with a score,
+which is used to keep the sorted set ordered, from the smallest to the greatest score.
+
+The zset has `O(log(N))` time complexity for Add (ZADD) and Remove (ZREM) operations and `O(1)` time complexity
+for Contains operations.
+
+## Features
+
+- Concurrency-safe API
+- Values are sorted by score
+- Implementation equivalent to Redis
+- Fast skiplist level randomization
+
+## Comparison
+
+| Redis command    | Go function        |
+|------------------|--------------------|
+| ZADD             | Add                |
+| ZINCRBY          | IncrBy             |
+| ZREM             | Remove             |
+| ZREMRANGEBYSCORE | RemoveRangeByScore |
+| ZREMRANGEBYRANK  | RemoveRangeByRank  |
+| ZUNION           | Union              |
+| ZINTER           | Inter              |
+| ZINTERCARD       | *TODO*             |
+| ZDIFF            | *TODO*             |
+| ZRANGE           | Range              |
+| ZRANGEBYSCORE    | RangeByScore       |
+| ZREVRANGEBYSCORE | RevRangeByScore    |
+| ZCOUNT           | Count              |
+| ZREVRANGE        | RevRange           |
+| ZCARD            | Len                |
+| ZSCORE           | Score              |
+| ZRANK            | Rank               |
+| ZREVRANK         | RevRank            |
+| ZPOPMIN          | *TODO*             |
+| ZPOPMAX          | *TODO*             |
+| ZRANDMEMBER      | *TODO*             |
+
+The list of Redis commands above was generated with the following command:
+
+```bash
+cat redis/src/server.c | grep -o '"z.*",z.*Command' | grep -o '".*"' | cut -d '"' -f2
+```
+
+Not every Redis command has a corresponding Go implementation;
+the reasons are as follows:
+
+### Unsupported Commands
+
+Redis' zset can operate on elements in lexicographic order, which is not a commonly
+used feature, so this zset does not support commands such as ZREMRANGEBYLEX, ZLEXCOUNT
+and so on.
+
+| Redis command   |
+|-----------------|
+| ZREMRANGEBYLEX  |
+| ZRANGEBYLEX     |
+| ZREVRANGEBYLEX  |
+| ZLEXCOUNT       |
+
+In Redis, the user accesses a zset via a string key. We do not need such a string key
+because the set is already referenced by a Go variable,
so the following commands are not implemented: + +| Redis command | +|-----------------------| +| ZUNIONSTORE | +| ZINTERSTORE | +| ZDIFFSTORE | +| ZRANGESTORE | +| ZMSCORE | +| ZSCAN | + +## QuickStart + +```go +package main + +import ( + "fmt" + + "github.com/songzhibin97/gkit/structure/zset" +) + +func main() { + z := zset.NewFloat64() + + values := []string{"Alice", "Bob", "Zhang"} + scores := []float64{90, 89, 59.9} + for i := range values { + z.Add(scores[i], values[i]) + } + + s, ok := z.Score("Alice") + if ok { + fmt.Println("Alice's score is", s) + } + + n := z.Count(0, 60) + fmt.Println("There are", n, "people below 60 points") + + for _, n := range z.Range(0, -1) { + fmt.Println("zset range found", n.Value, n.Score) + } +} +``` diff --git a/structure/zset/oparry.go b/structure/zset/oparry.go new file mode 100644 index 0000000..0a5bd76 --- /dev/null +++ b/structure/zset/oparry.go @@ -0,0 +1,69 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zset + +import ( + "unsafe" +) + +const ( + op1 = 4 + op2 = maxLevel - op1 // TODO: not sure that whether 4 is the best number for op1([28]Pointer for op2). +) + +type listLevel struct { + next unsafe.Pointer // the forward pointer + span int // span is count of level 0 element to next element in current level +} +type optionalArray struct { + base [op1]listLevel + extra *([op2]listLevel) +} + +func (a *optionalArray) init(level int) { + if level > op1 { + a.extra = new([op2]listLevel) + } +} + +func (a *optionalArray) loadNext(i int) unsafe.Pointer { + if i < op1 { + return a.base[i].next + } + return a.extra[i-op1].next +} + +func (a *optionalArray) storeNext(i int, p unsafe.Pointer) { + if i < op1 { + a.base[i].next = p + return + } + a.extra[i-op1].next = p +} + +func (a *optionalArray) loadSpan(i int) int { + if i < op1 { + return a.base[i].span + } + return a.extra[i-op1].span +} + +func (a *optionalArray) storeSpan(i int, s int) { + if i < op1 { + a.base[i].span = s + return + } + a.extra[i-op1].span = s +} diff --git a/structure/zset/opt.go b/structure/zset/opt.go new file mode 100644 index 0000000..8281cf8 --- /dev/null +++ b/structure/zset/opt.go @@ -0,0 +1,21 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zset + +// RangeOpt describes the whether the min/max is exclusive in score range. 
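+//
+// For example (illustrative):
+//
+//	RangeOpt{}                 // min <= score <= max
+//	RangeOpt{ExcludeMin: true} // min <  score <= max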
+type RangeOpt struct { + ExcludeMin bool + ExcludeMax bool +} diff --git a/structure/zset/skiplist.go b/structure/zset/skiplist.go new file mode 100644 index 0000000..6f092b0 --- /dev/null +++ b/structure/zset/skiplist.go @@ -0,0 +1,489 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package zset + +import ( + "github.com/songzhibin97/gkit/sys/fastrand" + "math" + "unsafe" +) + +// +// Skip list implementation. +// + +const ( + maxLevel = 32 // same to ZSKIPLIST_MAXLEVEL, should be enough for 2^64 elements + probability = 0.25 // same to ZSKIPLIST_P, 1/4 +) + +// float64ListNode is node of float64List. +type float64ListNode struct { + score float64 // key for sorting, which is allowed to be repeated + value string + prev *float64ListNode // back pointer that only available at level 1 + level int // the length of optionalArray + oparr optionalArray +} + +func newFloat64ListNode(score float64, value string, level int) *float64ListNode { + node := &float64ListNode{ + score: score, + value: value, + level: level, + } + node.oparr.init(level) + return node +} + +func (n *float64ListNode) loadNext(i int) *float64ListNode { + return (*float64ListNode)(n.oparr.loadNext(i)) +} + +func (n *float64ListNode) storeNext(i int, node *float64ListNode) { + n.oparr.storeNext(i, unsafe.Pointer(node)) +} + +func (n *float64ListNode) loadSpan(i int) int { + return n.oparr.loadSpan(i) +} + +func (n *float64ListNode) storeSpan(i int, span int) { + n.oparr.storeSpan(i, span) +} + +func (n *float64ListNode) loadNextAndSpan(i int) (*float64ListNode, int) { + return n.loadNext(i), n.loadSpan(i) +} + +func (n *float64ListNode) storeNextAndSpan(i int, next *float64ListNode, span int) { + n.storeNext(i, next) + n.storeSpan(i, span) +} + +func (n *float64ListNode) lessThan(score float64, value string) bool { + if n.score < score { + return true + } + if n.score == score { + return n.value < value + } + return false +} + +func (n *float64ListNode) lessEqual(score float64, value string) bool { + if n.score < score { + return true + } + if n.score == score { + return n.value <= value + } + return false +} + +func (n *float64ListNode) equal(score float64, value string) bool { + return n.value == value && n.score == score +} + +// float64List is a specialized skip list implementation for sorted set. +// +// It is almost implement the original +// algorithm described by William Pugh in " Lists: A Probabilistic +// Alternative to Balanced Trees", modified in three ways: +// a) this implementation allows for repeated scores. +// b) the comparison is not just by key (our 'score') but by satellite data(?). +// c) there is a back pointer, so it's a doubly linked list with the back +// pointers being only at "level 1". This allows to traverse the list +// from tail to head, useful for RevRange. 
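+//
+// Nodes are ordered by the (score, value) pair: scores are compared first and, for
+// equal scores, values are compared as plain strings (see lessThan/lessEqual above).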
+type float64List struct { + header *float64ListNode + tail *float64ListNode + length int + highestLevel int // highest level for now +} + +func newFloat64List() *float64List { + l := &float64List{ + header: newFloat64ListNode(-math.MaxFloat64, "__HEADER", maxLevel), // FIXME: + highestLevel: 1, + } + return l +} + +// Insert inserts a new node in the skiplist. Assumes the element does not already +// exist (up to the caller to enforce that). +func (l *float64List) Insert(score float64, value string) *float64ListNode { + var ( + update [maxLevel]*float64ListNode + rank [maxLevel + 1]int // +1 for eliminating a boundary judgment + ) + + x := l.header + for i := l.highestLevel - 1; i >= 0; i-- { + rank[i] = rank[i+1] // also fine when i == maxLevel - 1 + next := x.loadNext(i) + for next != nil && next.lessThan(score, value) { + rank[i] += x.loadSpan(i) + x = next + next = x.loadNext(i) + } + update[i] = x + } + + // We assume the element is not already inside, since we allow duplicated + // scores, reinserting the same element should never happen since the + // caller of Add() should test in the hash table if the element is + // already inside or not. + level := l.randomLevel() + if level > l.highestLevel { + // Create higher levels. + for i := l.highestLevel; i < level; i++ { + rank[i] = 0 + update[i] = l.header + update[i].storeSpan(i, l.length) + } + l.highestLevel = level + } + x = newFloat64ListNode(score, value, level) + for i := 0; i < level; i++ { + // update --> x --> update.next + x.storeNext(i, update[i].loadNext(i)) + update[i].storeNext(i, x) + // update[i].span is splitted to: new update[i].span and x.span + x.storeSpan(i, update[i].loadSpan(i)-(rank[0]-rank[i])) + update[i].storeSpan(i, (rank[0]-rank[i])+1) + } + // Increment span for untouched levels. + for i := level; i < l.highestLevel; i++ { + update[i].storeSpan(i, update[i].loadSpan(i)+1) + } + + // Update back pointer. + if update[0] != l.header { + x.prev = update[0] + } + + if next := x.loadNext(0); next != nil { // not tail of skiplist + next.prev = x + } else { + l.tail = x + } + l.length++ + + return x +} + +// randomLevel returns a level between [1, maxLevel] for insertion. +func (l *float64List) randomLevel() int { + level := 1 + for fastrand.Uint32n(1/probability) == 0 { + level++ + } + if level > maxLevel { + return maxLevel + } + return level +} + +// Rank finds the rank for an element by both score and value. +// Returns 0 when the element cannot be found, rank otherwise. +// +// NOTE: the rank is 1-based due to the span of l->header to the +// first element. +func (l *float64List) Rank(score float64, value string) int { + rank := 0 + x := l.header + for i := l.highestLevel - 1; i >= 0; i-- { + next := x.loadNext(i) + for next != nil && next.lessEqual(score, value) { + rank += x.loadSpan(i) + x = next + next = x.loadNext(i) + } + + // x might be equal to l->header, so test if obj is non-nil + // TODO: Why not use if x != l.header? + if x.equal(score, value) { + return rank + } + } + return 0 +} + +// deleteNode is a internal function for deleting node x in O(1) time by giving a +// update position matrix. +func (l *float64List) deleteNode(x *float64ListNode, update *[maxLevel]*float64ListNode) { + for i := 0; i < l.highestLevel; i++ { + if update[i].loadNext(i) == x { + // Remove x, updaet[i].span = updaet[i].span + x.span - 1 (x removed). 
+ next, span := x.loadNextAndSpan(i) + span += update[i].loadSpan(i) - 1 + update[i].storeNextAndSpan(i, next, span) + } else { + // x does not appear on this level, just update span. + update[i].storeSpan(i, update[i].loadSpan(i)-1) + } + } + if next := x.loadNext(0); next != nil { // not tail of skiplist + next.prev = x.prev + } else { + l.tail = x.prev + } + for l.highestLevel > 1 && l.header.loadNext(l.highestLevel-1) != nil { + // Clear the pointer and span for safety. + l.header.storeNextAndSpan(l.highestLevel-1, nil, 0) + l.highestLevel-- + } + l.length-- +} + +// Delete deletes an element with matching score/element from the skiplist. +// The deleted node is returned if the node was found, otherwise 0 is returned. +func (l *float64List) Delete(score float64, value string) *float64ListNode { + var update [maxLevel]*float64ListNode + + x := l.header + for i := l.highestLevel - 1; i >= 0; i-- { + next := x.loadNext(i) + for next != nil && next.lessThan(score, value) { + x = next + next = x.loadNext(i) + } + update[i] = x + } + x = x.loadNext(0) + if x != nil && x.equal(score, value) { + l.deleteNode(x, &update) + return x + } + return nil // not found +} + +// UpdateScore updates the score of an element inside the sorted set skiplist. +// +// NOTE: the element must exist and must match 'score'. +// This function does not update the score in the hash table side, the +// caller should take care of it. +// +// NOTE: this function attempts to just update the node, in case after +// the score update, the node would be exactly at the same position. +// Otherwise the skiplist is modified by removing and re-adding a new +// element, which is more costly. +// +// The function returns the updated element skiplist node pointer. +func (l *float64List) UpdateScore(oldScore float64, value string, newScore float64) *float64ListNode { + var update [maxLevel]*float64ListNode + + x := l.header + for i := l.highestLevel - 1; i >= 0; i-- { + next := x.loadNext(i) + for next != nil && next.lessThan(oldScore, value) { + x = next + next = x.loadNext(i) + } + update[i] = x + } + + // Jump to our element: note that this function assumes that the + // element with the matching score exists. + x = x.loadNext(0) + + // Fastpath: If the node, after the score update, would be still exactly + // at the same position, we can just update the score without + // actually removing and re-inserting the element in the skiplist. + if next := x.loadNext(0); (x.prev == nil || x.prev.score < newScore) && + (next == nil || next.score > newScore) { + x.score = newScore + return x + } + + // No way to reuse the old node: we need to remove and insert a new + // one at a different place. + v := x.value + l.deleteNode(x, &update) + newNode := l.Insert(newScore, v) + return newNode +} + +func greaterThanMin(value float64, min float64, ex bool) bool { + if ex { + return value > min + } + return value >= min +} + +func lessThanMax(value float64, max float64, ex bool) bool { + if ex { + return value < max + } + return value <= max +} + +// DeleteRangeByScore deletes all the elements with score between min and max +// from the skiplist. +// Both min and max can be inclusive or exclusive (see RangeOpt). +// When inclusive a score >= min && score <= max is deleted. +// +// This function returns count of deleted elements. 
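+// (More precisely, the removed elements are returned as a []Float64Node slice, and
+// their entries are also deleted from the given dict.)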
+func (l *float64List) DeleteRangeByScore(min, max float64, opt RangeOpt, dict map[string]float64) []Float64Node { + var ( + update [maxLevel]*float64ListNode + removed []Float64Node + ) + + x := l.header + for i := l.highestLevel - 1; i >= 0; i-- { + next := x.loadNext(i) + for next != nil && !greaterThanMin(next.score, min, opt.ExcludeMin) { + x = next + next = x.loadNext(i) + } + update[i] = x + } + + // Current node is the last with score not greater than min. + x = x.loadNext(0) + + // Delete nodes in range. + for x != nil && lessThanMax(x.score, max, opt.ExcludeMax) { + next := x.loadNext(0) + l.deleteNode(x, &update) + delete(dict, x.value) + removed = append(removed, Float64Node{ + Value: x.value, + Score: x.score, + }) + x = next + } + + return removed +} + +// Delete all the elements with rank between start and end from the skiplist. +// Start and end are inclusive. +// +// NOTE: start and end need to be 1-based +func (l *float64List) DeleteRangeByRank(start, end int, dict map[string]float64) []Float64Node { + var ( + update [maxLevel]*float64ListNode + removed []Float64Node + traversed int + ) + + x := l.header + for i := l.highestLevel - 1; i >= 0; i-- { + next, span := x.loadNextAndSpan(i) + for next != nil && traversed+span < start { + traversed += span + x = next + next, span = x.loadNextAndSpan(i) + } + update[i] = x + } + + traversed++ + x = x.loadNext(0) + // Delete nodes in range. + for x != nil && traversed <= end { + next := x.loadNext(0) + l.deleteNode(x, &update) + delete(dict, x.value) + removed = append(removed, Float64Node{ + Value: x.value, + Score: x.score, + }) + traversed++ + x = next + } + return removed +} + +// GetNodeByRank finds an element by its rank. The rank argument needs to be 1-based. +func (l *float64List) GetNodeByRank(rank int) *float64ListNode { + var traversed int + + x := l.header + for i := l.highestLevel - 1; i >= 0; i-- { + next, span := x.loadNextAndSpan(i) + for next != nil && traversed+span <= rank { + traversed += span + x = next + next, span = x.loadNextAndSpan(i) + } + if traversed == rank { + return x + } + } + return nil +} + +// FirstInRange finds the first node that is contained in the specified range. +func (l *float64List) FirstInRange(min, max float64, opt RangeOpt) *float64ListNode { + if !l.IsInRange(min, max, opt) { + return nil + } + + x := l.header + for i := l.highestLevel - 1; i >= 0; i-- { + next := x.loadNext(i) + for next != nil && !greaterThanMin(next.score, min, opt.ExcludeMin) { + x = next + next = x.loadNext(i) + } + } + + // The next node MUST not be NULL (excluded by IsInRange). + x = x.loadNext(0) + if !lessThanMax(x.score, max, opt.ExcludeMax) { + return nil + } + return x +} + +// LastInRange finds the last node that is contained in the specified range. +func (l *float64List) LastInRange(min, max float64, opt RangeOpt) *float64ListNode { + if !l.IsInRange(min, max, opt) { + return nil + } + + x := l.header + for i := l.highestLevel - 1; i >= 0; i-- { + next := x.loadNext(i) + for next != nil && lessThanMax(next.score, max, opt.ExcludeMax) { + x = next + next = x.loadNext(i) + } + } + + // The node x must not be NULL (excluded by IsInRange). + if !greaterThanMin(x.score, min, opt.ExcludeMin) { + return nil + } + return x +} + +// IsInRange returns whether there is a port of sorted set in given range. +func (l *float64List) IsInRange(min, max float64, opt RangeOpt) bool { + // Test empty range. 
+ if min > max || (min == max && (opt.ExcludeMin || opt.ExcludeMax)) { + return false + } + if l.tail == nil || !greaterThanMin(l.tail.score, min, opt.ExcludeMin) { + return false + } + if next := l.header.loadNext(0); next == nil || !lessThanMax(next.score, max, opt.ExcludeMax) { + return false + } + return true +} diff --git a/structure/zset/zset.go b/structure/zset/zset.go new file mode 100644 index 0000000..48acc66 --- /dev/null +++ b/structure/zset/zset.go @@ -0,0 +1,411 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package zset provides a concurrent-safety sorted set, can be used as a local +// replacement of Redis' zset (https://redis.com/ebook/part-2-core-concepts/chapter-3-commands-in-redis/3-5-sorted-sets/). +// +// The main different to other sets is, every value of set is associated with a score, +// that is used in order to take the sorted set ordered, from the smallest to the greatest score. +// +// The sorted set has O(log(N)) time complexity when doing Add(ZADD) and +// Remove(ZREM) operations and O(1) time complexity when doing Contains operations. +package zset + +import ( + "sync" +) + +// Float64Node represents an element of Float64Set. +type Float64Node struct { + Value string + Score float64 +} + +// Float64Set is a sorted set implementation with string value and float64 score. +type Float64Set struct { + mu sync.RWMutex + dict map[string]float64 + list *float64List +} + +// NewFloat64 returns an empty string sorted set with int score. +// strings are sorted in ascending order. +func NewFloat64() *Float64Set { + return &Float64Set{ + dict: make(map[string]float64), + list: newFloat64List(), + } +} + +// UnionFloat64 returns the union of given sorted sets, the resulting score of +// a value is the sum of its scores in the sorted sets where it exists. +// +// UnionFloat64 is the replacement of UNIONSTORE command of redis. +func UnionFloat64(zs ...*Float64Set) *Float64Set { + dest := NewFloat64() + for _, z := range zs { + for _, n := range z.Range(0, -1) { + dest.Add(n.Score, n.Value) + } + } + return dest +} + +// InterFloat64 returns the intersection of given sorted sets, the resulting +// score of a value is the sum of its scores in the sorted sets where it exists. +// +// InterFloat64 is the replacement of INTERSTORE command of redis. +func InterFloat64(zs ...*Float64Set) *Float64Set { + dest := NewFloat64() + if len(zs) == 0 { + return dest + } + for _, n := range zs[0].Range(0, -1) { + ok := true + for _, z := range zs[1:] { + if !z.Contains(n.Value) { + ok = false + break + } + } + if ok { + dest.Add(n.Score, n.Value) + } + } + return dest +} + +// Len returns the length of Float64Set. +// +// Len is the replacement of ZCARD command of redis. +func (z *Float64Set) Len() int { + z.mu.RLock() + defer z.mu.RUnlock() + + return z.list.length +} + +// Add adds a new value or update the score of an existing value. +// Returns true if the value is newly created. +// +// Add is the replacement of ZADD command of redis. 
+func (z *Float64Set) Add(score float64, value string) bool { + z.mu.Lock() + defer z.mu.Unlock() + + oldScore, ok := z.dict[value] + if ok { + // Update score if need. + if score != oldScore { + _ = z.list.UpdateScore(oldScore, value, score) + z.dict[value] = score + } + return false + } + + // Insert a new element. + z.list.Insert(score, value) + z.dict[value] = score + return true +} + +// Remove removes a value from the sorted set. +// Returns score of the removed value and true if the node was found and deleted, +// otherwise returns (0.0, false). +// +// Remove is the replacement of ZREM command of redis. +func (z *Float64Set) Remove(value string) (float64, bool) { + z.mu.Lock() + defer z.mu.Unlock() + + score, ok := z.dict[value] + if !ok { + return 0, false + } + delete(z.dict, value) + z.list.Delete(score, value) + return score, true +} + +// IncrBy increments the score of value in the sorted set by incr. +// If value does not exist in the sorted set, it is added with incr as its score +// (as if its previous score was zero). +// +// IncrBy is the replacement of ZINCRBY command of redis. +func (z *Float64Set) IncrBy(incr float64, value string) (float64, bool) { + z.mu.Lock() + defer z.mu.Unlock() + + oldScore, ok := z.dict[value] + if !ok { + // Insert a new element. + z.list.Insert(incr, value) + z.dict[value] = incr + return incr, false + } + // Update score. + newScore := oldScore + incr + _ = z.list.UpdateScore(oldScore, value, newScore) + z.dict[value] = newScore + return newScore, true +} + +// Contains returns whether the value exists in sorted set. +func (z *Float64Set) Contains(value string) bool { + _, ok := z.Score(value) + return ok +} + +// Score returns the score of the value in the sorted set. +// +// Score is the replacement of ZSCORE command of redis. +func (z *Float64Set) Score(value string) (float64, bool) { + z.mu.RLock() + defer z.mu.RUnlock() + + score, ok := z.dict[value] + return score, ok +} + +// Rank returns the rank of element in the sorted set, with the scores +// ordered from low to high. +// The rank (or index) is 0-based, which means that the member with the lowest +// score has rank 0. +// -1 is returned when value is not found. +// +// Rank is the replacement of ZRANK command of redis. +func (z *Float64Set) Rank(value string) int { + z.mu.RLock() + defer z.mu.RUnlock() + + score, ok := z.dict[value] + if !ok { + return -1 + } + // NOTE: list.Rank returns 1-based rank. + return z.list.Rank(score, value) - 1 +} + +// RevRank returns the rank of element in the sorted set, with the scores +// ordered from high to low. +// The rank (or index) is 0-based, which means that the member with the highest +// score has rank 0. +// -1 is returned when value is not found. +// +// RevRank is the replacement of ZREVRANK command of redis. +func (z *Float64Set) RevRank(value string) int { + z.mu.RLock() + defer z.mu.RUnlock() + + score, ok := z.dict[value] + if !ok { + return -1 + } + // NOTE: list.Rank returns 1-based rank. + return z.list.Rank(score, value) - 1 +} + +// Count returns the number of elements in the sorted set at element with a score +// between min and max (including elements with score equal to min or max). +// +// Count is the replacement of ZCOUNT command of redis. 
+func (z *Float64Set) Count(min, max float64) int { + return z.CountWithOpt(min, max, RangeOpt{}) +} + +func (z *Float64Set) CountWithOpt(min, max float64, opt RangeOpt) int { + z.mu.RLock() + defer z.mu.RUnlock() + + first := z.list.FirstInRange(min, max, opt) + if first == nil { + return 0 + } + // Sub 1 for 1-based rank. + firstRank := z.list.Rank(first.score, first.value) - 1 + last := z.list.LastInRange(min, max, opt) + if last == nil { + return z.list.length - firstRank + } + // Sub 1 for 1-based rank. + lastRank := z.list.Rank(last.score, last.value) - 1 + return lastRank - firstRank + 1 +} + +// Range returns the specified inclusive range of elements in the sorted set by rank(index). +// Both start and stop are 0-based, they can also be negative numbers indicating +// offsets from the end of the sorted set, with -1 being the last element of the sorted set, +// and so on. +// +// The returned elements are ordered by score, from lowest to highest. +// Elements with the same score are ordered lexicographically. +// +// This function won't panic even when the given rank out of range. +// +// NOTE: Please always use z.Range(0, -1) for iterating the whole sorted set. +// z.Range(0, z.Len()-1) has 2 method calls, the sorted set may changes during +// the gap of calls. +// +// Range is the replacement of ZRANGE command of redis. +func (z *Float64Set) Range(start, stop int) []Float64Node { + z.mu.RLock() + defer z.mu.RUnlock() + + // Convert negative rank to positive. + if start < 0 { + start = z.list.length + start + } + if stop < 0 { + stop = z.list.length + stop + } + + var res []Float64Node + x := z.list.GetNodeByRank(start + 1) // 0-based rank -> 1-based rank + for x != nil && start <= stop { + start++ + res = append(res, Float64Node{ + Score: x.score, + Value: x.value, + }) + x = x.loadNext(0) + } + return res +} + +// RangeByScore returns all the elements in the sorted set with a score +// between min and max (including elements with score equal to min or max). +// The elements are considered to be ordered from low to high scores. +// +// RangeByScore is the replacement of ZRANGEBYSCORE command of redis. +func (z *Float64Set) RangeByScore(min, max float64) []Float64Node { + return z.RangeByScoreWithOpt(min, max, RangeOpt{}) +} + +func (z *Float64Set) RangeByScoreWithOpt(min, max float64, opt RangeOpt) []Float64Node { + z.mu.RLock() + defer z.mu.RUnlock() + + var res []Float64Node + x := z.list.FirstInRange(min, max, opt) + for x != nil && (x.score < max || (!opt.ExcludeMax && x.score == max)) { + res = append(res, Float64Node{ + Score: x.score, + Value: x.value, + }) + x = x.loadNext(0) + } + return res +} + +// RevRange returns the specified inclusive range of elements in the sorted set by rank(index). +// Both start and stop are 0-based, they can also be negative numbers indicating +// offsets from the end of the sorted set, with -1 being the first element of the sorted set, +// and so on. +// +// The returned elements are ordered by score, from highest to lowest. +// Elements with the same score are ordered in reverse lexicographical ordering. +// +// This function won't panic even when the given rank out of range. +// +// NOTE: Please always use z.RevRange(0, -1) for iterating the whole sorted set. +// z.RevRange(0, z.Len()-1) has 2 method calls, the sorted set may changes during +// the gap of calls. +// +// RevRange is the replacement of ZREVRANGE command of redis. 
+func (z *Float64Set) RevRange(start, stop int) []Float64Node { + z.mu.RLock() + defer z.mu.RUnlock() + + // Convert negative rank to positive. + if start < 0 { + start = z.list.length + start + } + if stop < 0 { + stop = z.list.length + stop + } + + var res []Float64Node + x := z.list.GetNodeByRank(z.list.length - start) // 0-based rank -> 1-based rank + for x != nil && start <= stop { + start++ + res = append(res, Float64Node{ + Score: x.score, + Value: x.value, + }) + x = x.prev + } + return res +} + +// RevRangeByScore returns all the elements in the sorted set with a +// score between max and min (including elements with score equal to max or min). +// The elements are considered to be ordered from high to low scores. +// +// RevRangeByScore is the replacement of ZREVRANGEBYSCORE command of redis. +func (z *Float64Set) RevRangeByScore(max, min float64) []Float64Node { + return z.RevRangeByScoreWithOpt(max, min, RangeOpt{}) +} + +func (z *Float64Set) RevRangeByScoreWithOpt(max, min float64, opt RangeOpt) []Float64Node { + z.mu.RLock() + defer z.mu.RUnlock() + + var res []Float64Node + x := z.list.LastInRange(min, max, opt) + for x != nil && (x.score > min || (!opt.ExcludeMin && x.score == min)) { + res = append(res, Float64Node{ + Score: x.score, + Value: x.value, + }) + x = x.prev + } + return res +} + +// RemoveRangeByRank removes all elements in the sorted set stored with rank +// between start and stop. +// Both start and stop are 0-based, they can also be negative numbers indicating +// offsets from the end of the sorted set, with -1 being the last element of the sorted set, +// and so on. +// +// RemoveRangeByRank is the replacement of ZREMRANGEBYRANK command of redis. +func (z *Float64Set) RemoveRangeByRank(start, stop int) []Float64Node { + z.mu.RLock() + defer z.mu.RUnlock() + + // Convert negative rank to positive. + if start < 0 { + start = z.list.length + start + } + if stop < 0 { + stop = z.list.length + stop + } + + return z.list.DeleteRangeByRank(start+1, stop+1, z.dict) // 0-based rank -> 1-based rank +} + +// RemoveRangeByScore removes all elements in the sorted set stored with a score +// between min and max (including elements with score equal to min or max). +// +// RemoveRangeByScore is the replacement of ZREMRANGEBYSCORE command of redis. +func (z *Float64Set) RemoveRangeByScore(min, max float64) []Float64Node { + return z.RevRangeByScoreWithOpt(min, max, RangeOpt{}) +} + +func (z *Float64Set) RemoveRangeByScoreWithOpt(min, max float64, opt RangeOpt) []Float64Node { + z.mu.RLock() + defer z.mu.RUnlock() + + return z.list.DeleteRangeByScore(min, max, opt, z.dict) +} diff --git a/structure/zset/zset_bench_test.go b/structure/zset/zset_bench_test.go new file mode 100644 index 0000000..858f8b1 --- /dev/null +++ b/structure/zset/zset_bench_test.go @@ -0,0 +1,114 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zset + +import ( + "github.com/songzhibin97/gkit/sys/fastrand" + "math" + "strconv" + "testing" +) + +const initSize = 1 << 10 +const randN = math.MaxUint32 + +func BenchmarkContains100Hits(b *testing.B) { + benchmarkContainsNHits(b, 100) +} + +func BenchmarkContains50Hits(b *testing.B) { + benchmarkContainsNHits(b, 50) +} + +func BenchmarkContainsNoHits(b *testing.B) { + benchmarkContainsNHits(b, 0) +} + +func benchmarkContainsNHits(b *testing.B, n int) { + b.Run("sortedset", func(b *testing.B) { + z := NewFloat64() + var vals []string + for i := 0; i < initSize; i++ { + val := strconv.Itoa(i) + vals = append(vals, val) + if fastrand.Intn(100)+1 <= n { + z.Add(fastrand.Float64(), val) + } + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _ = z.Contains(vals[fastrand.Intn(initSize)]) + } + }) + }) +} + +func BenchmarkAdd(b *testing.B) { + benchmarkNAddNIncrNRemoveNContains(b, 100, 0, 0, 0) +} + +func Benchmark1Add99Contains(b *testing.B) { + benchmarkNAddNIncrNRemoveNContains(b, 1, 0, 0, 99) +} + +func Benchmark10Add90Contains(b *testing.B) { + benchmarkNAddNIncrNRemoveNContains(b, 10, 0, 0, 90) +} + +func Benchmark50Add50Contains(b *testing.B) { + benchmarkNAddNIncrNRemoveNContains(b, 50, 0, 0, 50) +} + +func Benchmark1Add3Incr6Remove90Contains(b *testing.B) { + benchmarkNAddNIncrNRemoveNContains(b, 1, 3, 6, 90) +} + +func benchmarkNAddNIncrNRemoveNContains(b *testing.B, nAdd, nIncr, nRemove, nContains int) { + // var anAdd, anIncr, anRemove, anContains int + + b.Run("sortedset", func(b *testing.B) { + z := NewFloat64() + var vals []string + var scores []float64 + var ops []int + for i := 0; i < initSize; i++ { + vals = append(vals, strconv.Itoa(fastrand.Intn(randN))) + scores = append(scores, fastrand.Float64()) + ops = append(ops, fastrand.Intn(100)) + } + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + r := fastrand.Intn(initSize) + val := vals[r] + if u := ops[r] + 1; u <= nAdd { + // anAdd++ + z.Add(scores[r], val) + } else if u-nAdd <= nIncr { + // anIncr++ + z.IncrBy(scores[r], val) + } else if u-nAdd-nIncr <= nRemove { + // anRemove++ + z.Remove(val) + } else if u-nAdd-nIncr-nRemove <= nContains { + // anContains++ + z.Contains(val) + } + } + }) + // b.Logf("N: %d, Add: %f, Incr: %f, Remove: %f, Contains: %f", b.N, float64(anAdd)/float64(b.N), float64(anIncr)/float64(b.N), float64(anRemove)/float64(b.N), float64(anContains)/float64(b.N)) + }) +} diff --git a/structure/zset/zset_test.go b/structure/zset/zset_test.go new file mode 100644 index 0000000..33f9abc --- /dev/null +++ b/structure/zset/zset_test.go @@ -0,0 +1,594 @@ +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zset + +import ( + "fmt" + "github.com/songzhibin97/gkit/sys/fastrand" + "math/rand" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randString(prefix string) string { + b := make([]rune, 8) + for i := range b { + b[i] = letterRunes[fastrand.Intn(len(letterRunes))] + } + return prefix + string(b) +} + +func TestFloat64Set(t *testing.T) { + z := NewFloat64() + assert.Zero(t, z.Len()) +} + +func TestFloat64SetAdd(t *testing.T) { + z := NewFloat64() + v := randString("") + assert.True(t, z.Add(1, v)) + assert.False(t, z.Add(1, v)) +} + +func TestFloat64SetContains(t *testing.T) { + z := NewFloat64() + v := randString("") + z.Add(1, v) + assert.True(t, z.Contains(v)) + assert.False(t, z.Contains("no-such-"+v)) +} + +func TestFloat64SetScore(t *testing.T) { + z := NewFloat64() + v := randString("") + s := rand.Float64() + z.Add(s, v) + as, ok := z.Score(v) + assert.True(t, ok) + assert.Equal(t, s, as) + _, ok = z.Score("no-such-" + v) + assert.False(t, ok) +} + +func TestFloat64SetIncr(t *testing.T) { + z := NewFloat64() + _, ok := z.Score("t") + assert.False(t, ok) + + // test first insert + s, ok := z.IncrBy(1, "t") + assert.False(t, ok) + assert.Equal(t, 1.0, s) + + // test regular incr + s, ok = z.IncrBy(2, "t") + assert.True(t, ok) + assert.Equal(t, 3.0, s) +} + +func TestFloat64SetRemove(t *testing.T) { + z := NewFloat64() + // test first insert + ok := z.Add(1, "t") + assert.True(t, ok) + _, ok = z.Remove("t") + assert.True(t, ok) +} + +func TestFloat64SetRank(t *testing.T) { + z := NewFloat64() + v := randString("") + z.Add(1, v) + // test rank of exist value + assert.Equal(t, 0, z.Rank(v)) + // test rank of non-exist value + assert.Equal(t, -1, z.Rank("no-such-"+v)) +} + +func TestFloat64SetRank_Many(t *testing.T) { + const N = 1000 + z := NewFloat64() + rand.Seed(time.Now().Unix()) + + var vs []string + for i := 0; i < N; i++ { + v := randString("") + z.Add(rand.Float64(), v) + vs = append(vs, v) + } + for _, v := range vs { + r := z.Rank(v) + assert.NotEqual(t, -1, r) + + // verify rank by traversing level 0 + actualRank := 0 + x := z.list.header + for x != nil { + x = x.loadNext(0) + if x.value == v { + break + } + actualRank++ + } + assert.Equal(t, v, x.value) + assert.Equal(t, r, actualRank) + } +} + +func TestFloat64SetRank_UpdateScore(t *testing.T) { + z := NewFloat64() + rand.Seed(time.Now().Unix()) + + var vs []string + for i := 0; i < 100; i++ { + v := fmt.Sprint(i) + z.Add(rand.Float64(), v) + vs = append(vs, v) + } + // Randomly update score + for i := 0; i < 100; i++ { + // 1/2 + if rand.Float64() > 0.5 { + continue + } + z.Add(float64(i), fmt.Sprint(i)) + } + + for _, v := range vs { + r := z.Rank(v) + assert.NotEqual(t, -1, r) + assert.Greater(t, z.Len(), r) + + // verify rank by traversing level 0 + actualRank := 0 + x := z.list.header + for x != nil { + x = x.loadNext(0) + if x.value == v { + break + } + actualRank++ + } + assert.Equal(t, v, x.value) + assert.Equal(t, r, actualRank) + } +} + +// Test whether the ramdom inserted values sorted +func TestFloat64SetIsSorted(t *testing.T) { + const N = 1000 + z := NewFloat64() + rand.Seed(time.Now().Unix()) + + // Test whether the ramdom inserted values sorted + for i := 0; i < N; i++ { + z.Add(fastrand.Float64(), fmt.Sprint(i)) + } + testIsSorted(t, z) + testInternalSpan(t, z) + + // Randomly update score + for i := 0; i < N; i++ { + // 1/2 + if rand.Float64() > 0.5 { + continue + } + 
z.Add(float64(i), fmt.Sprint(i)) + } + + testIsSorted(t, z) + testInternalSpan(t, z) + + // Randomly add or delete value + for i := 0; i < N; i++ { + // 1/2 + if rand.Float64() > 0.5 { + continue + } + z.Remove(fmt.Sprint(i)) + } + testIsSorted(t, z) + testInternalSpan(t, z) +} + +func testIsSorted(t *testing.T, z *Float64Set) { + var scores []float64 + for _, n := range z.Range(0, z.Len()-1) { + scores = append(scores, n.Score) + } + assert.True(t, sort.Float64sAreSorted(scores)) +} + +func testInternalSpan(t *testing.T, z *Float64Set) { + l := z.list + for i := l.highestLevel - 1; i >= 0; i-- { + x := l.header + for x.loadNext(i) != nil { + x = x.loadNext(i) + span := x.loadSpan(i) + from := x.value + fromScore := x.score + fromRank := l.Rank(fromScore, from) + assert.NotEqual(t, -1, fromRank) + + if x.loadNext(i) != nil { // from -> to + to := x.loadNext(i).value + toScore := x.loadNext(i).score + toRank := l.Rank(toScore, to) + assert.NotEqual(t, -1, toRank) + + // span = to.rank - from.rank + assert.Equalf(t, span, toRank-fromRank, "from %q (score: , rank: %d) to %q (score: %d, rank: %d), expect span: %d, actual: %d", + from, fromScore, fromRank, to, toScore, toRank, span, toRank-fromRank) + } else { // from -> nil + // span = skiplist.len - from.rank + assert.Equalf(t, l.length-fromRank, x.loadSpan(i), "%q (score: , rank: %d)", from, fromScore, fromRank) + } + } + } +} + +func TestFloat64SetRange(t *testing.T) { + testFloat64SetRange(t, false) +} + +func TestFloat64SetRevRange(t *testing.T) { + testFloat64SetRange(t, true) +} + +func testFloat64SetRange(t *testing.T, rev bool) { + const N = 1000 + z := NewFloat64() + for i := 0; i < N; i++ { + z.Add(fastrand.Float64(), fmt.Sprint(i)) + } + + start, stop := func(a, b int) (int, int) { + if a < b { + return a, b + } else { + return b, a + } + }(fastrand.Intn(N), fastrand.Intn(N)) + var ns []Float64Node + if rev { + ns = z.RevRange(start, stop) + } else { + ns = z.Range(start, stop) + } + assert.Equal(t, stop-start+1, len(ns)) + for i, n := range ns { + if rev { + assert.Equal(t, z.Len()-1-(start+i), z.Rank(n.Value)) + } else { + assert.Equal(t, start+i, z.Rank(n.Value)) + } + } +} + +func TestFloat64SetRange_Negative(t *testing.T) { + const N = 1000 + z := NewFloat64() + for i := 0; i < N; i++ { + z.Add(fastrand.Float64(), fmt.Sprint(i)) + } + ns := z.Range(-1, -1) + assert.Equal(t, 1, len(ns)) + assert.Equal(t, z.Len()-1, z.Rank(ns[0].Value)) +} + +func TestFloat64SetRevRange_Negative(t *testing.T) { + const N = 1000 + z := NewFloat64() + for i := 0; i < N; i++ { + z.Add(fastrand.Float64(), fmt.Sprint(i)) + } + ns := z.RevRange(-1, -1) + assert.Equal(t, 1, len(ns)) + assert.Equal(t, 0, z.Rank(ns[0].Value)) +} + +func TestFloat64SetRangeByScore(t *testing.T) { + testFloat64SetRangeByScore(t, false) +} + +func TestFloat64SetRangeByScoreWithOpt(t *testing.T) { + z := NewFloat64() + z.Add(1.0, "1") + z.Add(1.1, "2") + z.Add(2.0, "3") + + ns := z.RangeByScoreWithOpt(1.0, 2.0, RangeOpt{ExcludeMin: true}) + assert.Equal(t, 2, len(ns)) + assert.Equal(t, 1.1, ns[0].Score) + assert.Equal(t, 2.0, ns[1].Score) + + ns = z.RangeByScoreWithOpt(1.0, 2.0, RangeOpt{ExcludeMin: true, ExcludeMax: true}) + assert.Equal(t, 1, len(ns)) + assert.Equal(t, 1.1, ns[0].Score) + + ns = z.RangeByScoreWithOpt(1.0, 2.0, RangeOpt{ExcludeMax: true}) + assert.Equal(t, 2, len(ns)) + assert.Equal(t, 1.0, ns[0].Score) + assert.Equal(t, 1.1, ns[1].Score) + + ns = z.RangeByScoreWithOpt(2.0, 1.0, RangeOpt{}) + assert.Equal(t, 0, len(ns)) + ns = z.RangeByScoreWithOpt(2.0, 
1.0, RangeOpt{ExcludeMin: true}) + assert.Equal(t, 0, len(ns)) + ns = z.RangeByScoreWithOpt(2.0, 1.0, RangeOpt{ExcludeMax: true}) + assert.Equal(t, 0, len(ns)) + + ns = z.RangeByScoreWithOpt(1.0, 1.0, RangeOpt{ExcludeMax: true}) + assert.Equal(t, 0, len(ns)) + ns = z.RangeByScoreWithOpt(1.0, 1.0, RangeOpt{ExcludeMin: true}) + assert.Equal(t, 0, len(ns)) + ns = z.RangeByScoreWithOpt(1.0, 1.0, RangeOpt{}) + assert.Equal(t, 1, len(ns)) +} + +func TestFloat64SetRevRangeByScoreWithOpt(t *testing.T) { + z := NewFloat64() + z.Add(1.0, "1") + z.Add(1.1, "2") + z.Add(2.0, "3") + + ns := z.RevRangeByScoreWithOpt(2.0, 1.0, RangeOpt{ExcludeMax: true}) + assert.Equal(t, 2, len(ns)) + assert.Equal(t, 1.1, ns[0].Score) + assert.Equal(t, 1.0, ns[1].Score) + + ns = z.RevRangeByScoreWithOpt(2.0, 1.0, RangeOpt{ExcludeMax: true, ExcludeMin: true}) + assert.Equal(t, 1, len(ns)) + assert.Equal(t, 1.1, ns[0].Score) + + ns = z.RevRangeByScoreWithOpt(2.0, 1.0, RangeOpt{ExcludeMin: true}) + assert.Equal(t, 2, len(ns)) + assert.Equal(t, 2.0, ns[0].Score) + assert.Equal(t, 1.1, ns[1].Score) + + ns = z.RevRangeByScoreWithOpt(1.0, 2.0, RangeOpt{}) + assert.Equal(t, 0, len(ns)) + ns = z.RevRangeByScoreWithOpt(1.0, 2.0, RangeOpt{ExcludeMin: true}) + assert.Equal(t, 0, len(ns)) + ns = z.RevRangeByScoreWithOpt(1.0, 2.0, RangeOpt{ExcludeMax: true}) + assert.Equal(t, 0, len(ns)) + + ns = z.RevRangeByScoreWithOpt(1.0, 1.0, RangeOpt{ExcludeMax: true}) + assert.Equal(t, 0, len(ns)) + ns = z.RevRangeByScoreWithOpt(1.0, 1.0, RangeOpt{ExcludeMin: true}) + assert.Equal(t, 0, len(ns)) + ns = z.RevRangeByScoreWithOpt(1.0, 1.0, RangeOpt{}) + assert.Equal(t, 1, len(ns)) +} + +func TestFloat64SetRevRangeByScore(t *testing.T) { + testFloat64SetRangeByScore(t, true) +} + +func testFloat64SetRangeByScore(t *testing.T, rev bool) { + const N = 1000 + z := NewFloat64() + for i := 0; i < N; i++ { + z.Add(fastrand.Float64(), fmt.Sprint(i)) + } + + min, max := func(a, b float64) (float64, float64) { + if a < b { + return a, b + } else { + return b, a + } + }(fastrand.Float64(), fastrand.Float64()) + + var ns []Float64Node + if rev { + ns = z.RevRangeByScore(max, min) + } else { + ns = z.RangeByScore(min, max) + } + var prev *float64 + for _, n := range ns { + assert.LessOrEqual(t, min, n.Score) + assert.GreaterOrEqual(t, max, n.Score) + if prev != nil { + if rev { + assert.True(t, *prev >= n.Score) + } else { + assert.True(t, *prev <= n.Score) + } + } + prev = &n.Score + } +} + +func TestFloat64SetCountWithOpt(t *testing.T) { + testFloat64SetCountWithOpt(t, RangeOpt{}) + testFloat64SetCountWithOpt(t, RangeOpt{true, true}) + testFloat64SetCountWithOpt(t, RangeOpt{true, false}) + testFloat64SetCountWithOpt(t, RangeOpt{false, true}) +} + +func testFloat64SetCountWithOpt(t *testing.T, opt RangeOpt) { + const N = 1000 + z := NewFloat64() + for i := 0; i < N; i++ { + z.Add(fastrand.Float64(), fmt.Sprint(i)) + } + + min, max := func(a, b float64) (float64, float64) { + if a < b { + return a, b + } else { + return b, a + } + }(fastrand.Float64(), fastrand.Float64()) + + n := z.CountWithOpt(min, max, opt) + actualN := 0 + for _, n := range z.Range(0, -1) { + if opt.ExcludeMin { + if n.Score <= min { + continue + } + } else { + if n.Score < min { + continue + } + } + if opt.ExcludeMax { + if n.Score >= max { + continue + } + } else { + if n.Score > max { + continue + } + } + actualN++ + } + assert.Equal(t, actualN, n) +} + +func TestFloat64SetRemoveRangeByRank(t *testing.T) { + const N = 1000 + z := NewFloat64() + for i := 0; i < N; i++ { + 
z.Add(fastrand.Float64(), fmt.Sprint(i)) + } + + start, stop := func(a, b int) (int, int) { + if a < b { + return a, b + } else { + return b, a + } + }(fastrand.Intn(N), fastrand.Intn(N)) + + expectNs := z.Range(start, stop) + actualNs := z.RemoveRangeByRank(start, stop) + assert.Equal(t, expectNs, actualNs) + + // test whether removed + for _, n := range actualNs { + assert.False(t, z.Contains(n.Value)) + } + assert.Equal(t, N, z.Len()+len(actualNs)) +} + +func TestFloat64SetRemoveRangeByScoreWithOpt(t *testing.T) { + testFloat64SetRemoveRangeByScoreWithOpt(t, RangeOpt{}) + testFloat64SetRemoveRangeByScoreWithOpt(t, RangeOpt{true, true}) + testFloat64SetRemoveRangeByScoreWithOpt(t, RangeOpt{true, false}) + testFloat64SetRemoveRangeByScoreWithOpt(t, RangeOpt{false, false}) +} + +func testFloat64SetRemoveRangeByScoreWithOpt(t *testing.T, opt RangeOpt) { + const N = 1000 + z := NewFloat64() + for i := 0; i < N; i++ { + z.Add(fastrand.Float64(), fmt.Sprint(i)) + } + + min, max := func(a, b float64) (float64, float64) { + if a < b { + return a, b + } else { + return b, a + } + }(fastrand.Float64(), fastrand.Float64()) + + expectNs := z.RangeByScoreWithOpt(min, max, opt) + actualNs := z.RemoveRangeByScoreWithOpt(min, max, opt) + assert.Equal(t, expectNs, actualNs) + + // test whether removed + for _, n := range actualNs { + assert.False(t, z.Contains(n.Value)) + } + assert.Equal(t, N, z.Len()+len(actualNs)) +} + +func TestUnionFloat64(t *testing.T) { + var zs []*Float64Set + for i := 0; i < 10; i++ { + z := NewFloat64() + for j := 0; j < 100; j++ { + if fastrand.Float64() > 0.8 { + z.Add(fastrand.Float64(), fmt.Sprint(i)) + } + } + zs = append(zs, z) + } + z := UnionFloat64(zs...) + for _, n := range z.Range(0, z.Len()-1) { + var expectScore float64 + for i := 0; i < 10; i++ { + s, _ := zs[i].Score(n.Value) + expectScore += s + } + assert.Equal(t, expectScore, n.Score) + } +} + +func TestUnionFloat64_Empty(t *testing.T) { + z := UnionFloat64() + assert.Zero(t, z.Len()) +} + +func TestInterFloat64(t *testing.T) { + var zs []*Float64Set + for i := 0; i < 10; i++ { + z := NewFloat64() + for j := 0; j < 10; j++ { + if fastrand.Float64() > 0.8 { + z.Add(fastrand.Float64(), fmt.Sprint(i)) + } + } + zs = append(zs, z) + } + z := InterFloat64(zs...) + for _, n := range z.Range(0, z.Len()-1) { + var expectScore float64 + for i := 0; i < 10; i++ { + s, ok := zs[i].Score(n.Value) + assert.True(t, ok) + expectScore += s + } + assert.Equal(t, expectScore, n.Score) + } +} + +func TestInterFloat64_Empty(t *testing.T) { + z := InterFloat64() + assert.Zero(t, z.Len()) +} + +func TestInterFloat64_Simple(t *testing.T) { + z1 := NewFloat64() + z1.Add(0, "1") + z2 := NewFloat64() + z2.Add(0, "1") + z3 := NewFloat64() + z3.Add(0, "2") + + z := InterFloat64(z1, z2, z3) + assert.Zero(t, z.Len()) +} diff --git a/sys/xxhash3/README.md b/sys/xxhash3/README.md index ed9e29d..e46f670 100644 --- a/sys/xxhash3/README.md +++ b/sys/xxhash3/README.md @@ -90,7 +90,7 @@ Use Hash functions in your code: ``` package main -import "github.com/bytedance/gopkg/util/xxhash3" +import "github.com/songzhibin97/gkit/sys/xxhash3" func main() { println(xxhash3.HashString("hello world!"))