// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"internal/race"
	"runtime"
	"sync/atomic"
	"unsafe"
)

// A Pool is a set of temporary objects that may be individually saved and
// retrieved.
//
// Any item stored in the Pool may be removed automatically at any time without
// notification. If the Pool holds the only reference when this happens, the
// item might be deallocated.
//
// A Pool is safe for use by multiple goroutines simultaneously.
//
// Pool's purpose is to cache allocated but unused items for later reuse,
// relieving pressure on the garbage collector. That is, it makes it easy to
// build efficient, thread-safe free lists. However, it is not suitable for all
// free lists.
//
// An appropriate use of a Pool is to manage a group of temporary items
// silently shared among and potentially reused by concurrent independent
// clients of a package. Pool provides a way to amortize allocation overhead
// across many clients.
//
// An example of good use of a Pool is in the fmt package, which maintains a
// dynamically-sized store of temporary output buffers. The store scales under
// load (when many goroutines are actively printing) and shrinks when
// quiescent.
//
// On the other hand, a free list maintained as part of a short-lived object is
// not a suitable use for a Pool, since the overhead does not amortize well in
// that scenario. It is more efficient to have such objects implement their own
// free list.
//
// A Pool must not be copied after first use.
//
// In the terminology of the Go memory model, a call to Put(x) “synchronizes before”
// a call to Get returning that same value x.
// Similarly, a call to New returning x “synchronizes before”
// a call to Get returning that same value x.
type Pool struct {
	noCopy noCopy

	local     unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
	localSize uintptr        // size of the local array

	victim     unsafe.Pointer // local from previous cycle
	victimSize uintptr        // size of victims array

	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func() any
}

// Local per-P Pool appendix.
type poolLocalInternal struct {
	private any       // Can be used only by the respective P.
	shared  poolChain // Local P can pushHead/popHead; any P can popTail.
}

type poolLocal struct {
	poolLocalInternal

	// Prevents false sharing on widespread platforms with
	// 128 mod (cache line size) = 0.
	pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
}

// from runtime
//go:linkname runtime_randn runtime.randn
func runtime_randn(n uint32) uint32

var poolRaceHash [128]uint64

// poolRaceAddr returns an address to use as the synchronization point
// for race detector logic. We don't use the actual pointer stored in x
// directly, for fear of conflicting with other synchronization on that address.
// Instead, we hash the pointer to get an index into poolRaceHash.
// See discussion on golang.org/cl/31589.
func poolRaceAddr(x any) unsafe.Pointer {
	ptr := uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&x))[1])
	h := uint32((uint64(uint32(ptr)) * 0x85ebca6b) >> 16)
	return unsafe.Pointer(&poolRaceHash[h%uint32(len(poolRaceHash))])
}

// Put adds x to the pool.
func (p *Pool) Put(x any) {
	if x == nil {
		return
	}
	if race.Enabled {
		if runtime_randn(4) == 0 {
			// Randomly drop x on floor.
			return
		}
		race.ReleaseMerge(poolRaceAddr(x))
		race.Disable()
	}
	l, _ := p.pin()
	if l.private == nil {
		l.private = x
	} else {
		l.shared.pushHead(x)
	}
	runtime_procUnpin()
	if race.Enabled {
		race.Enable()
	}
}

// Get selects an arbitrary item from the Pool, removes it from the
// Pool, and returns it to the caller.
// Get may choose to ignore the pool and treat it as empty.
// Callers should not assume any relation between values passed to Put and
// the values returned by Get.
//
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
func (p *Pool) Get() any {
	if race.Enabled {
		race.Disable()
	}
	l, pid := p.pin()
	x := l.private
	l.private = nil
	if x == nil {
		// Try to pop the head of the local shard. We prefer
		// the head over the tail for temporal locality of
		// reuse.
		x, _ = l.shared.popHead()
		if x == nil {
			x = p.getSlow(pid)
		}
	}
	runtime_procUnpin()
	if race.Enabled {
		race.Enable()
		if x != nil {
			race.Acquire(poolRaceAddr(x))
		}
	}
	if x == nil && p.New != nil {
		x = p.New()
	}
	return x
}

func (p *Pool) getSlow(pid int) any {
	// See the comment in pin regarding ordering of the loads.
	size := runtime_LoadAcquintptr(&p.localSize) // load-acquire
	locals := p.local                            // load-consume
	// Try to steal one element from other procs.
	for i := 0; i < int(size); i++ {
		l := indexLocal(locals, (pid+i+1)%int(size))
		if x, _ := l.shared.popTail(); x != nil {
			return x
		}
	}

	// Try the victim cache. We do this after attempting to steal
	// from all primary caches because we want objects in the
	// victim cache to age out if at all possible.
	size = atomic.LoadUintptr(&p.victimSize)
	if uintptr(pid) >= size {
		return nil
	}
	locals = p.victim
	l := indexLocal(locals, pid)
	if x := l.private; x != nil {
		l.private = nil
		return x
	}
	for i := 0; i < int(size); i++ {
		l := indexLocal(locals, (pid+i)%int(size))
		if x, _ := l.shared.popTail(); x != nil {
			return x
		}
	}

	// Mark the victim cache as empty so future gets don't bother
	// with it.
	atomic.StoreUintptr(&p.victimSize, 0)

	return nil
}

// pin pins the current goroutine to P, disables preemption and
// returns the poolLocal for the P and the P's id.
// Caller must call runtime_procUnpin() when done with the pool.
func (p *Pool) pin() (*poolLocal, int) {
	// Check whether p is nil to get a panic.
	// Otherwise the nil dereference happens while the m is pinned,
	// causing a fatal error rather than a panic.
	if p == nil {
		panic("nil Pool")
	}

	pid := runtime_procPin()
	// In pinSlow we store to local and then to localSize, here we load in opposite order.
	// Since we've disabled preemption, GC cannot happen in between.
	// Thus here we must observe local at least as large as localSize.
	// We can observe a newer/larger local; that is fine (we must observe its zero-initialized-ness).
	s := runtime_LoadAcquintptr(&p.localSize) // load-acquire
	l := p.local                              // load-consume
	if uintptr(pid) < s {
		return indexLocal(l, pid), pid
	}
	return p.pinSlow()
}

func (p *Pool) pinSlow() (*poolLocal, int) {
	// Retry under the mutex.
	// Cannot lock the mutex while pinned.
	runtime_procUnpin()
	allPoolsMu.Lock()
	defer allPoolsMu.Unlock()
	pid := runtime_procPin()
	// poolCleanup won't be called while we are pinned.
	s := p.localSize
	l := p.local
	if uintptr(pid) < s {
		return indexLocal(l, pid), pid
	}
	if p.local == nil {
		allPools = append(allPools, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)
	local := make([]poolLocal, size)
	atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
	runtime_StoreReluintptr(&p.localSize, uintptr(size))     // store-release
	return &local[pid], pid
}

func poolCleanup() {
	// This function is called with the world stopped, at the beginning of a garbage collection.
	// It must not allocate and probably should not call any runtime functions.

	// Because the world is stopped, no pool user can be in a
	// pinned section (in effect, this has all Ps pinned).

	// Drop victim caches from all pools.
	for _, p := range oldPools {
		p.victim = nil
		p.victimSize = 0
	}

	// Move primary cache to victim cache.
	for _, p := range allPools {
		p.victim = p.local
		p.victimSize = p.localSize
		p.local = nil
		p.localSize = 0
	}

	// The pools with non-empty primary caches now have non-empty
	// victim caches and no pools have primary caches.
	oldPools, allPools = allPools, nil
}

var (
	allPoolsMu Mutex

	// allPools is the set of pools that have non-empty primary
	// caches. Protected by either 1) allPoolsMu and pinning or 2)
	// STW.
	allPools []*Pool

	// oldPools is the set of pools that may have non-empty victim
	// caches. Protected by STW.
	oldPools []*Pool
)

func init() {
	runtime_registerPoolCleanup(poolCleanup)
}

func indexLocal(l unsafe.Pointer, i int) *poolLocal {
	lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{}))
	return (*poolLocal)(lp)
}

// Implemented in runtime.
func runtime_registerPoolCleanup(cleanup func())
func runtime_procPin() int
func runtime_procUnpin()

// The below are implemented in runtime/internal/atomic and the
// compiler also knows to intrinsify the symbol we linkname into this
// package.

//go:linkname runtime_LoadAcquintptr runtime/internal/atomic.LoadAcquintptr
func runtime_LoadAcquintptr(ptr *uintptr) uintptr

//go:linkname runtime_StoreReluintptr runtime/internal/atomic.StoreReluintptr
func runtime_StoreReluintptr(ptr *uintptr, val uintptr) uintptr
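
// Usage sketch (hypothetical; not part of this file). The snippet below
// mirrors the fmt-style buffer reuse described in the Pool doc comment above.
// It assumes a separate client package importing "bytes", "fmt", and "sync";
// the names bufPool and format are illustrative only.
//
//	var bufPool = sync.Pool{
//		New: func() any {
//			// New runs only when Get finds no reusable value.
//			return new(bytes.Buffer)
//		},
//	}
//
//	func format(v any) string {
//		b := bufPool.Get().(*bytes.Buffer)
//		b.Reset() // always clear state left by a previous user
//		fmt.Fprintf(b, "%v", v)
//		s := b.String()
//		bufPool.Put(b) // hand the buffer back for reuse
//		return s
//	}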
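
// Victim-cache aging sketch (hypothetical; assumes a client package importing
// "runtime" and "sync"). It illustrates the two-collection lifetime implied by
// poolCleanup above: the first GC moves the primary cache to the victim cache,
// the second GC drops the victim cache, so an unretrieved value survives at
// most two collections and callers must never rely on Get returning what they
// previously Put.
//
//	p := &sync.Pool{}
//	p.Put("x")
//	runtime.GC() // primary cache becomes the victim cache
//	runtime.GC() // victim cache is dropped; "x" is now collectible
//	v := p.Get() // nil here, since p.New is nil; callers must handle that
//	_ = v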