// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: write barriers.
//
// For the concurrent garbage collector, the Go compiler implements
// updates to pointer-valued fields that may be in heap objects by
// emitting calls to write barriers. The main write barrier for
// individual pointer writes is gcWriteBarrier and is implemented in
// assembly. This file contains write barrier entry points for bulk
// operations. See also mwbbuf.go.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"unsafe"
)

// Go uses a hybrid barrier that combines a Yuasa-style deletion
// barrier—which shades the object whose reference is being
// overwritten—with a Dijkstra-style insertion barrier—which shades the
// object whose reference is being written. The insertion part of the
// barrier is necessary while the calling goroutine's stack is grey. In
// pseudocode, the barrier is:
//
//	writePointer(slot, ptr):
//	    shade(*slot)
//	    if current stack is grey:
//	        shade(ptr)
//	    *slot = ptr
//
// slot is the destination in Go code.
// ptr is the value that goes into the slot in Go code.
//
// Shade indicates that it has seen a white pointer by adding the referent
// to wbuf as well as marking it.
//
// The two shades and the condition work together to prevent a mutator
// from hiding an object from the garbage collector:
//
// 1. shade(*slot) prevents a mutator from hiding an object by moving
// the sole pointer to it from the heap to its stack. If it attempts
// to unlink an object from the heap, this will shade it.
//
// 2. shade(ptr) prevents a mutator from hiding an object by moving
// the sole pointer to it from its stack into a black object in the
// heap. If it attempts to install the pointer into a black object,
// this will shade it.
//
// 3. Once a goroutine's stack is black, the shade(ptr) becomes
// unnecessary. shade(ptr) prevents hiding an object by moving it from
// the stack to the heap, but this requires first having a pointer
// hidden on the stack. Immediately after a stack is scanned, it only
// points to shaded objects, so it's not hiding anything, and the
// shade(*slot) prevents it from hiding any other pointers on its
// stack.
//
// For a detailed description of this barrier and proof of
// correctness, see https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md
//
// An illustrative Go-flavored sketch of this barrier appears just
// after this comment block.
//
//
// Dealing with memory ordering:
//
// Both the Yuasa and Dijkstra barriers can be made conditional on the
// color of the object containing the slot. We chose not to make these
// conditional because the cost of ensuring that the object holding
// the slot doesn't concurrently change color without the mutator
// noticing seems prohibitive.
//
// Consider the following example where the mutator writes into
// a slot and then loads the slot's mark bit while the GC thread
// writes to the slot's mark bit and then as part of scanning reads
// the slot.
//
// Initially both [slot] and [slotmark] are 0 (nil)
// Mutator thread          GC thread
// st [slot], ptr          st [slotmark], 1
//
// ld r1, [slotmark]       ld r2, [slot]
//
// Without an expensive memory barrier between the st and the ld, the final
// result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
// example of what can happen when loads are allowed to be reordered with older
// stores (avoiding such reorderings lies at the heart of the classic
// Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
// barriers, which will slow down both the mutator and the GC, we always grey
// the ptr object regardless of the slot's color.
//
// Another place where we intentionally omit memory barriers is when
// accessing mheap_.arena_used to check if a pointer points into the
// heap. On relaxed memory machines, it's possible for a mutator to
// extend the size of the heap by updating arena_used, allocate an
// object from this new region, and publish a pointer to that object,
// but for tracing running on another processor to observe the pointer
// but use the old value of arena_used. In this case, tracing will not
// mark the object, even though it's reachable. However, the mutator
// is guaranteed to execute a write barrier when it publishes the
// pointer, so it will take care of marking the object. A general
// consequence of this is that the garbage collector may cache the
// value of mheap_.arena_used. (See issue #9984.)
//
//
// Stack writes:
//
// The compiler omits write barriers for writes to the current frame,
// but if a stack pointer has been passed down the call stack, the
// compiler will generate a write barrier for writes through that
// pointer (because it doesn't know it's not a heap pointer).
//
//
// Global writes:
//
// The Go garbage collector requires write barriers when heap pointers
// are stored in globals. Many garbage collectors ignore writes to
// globals and instead pick up global -> heap pointers during
// termination. This increases pause time, so we instead rely on write
// barriers for writes to globals so that we don't have to rescan
// globals during mark termination.
//
//
// Publication ordering:
//
// The write barrier is *pre-publication*, meaning that the write
// barrier happens prior to the *slot = ptr write that may make ptr
// reachable by some goroutine that currently cannot reach it.
//
//
// Signal handler pointer writes:
//
// In general, the signal handler cannot safely invoke the write
// barrier because it may run without a P or even during the write
// barrier.
//
// There is exactly one exception: profbuf.go omits a barrier during
// signal handler profile logging. That's safe only because of the
// deletion barrier. See profbuf.go for a detailed argument. If we
// remove the deletion barrier, we'll have to work out a new way to
// handle the profile logging.
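
// The following sketch is purely illustrative and is not part of the
// runtime: it restates the hybrid barrier pseudocode above in Go-flavored
// form. The shade and stackIsGrey helpers are hypothetical stand-ins for
// the real greying machinery (the write barrier buffer in mwbbuf.go and
// the stack scan state tracked by the garbage collector).
//
//	func hybridWriteBarrier(slot *unsafe.Pointer, ptr unsafe.Pointer) {
//		shade(*slot) // Yuasa deletion barrier: grey the overwritten referent.
//		if stackIsGrey() {
//			// Dijkstra insertion barrier: grey the incoming pointer,
//			// needed only until this stack has been scanned (blackened).
//			shade(ptr)
//		}
//		*slot = ptr
//	}
//
// In the real runtime, the compiler emits calls to gcWriteBarrier (assembly)
// for individual pointer writes, and the bulk entry points in this file
// perform the equivalent shading over whole memory ranges via
// bulkBarrierPreWrite.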

// typedmemmove copies a value of type typ to dst from src.
// Must be nosplit, see #16026.
//
// TODO: Perfect for go:nosplitrec since we can't have a safe point
// anywhere in the bulk barrier or memmove.
//
//go:nosplit
func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer) {
	if dst == src {
		return
	}
	if writeBarrier.enabled && typ.PtrBytes != 0 {
		// This always copies a full value of type typ so it's safe
		// to pass typ along as an optimization. See the comment on
		// bulkBarrierPreWrite.
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
	}
	// There's a race here: if some other goroutine can write to
	// src, it may change some pointer in src after we've
	// performed the write barrier but before we perform the
	// memory copy. This is safe because the write performed by that
	// other goroutine must also be accompanied by a write
	// barrier, so at worst we've unnecessarily greyed the old
	// pointer that was in src.
	memmove(dst, src, typ.Size_)
	if goexperiment.CgoCheck2 {
		cgoCheckMemmove2(typ, dst, src, 0, typ.Size_)
	}
}

// wbZero performs the write barrier operations necessary before
// zeroing a region of memory at address dst of type typ.
// Does not actually do the zeroing.
//
//go:nowritebarrierrec
//go:nosplit
func wbZero(typ *_type, dst unsafe.Pointer) {
	// This always zeroes a full value of type typ so it's safe
	// to pass typ along as an optimization. See the comment on
	// bulkBarrierPreWrite.
	bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes, typ)
}

// wbMove performs the write barrier operations necessary before
// copying a region of memory from src to dst of type typ.
// Does not actually do the copying.
//
//go:nowritebarrierrec
//go:nosplit
func wbMove(typ *_type, dst, src unsafe.Pointer) {
	// This always copies a full value of type typ so it's safe to
	// pass a type here.
	//
	// See the comment on bulkBarrierPreWrite.
	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
}
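
// Illustrative note (a hedged sketch, not generated code): the compiler pairs
// wbZero and wbMove with the memory operation itself. For a pointer-containing
// type T whose zeroing or copying the compiler chooses to inline rather than
// lower to typedmemclr or typedmemmove, the lowering looks roughly like
//
//	*d = T{}   ->  wbZero(typeOfT, d), then zero the memory at d
//	*d = *s    ->  wbMove(typeOfT, d, s), then copy *s to *d
//
// where typeOfT stands for the compiler's *_type descriptor for T. The exact
// code the compiler generates differs in detail; the point is only that these
// entry points issue the pre-write barriers but never touch the memory.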

//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	if raceenabled {
		raceWriteObjectPC(typ, dst, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
		raceReadObjectPC(typ, src, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
	}
	if msanenabled {
		msanwrite(dst, typ.Size_)
		msanread(src, typ.Size_)
	}
	if asanenabled {
		asanwrite(dst, typ.Size_)
		asanread(src, typ.Size_)
	}
	typedmemmove(typ, dst, src)
}

//go:linkname reflectlite_typedmemmove internal/reflectlite.typedmemmove
func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	reflect_typedmemmove(typ, dst, src)
}

// reflectcallmove is invoked by reflectcall to copy the return values
// out of the stack and into the heap, invoking the necessary write
// barriers. dst, src, and size describe the return value area to
// copy. typ describes the entire frame (not just the return values).
// typ may be nil, which indicates write barriers are not needed.
//
// It must be nosplit and must only call nosplit functions because the
// stack map of reflectcall is wrong.
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
	if writeBarrier.enabled && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize {
		// Pass nil for the type. dst does not point to a value of type typ,
		// but rather points into one, so applying the optimization is not
		// safe. See the comment on this function.
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), size, nil)
	}
	memmove(dst, src, size)

	// Move pointers returned in registers to a place where the GC can see them.
	for i := range regs.Ints {
		if regs.ReturnIsPtr.Get(i) {
			regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
		}
	}
}

//go:nosplit
func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
	n := dstLen
	if n > srcLen {
		n = srcLen
	}
	if n == 0 {
		return 0
	}

	// The compiler emits calls to typedslicecopy before
	// instrumentation runs, so unlike the other copying and
	// assignment operations, it's not instrumented in the calling
	// code and needs its own instrumentation.
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(slicecopy)
		racewriterangepc(dstPtr, uintptr(n)*typ.Size_, callerpc, pc)
		racereadrangepc(srcPtr, uintptr(n)*typ.Size_, callerpc, pc)
	}
	if msanenabled {
		msanwrite(dstPtr, uintptr(n)*typ.Size_)
		msanread(srcPtr, uintptr(n)*typ.Size_)
	}
	if asanenabled {
		asanwrite(dstPtr, uintptr(n)*typ.Size_)
		asanread(srcPtr, uintptr(n)*typ.Size_)
	}

	if goexperiment.CgoCheck2 {
		cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
	}

	if dstPtr == srcPtr {
		return n
	}

	// Note: No point in checking typ.PtrBytes here:
	// the compiler only emits calls to typedslicecopy for types with pointers,
	// and growslice and reflect_typedslicecopy check for pointers
	// before calling typedslicecopy.
	size := uintptr(n) * typ.Size_
	if writeBarrier.enabled {
		// This always copies one or more full values of type typ so
		// it's safe to pass typ along as an optimization. See the comment on
		// bulkBarrierPreWrite. A worked example of the pwsize computation
		// follows this function.
		pwsize := size - typ.Size_ + typ.PtrBytes
		bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize, typ)
	}
	// See typedmemmove for a discussion of the race between the
	// barrier and memmove.
	memmove(dstPtr, srcPtr, size)
	return n
}
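
// Illustrative note (a hedged sketch; the element type below is hypothetical):
// the pwsize computation in typedslicecopy trims the pointer-free tail of the
// final element, since typ.PtrBytes covers only the prefix of a value that can
// contain pointers. For an element type such as
//
//	type elem struct {
//		p *byte    // pointer-bearing prefix
//		n [2]int64 // pointer-free tail
//	}
//
// on a 64-bit system Size_ is 24 and PtrBytes is 8, so copying n = 3 elements
// gives size = 72 and pwsize = 72 - 24 + 8 = 56: the barrier covers the first
// two elements in full plus only the first 8 bytes of the last one, skipping
// the 16 pointer-free bytes at its end.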

//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
	if elemType.PtrBytes == 0 {
		return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_)
	}
	return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
}

// typedmemclr clears the typed memory at ptr with type typ. The
// memory at ptr must already be initialized (and hence in type-safe
// state). If the memory is being initialized for the first time, see
// memclrNoHeapPointers.
//
// If the caller knows that typ has pointers, it can alternatively
// call memclrHasPointers.
//
// TODO: A "go:nosplitrec" annotation would be perfect for this.
//
//go:nosplit
func typedmemclr(typ *_type, ptr unsafe.Pointer) {
	if writeBarrier.enabled && typ.PtrBytes != 0 {
		// This always clears a whole value of type typ, so it's
		// safe to pass a type here and apply the optimization.
		// See the comment on bulkBarrierPreWrite.
		bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes, typ)
	}
	memclrNoHeapPointers(ptr, typ.Size_)
}

//go:linkname reflect_typedmemclr reflect.typedmemclr
func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
	typedmemclr(typ, ptr)
}

//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
	if writeBarrier.enabled && typ.PtrBytes != 0 {
		// Pass nil for the type. ptr does not point to a value of type typ,
		// but rather points into one, so it's not safe to apply the optimization.
		// See the comment on this function in the reflect package and the
		// comment on bulkBarrierPreWrite.
		bulkBarrierPreWrite(uintptr(ptr), 0, size, nil)
	}
	memclrNoHeapPointers(ptr, size)
}

//go:linkname reflect_typedarrayclear reflect.typedarrayclear
func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
	size := typ.Size_ * uintptr(len)
	if writeBarrier.enabled && typ.PtrBytes != 0 {
		// This always clears whole elements of an array, so it's
		// safe to pass a type here. See the comment on bulkBarrierPreWrite.
		bulkBarrierPreWrite(uintptr(ptr), 0, size, typ)
	}
	memclrNoHeapPointers(ptr, size)
}

// memclrHasPointers clears n bytes of typed memory starting at ptr.
// The caller must ensure that the type of the object at ptr has
// pointers, usually by checking typ.PtrBytes. However, ptr
// does not have to point to the start of the allocation.
//
//go:nosplit
func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
	// Pass nil for the type since we don't have one here anyway.
	bulkBarrierPreWrite(uintptr(ptr), 0, n, nil)
	memclrNoHeapPointers(ptr, n)
}
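
// Illustrative note (a hedged summary, not runtime code): choosing among the
// clearing routines in this file. Roughly,
//
//	memory being initialized for the first time      -> memclrNoHeapPointers
//	a whole value of a known pointer-bearing type    -> typedmemclr
//	pointer-bearing memory without a whole-value
//	type (for example, an interior range)            -> memclrHasPointers
//
// memclrNoHeapPointers is also appropriate when the memory holds no heap
// pointers at all; the typed variants add only the pre-write barriers that
// the hybrid barrier requires before the memory is overwritten.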