// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !goexperiment.allocheaders

// Garbage collector: type and heap bitmaps.
//
// Stack, data, and bss bitmaps
//
// Stack frames and global variables in the data and bss sections are
// described by bitmaps with 1 bit per pointer-sized word. A "1" bit
// means the word is a live pointer to be visited by the GC (referred to
// as "pointer"). A "0" bit means the word should be ignored by GC
// (referred to as "scalar", though it could be a dead pointer value).
//
// Heap bitmap
//
// The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
// recording whether a pointer is stored in that word or not. This bitmap
// is stored in the heapArena metadata backing each heap arena.
// That is, if ha is the heapArena for the arena starting at "start",
// then ha.bitmap[0] holds the 64 bits for the 64 words "start"
// through start+63*ptrSize, ha.bitmap[1] holds the entries for
// start+64*ptrSize through start+127*ptrSize, and so on.
// Bits correspond to words in little-endian order. ha.bitmap[0]&1 represents
// the word at "start", ha.bitmap[0]>>1&1 represents the word at start+8, etc.
// (For 32-bit platforms, s/64/32/.)
//
// We also keep a noMorePtrs bitmap which allows us to stop scanning
// the heap bitmap early in certain situations. If ha.noMorePtrs[i]>>j&1
// is 1, then the object containing the last word described by ha.bitmap[8*i+j]
// has no more pointers beyond those described by ha.bitmap[8*i+j].
// If ha.noMorePtrs[i]>>j&1 is set, the entries in ha.bitmap[8*i+j+1] and
// beyond must all be zero until the start of the next object.
//
// The bitmap for noscan spans is set to all zero at span allocation time.
//
// The bitmap for unallocated objects in scannable spans is not maintained
// (can be junk).

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/sys"
	"unsafe"
)

const (
	// For compatibility with the allocheaders GOEXPERIMENT.
	mallocHeaderSize       = 0
	minSizeForMallocHeader = ^uintptr(0)
)

// For compatibility with the allocheaders GOEXPERIMENT.
//
//go:nosplit
func heapBitsInSpan(_ uintptr) bool {
	return false
}

// heapArenaPtrScalar contains the per-heapArena pointer/scalar metadata for the GC.
type heapArenaPtrScalar struct {
	// bitmap stores the pointer/scalar bitmap for the words in
	// this arena. See mbitmap.go for a description.
	// This array uses 1 bit per word of heap, or 1.6% of the heap size (for 64-bit).
	bitmap [heapArenaBitmapWords]uintptr

	// If the ith bit of noMorePtrs is true, then there are no more
	// pointers for the object containing the word described by the
	// high bit of bitmap[i].
	// In that case, bitmap[i+1], ... must be zero until the start
	// of the next object.
	// We never operate on these entries using bit-parallel techniques,
	// so it is ok if they are small. Also, they can't be bigger than
	// uint16 because at that size a single noMorePtrs entry
	// represents 8K of memory, the minimum size of a span. Any larger
	// and we'd have to worry about concurrent updates.
	// This array uses 1 bit per word of bitmap, or .024% of the heap size (for 64-bit).
	noMorePtrs [heapArenaBitmapWords / 8]uint8
}
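
// As a worked example of the indexing described above (illustrative only,
// assuming a 64-bit platform): the heap word at byte offset 0x500 from the
// arena start is arena word 0x500/8 = 160, so its pointer bit is
// ha.bitmap[160/64]>>(160%64)&1 = ha.bitmap[2]>>32&1, and the noMorePtrs bit
// covering that bitmap word is ha.noMorePtrs[2/8]>>(2%8)&1.
// heapBitsForAddr below performs this arithmetic.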

// heapBits provides access to the bitmap bits for a single heap word.
// The methods on heapBits take value receivers so that the compiler
// can more easily inline calls to those methods and registerize the
// struct fields independently.
type heapBits struct {
	// heapBits will report on pointers in the range [addr,addr+size).
	// The low bit of mask contains the pointerness of the word at addr
	// (assuming valid>0).
	addr, size uintptr

	// The next few pointer bits representing words starting at addr.
	// Those bits already returned by next() are zeroed.
	mask uintptr
	// Number of bits in mask that are valid. mask is always less than 1<<valid.
	valid uintptr
}

// heapBitsForAddr returns the heapBits for the address addr.
// The caller must ensure [addr,addr+size) is in an allocated span.
// In particular, be careful not to point past the end of an object.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func heapBitsForAddr(addr, size uintptr) heapBits {
	// Find arena
	ai := arenaIndex(addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]

	// Word index in arena.
	word := addr / goarch.PtrSize % heapArenaWords

	// Word index and bit offset in bitmap array.
	idx := word / ptrBits
	off := word % ptrBits

	// Grab relevant bits of bitmap.
	mask := ha.bitmap[idx] >> off
	valid := ptrBits - off

	// Process depending on where the object ends.
	nptr := size / goarch.PtrSize
	if nptr < valid {
		// Bits for this object end before the end of this bitmap word.
		// Squash bits for the following objects.
		mask &= 1<<(nptr&(ptrBits-1)) - 1
		valid = nptr
	} else if nptr == valid {
		// Bits for this object end at exactly the end of this bitmap word.
		// All good.
	} else {
		// Bits for this object extend into the next bitmap word. See if there
		// may be any pointers recorded there.
		if uintptr(ha.noMorePtrs[idx/8])>>(idx%8)&1 != 0 {
			// No more pointers in this object after this bitmap word.
			// Update size so we know not to look there.
			size = valid * goarch.PtrSize
		}
	}

	return heapBits{addr: addr, size: size, mask: mask, valid: valid}
}

// Returns the (absolute) address of the next known pointer and
// a heapBits iterator representing any remaining pointers.
// If there are no more pointers, returns address 0.
// Note that next does not modify h. The caller must record the result.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (h heapBits) next() (heapBits, uintptr) {
	for {
		if h.mask != 0 {
			var i int
			if goarch.PtrSize == 8 {
				i = sys.TrailingZeros64(uint64(h.mask))
			} else {
				i = sys.TrailingZeros32(uint32(h.mask))
			}
			h.mask ^= uintptr(1) << (i & (ptrBits - 1))
			return h, h.addr + uintptr(i)*goarch.PtrSize
		}

		// Skip words that we've already processed.
		h.addr += h.valid * goarch.PtrSize
		h.size -= h.valid * goarch.PtrSize
		if h.size == 0 {
			return h, 0 // no more pointers
		}

		// Grab more bits and try again.
		h = heapBitsForAddr(h.addr, h.size)
	}
}
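
// The canonical way to walk every pointer slot of an object is the loop used
// by bulkBarrierPreWrite and getgcmask below, sketched here (the variable
// names are illustrative only):
//
//	h := heapBitsForAddr(base, size)
//	for {
//		var addr uintptr
//		if h, addr = h.next(); addr == 0 {
//			break
//		}
//		// addr is the address of a pointer slot in [base, base+size).
//	}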

// nextFast is like next, but can return 0 even when there are more pointers
// to be found. Callers should call next if nextFast returns 0 as its second
// return value.
//
//	if addr, h = h.nextFast(); addr == 0 {
//		if addr, h = h.next(); addr == 0 {
//			... no more pointers ...
//		}
//	}
//	... process pointer at addr ...
//
// nextFast is designed to be inlineable.
//
//go:nosplit
func (h heapBits) nextFast() (heapBits, uintptr) {
	// TESTQ/JEQ
	if h.mask == 0 {
		return h, 0
	}
	// BSFQ
	var i int
	if goarch.PtrSize == 8 {
		i = sys.TrailingZeros64(uint64(h.mask))
	} else {
		i = sys.TrailingZeros32(uint32(h.mask))
	}
	// BTCQ
	h.mask ^= uintptr(1) << (i & (ptrBits - 1))
	// LEAQ (XX)(XX*8)
	return h, h.addr + uintptr(i)*goarch.PtrSize
}

// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
// The caller is also responsible for cgo pointer checks if this
// may be writing Go pointers into non-Go memory.
//
// The pointer bitmap is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.PtrBytes.
//
// The type of the space can be provided purely as an optimization,
// however it is not used with GOEXPERIMENT=noallocheaders.
//
// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr, _ *abi.Type) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.enabled {
		return
	}
	if s := spanOf(dst); s == nil {
		// If dst is a global, use the data or BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now.
		// It can't be a global. It must be either our stack,
		// or in the case of direct channel sends, it could be
		// another stack. Either way, no need for barriers.
		// This will also catch if dst is in a freed span,
		// though that should never happen.
		return
	}

	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst, size)
	if src == 0 {
		for {
			var addr uintptr
			if h, addr = h.next(); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			p := buf.get1()
			p[0] = *dstx
		}
	} else {
		for {
			var addr uintptr
			if h, addr = h.next(); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}
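
// An illustrative sketch of the intended calling pattern (not the exact code
// of any particular caller): a typed move such as typedmemmove executes the
// barriers for the pointer-holding prefix of the type and then performs the
// copy.
//
//	if writeBarrier.enabled && typ.PtrBytes != 0 {
//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
//	}
//	memmove(dst, src, typ.Size_)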

// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//
// The type of the space can be provided purely as an optimization,
// however it is not used with GOEXPERIMENT=noallocheaders.
//
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, _ *abi.Type) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.enabled {
		return
	}
	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst, size)
	for {
		var addr uintptr
		if h, addr = h.next(); addr == 0 {
			break
		}
		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
		p := buf.get1()
		p[0] = *srcx
	}
}

// initHeapBits initializes the heap bitmap for a span.
// If this is a span of single pointer allocations, it initializes all
// words to pointer. If forceClear is true, clears all bits.
func (s *mspan) initHeapBits(forceClear bool) {
	if forceClear || s.spanclass.noscan() {
		// Set all the pointer bits to zero. We do this once
		// when the span is allocated so we don't have to do it
		// for each object allocation.
		base := s.base()
		size := s.npages * pageSize
		h := writeHeapBitsForAddr(base)
		h.flush(base, size)
		return
	}
	isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
	if !isPtrs {
		return // nothing to do
	}
	h := writeHeapBitsForAddr(s.base())
	size := s.npages * pageSize
	nptrs := size / goarch.PtrSize
	for i := uintptr(0); i < nptrs; i += ptrBits {
		h = h.write(^uintptr(0), ptrBits)
	}
	h.flush(s.base(), size)
}

type writeHeapBits struct {
	addr  uintptr // address that the low bit of mask represents the pointer state of.
	mask  uintptr // some pointer bits starting at the address addr.
	valid uintptr // number of bits in buf that are valid (including low)
	low   uintptr // number of low-order bits to not overwrite
}

func writeHeapBitsForAddr(addr uintptr) (h writeHeapBits) {
	// We start writing bits maybe in the middle of a heap bitmap word.
	// Remember how many bits into the word we started, so we can be sure
	// not to overwrite the previous bits.
	h.low = addr / goarch.PtrSize % ptrBits

	// round down to heap word that starts the bitmap word.
	h.addr = addr - h.low*goarch.PtrSize

	// We don't have any bits yet.
	h.mask = 0
	h.valid = h.low

	return
}
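
// The writeHeapBits methods below form a small pipeline. A sketch of the
// typical sequence, as used by initHeapBits and heapBitsSetType (the local
// names here are illustrative only):
//
//	h := writeHeapBitsForAddr(x)    // start at object address x
//	h = h.write(ptrMask, nPtrWords) // append pointer/scalar bits, 1=pointer
//	h = h.pad(scalarTailBytes)      // append zero bits for a pointerless tail
//	h.flush(x, size)                // write out, setting noMorePtrs as needed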

// write appends the pointerness of the next valid pointer slots
// using the low valid bits of bits. 1=pointer, 0=scalar.
func (h writeHeapBits) write(bits, valid uintptr) writeHeapBits {
	if h.valid+valid <= ptrBits {
		// Fast path - just accumulate the bits.
		h.mask |= bits << h.valid
		h.valid += valid
		return h
	}
	// Too many bits to fit in this word. Write the current word
	// out and move on to the next word.

	data := h.mask | bits<<h.valid       // mask for this word
	h.mask = bits >> (ptrBits - h.valid) // leftover for next word
	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them

	// Flush mask to the memory bitmap.
	// TODO: figure out how to cache arena lookup.
	ai := arenaIndex(h.addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
	m := uintptr(1)<<h.low - 1
	ha.bitmap[idx] = ha.bitmap[idx]&m | data
	// Note: no synchronization required for this write because
	// the allocator has exclusive access to the page, and the bitmap
	// entries are all for a single page. Also, visibility of these
	// writes is guaranteed by the publication barrier in mallocgc.

	// Clear noMorePtrs bit, since we're going to be writing bits
	// into the following word.
	ha.noMorePtrs[idx/8] &^= uint8(1) << (idx % 8)
	// Note: same as above

	// Move to next word of bitmap.
	h.addr += ptrBits * goarch.PtrSize
	h.low = 0
	return h
}

// Add padding of size bytes.
func (h writeHeapBits) pad(size uintptr) writeHeapBits {
	if size == 0 {
		return h
	}
	words := size / goarch.PtrSize
	for words > ptrBits {
		h = h.write(0, ptrBits)
		words -= ptrBits
	}
	return h.write(0, words)
}

// Flush the bits that have been written, and add zeros as needed
// to cover the full object [addr, addr+size).
func (h writeHeapBits) flush(addr, size uintptr) {
	// zeros counts the number of bits needed to represent the object minus the
	// number of bits we've already written. This is the number of 0 bits
	// that need to be added.
	zeros := (addr+size-h.addr)/goarch.PtrSize - h.valid

	// Add zero bits up to the bitmap word boundary
	if zeros > 0 {
		z := ptrBits - h.valid
		if z > zeros {
			z = zeros
		}
		h.valid += z
		zeros -= z
	}

	// Find word in bitmap that we're going to write.
	ai := arenaIndex(h.addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords

	// Write remaining bits.
	if h.valid != h.low {
		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
		ha.bitmap[idx] = ha.bitmap[idx]&m | h.mask
	}
	if zeros == 0 {
		return
	}

	// Record in the noMorePtrs map that there won't be any more 1 bits,
	// so readers can stop early.
	ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)

	// Advance to next bitmap word.
	h.addr += ptrBits * goarch.PtrSize

	// Continue on writing zeros for the rest of the object.
	// For standard use of the ptr bits this is not required, as
	// the bits are read from the beginning of the object. Some uses,
	// like noscan spans, oblets, bulk write barriers, and cgocheck, might
	// start mid-object, so these writes are still required.
	for {
		// Write zero bits.
		ai := arenaIndex(h.addr)
		ha := mheap_.arenas[ai.l1()][ai.l2()]
		idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
		if zeros < ptrBits {
			ha.bitmap[idx] &^= uintptr(1)<<zeros - 1
			break
		} else if zeros == ptrBits {
			ha.bitmap[idx] = 0
			break
		} else {
			ha.bitmap[idx] = 0
			zeros -= ptrBits
		}
		ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
		h.addr += ptrBits * goarch.PtrSize
	}
}

// heapBitsSetType records that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ.
// (The number of values is given by dataSize / typ.Size.)
// If dataSize < size, the fragment [x+dataSize, x+size) is
// recorded as non-pointer data.
// It is known that the type has pointers somewhere;
// malloc does not call heapBitsSetType when there are no pointers,
// because all free objects are marked as noscan during
// heapBitsSweepSpan.
//
// There can only be one allocation from a given span active at a time,
// and the bitmap for a span always falls on word boundaries,
// so there are no write-write races for access to the heap bitmap.
// Hence, heapBitsSetType can access the bitmap without atomics.
//
// There can be read-write races between heapBitsSetType and things
// that read the heap bitmap like scanobject. However, since
// heapBitsSetType is only used for objects that have not yet been
// made reachable, readers will ignore bits being modified by this
// function. This does mean this function cannot transiently modify
// bits that belong to neighboring objects. Also, on weakly-ordered
// machines, callers must execute a store/store (publication) barrier
// between calling this function and making the object reachable.
func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
	const doubleCheck = false // slow but helpful; enable to test modifications to this code

	if doubleCheck && dataSize%typ.Size_ != 0 {
		throw("heapBitsSetType: dataSize not a multiple of typ.Size")
	}

	if goarch.PtrSize == 8 && size == goarch.PtrSize {
		// It's one word and it has pointers, it must be a pointer.
		// Since all allocated one-word objects are pointers
		// (non-pointers are aggregated into tinySize allocations),
		// (*mspan).initHeapBits sets the pointer bits for us.
		// Nothing to do here.
		if doubleCheck {
			h, addr := heapBitsForAddr(x, size).next()
			if addr != x {
				throw("heapBitsSetType: pointer bit missing")
			}
			_, addr = h.next()
			if addr != 0 {
				throw("heapBitsSetType: second pointer bit found")
			}
		}
		return
	}

	h := writeHeapBitsForAddr(x)

	// Handle GC program.
	if typ.Kind_&kindGCProg != 0 {
		// Expand the gc program into the storage we're going to use for the actual object.
		obj := (*uint8)(unsafe.Pointer(x))
		n := runGCProg(addb(typ.GCData, 4), obj)
		// Use the expanded program to set the heap bits.
		for i := uintptr(0); true; i += typ.Size_ {
			// Copy expanded program to heap bitmap.
			p := obj
			j := n
			for j > 8 {
				h = h.write(uintptr(*p), 8)
				p = add1(p)
				j -= 8
			}
			h = h.write(uintptr(*p), j)

			if i+typ.Size_ == dataSize {
				break // no padding after last element
			}

			// Pad with zeros to the start of the next element.
			h = h.pad(typ.Size_ - n*goarch.PtrSize)
		}

		h.flush(x, size)

		// Erase the expanded GC program.
		memclrNoHeapPointers(unsafe.Pointer(obj), (n+7)/8)
		return
	}

	// Note about sizes:
	//
	// typ.Size is the number of words in the object,
	// and typ.PtrBytes is the number of words in the prefix
	// of the object that contains pointers. That is, the final
	// typ.Size - typ.PtrBytes words contain no pointers.
	// This allows optimization of a common pattern where
	// an object has a small header followed by a large scalar
	// buffer. If we know the pointers are over, we don't have
	// to scan the buffer's heap bitmap at all.
	// The 1-bit ptrmasks are sized to contain only bits for
	// the typ.PtrBytes prefix, zero padded out to a full byte
	// of bitmap. If there is more room in the allocated object,
	// that space is pointerless. The noMorePtrs bitmap will prevent
	// scanning large pointerless tails of an object.
	//
	// Replicated copies are not as nice: if there is an array of
	// objects with scalar tails, all but the last tail does have to
	// be initialized, because there is no way to say "skip forward".

	ptrs := typ.PtrBytes / goarch.PtrSize
	if typ.Size_ == dataSize { // Single element
		if ptrs <= ptrBits { // Single small element
			m := readUintptr(typ.GCData)
			h = h.write(m, ptrs)
		} else { // Single large element
			p := typ.GCData
			for {
				h = h.write(readUintptr(p), ptrBits)
				p = addb(p, ptrBits/8)
				ptrs -= ptrBits
				if ptrs <= ptrBits {
					break
				}
			}
			m := readUintptr(p)
			h = h.write(m, ptrs)
		}
	} else { // Repeated element
		words := typ.Size_ / goarch.PtrSize // total words, including scalar tail
		if words <= ptrBits { // Repeated small element
			n := dataSize / typ.Size_
			m := readUintptr(typ.GCData)
			// Make larger unit to repeat
			for words <= ptrBits/2 {
				if n&1 != 0 {
					h = h.write(m, words)
				}
				n /= 2
				m |= m << words
				ptrs += words
				words *= 2
				if n == 1 {
					break
				}
			}
			for n > 1 {
				h = h.write(m, words)
				n--
			}
			h = h.write(m, ptrs)
		} else { // Repeated large element
			for i := uintptr(0); true; i += typ.Size_ {
				p := typ.GCData
				j := ptrs
				for j > ptrBits {
					h = h.write(readUintptr(p), ptrBits)
					p = addb(p, ptrBits/8)
					j -= ptrBits
				}
				m := readUintptr(p)
				h = h.write(m, j)
				if i+typ.Size_ == dataSize {
					break // don't need the trailing nonptr bits on the last element.
				}
				// Pad with zeros to the start of the next element.
				h = h.pad(typ.Size_ - typ.PtrBytes)
			}
		}
	}
	h.flush(x, size)

	if doubleCheck {
		h := heapBitsForAddr(x, size)
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			// Compute the pointer bit we want at offset i.
			want := false
			if i < dataSize {
				off := i % typ.Size_
				if off < typ.PtrBytes {
					j := off / goarch.PtrSize
					want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
				}
			}
			if want {
				var addr uintptr
				h, addr = h.next()
				if addr != x+i {
					throw("heapBitsSetType: pointer entry not correct")
				}
			}
		}
		if _, addr := h.next(); addr != 0 {
			throw("heapBitsSetType: extra pointer")
		}
	}
}
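
// As a worked example of the "repeated small element" doubling above
// (illustrative only): for a 2-word element with ptrmask 0b01 repeated n=4
// times, the loop doubles the unit to m=0b0101 (words=4, n=2) and then to
// m=0b01010101 (words=8, n=1), so the final h.write(m, 7) emits the bits for
// all but the last scalar word, which flush then fills with zeros.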

// For goexperiment.AllocHeaders
func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) (scanSize uintptr) {
	return 0
}

// Testing.

// Returns GC type info for the pointer stored in ep for testing.
// If ep points to the stack, only static live information will be returned
// (i.e. not for objects which are only dynamically live stack objects).
func getgcmask(ep any) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type
	// data or bss
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// heap
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		if s.spanclass.noscan() {
			return nil
		}
		n := s.elemsize
		hbits := heapBitsForAddr(base, n)
		mask = make([]byte, n/goarch.PtrSize)
		for {
			var addr uintptr
			if hbits, addr = hbits.next(); addr == 0 {
				break
			}
			mask[(addr-base)/goarch.PtrSize] = 1
		}
		// Callers expect this mask to end at the last pointer.
		for len(mask) > 0 && mask[len(mask)-1] == 0 {
			mask = mask[:len(mask)-1]
		}

		// Make sure we keep ep alive. We may have stopped referencing
		// ep's data pointer sometime before this point and it's possible
		// for that memory to get freed.
		KeepAlive(ep)
		return
	}

	// stack
	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
		found := false
		var u unwinder
		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
				found = true
				break
			}
		}
		if found {
			locals, _, _ := u.frame.getStackMap(false)
			if locals.n == 0 {
				return
			}
			size := uintptr(locals.n) * goarch.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
				mask[i/goarch.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}

	// otherwise, not something the GC knows about.
	// possibly read-only data, like malloc(0).
	// must not have pointers
	return
}
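
// For example (illustrative only): for a heap-allocated value obtained with
// new(struct{ p, q *int }), passing that pointer to getgcmask reports
// []byte{1, 1}, since both words of the object are pointer slots.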

// userArenaHeapBitsSetType is the equivalent of heapBitsSetType but for
// non-slice-backing-store Go values allocated in a user arena chunk. It
// sets up the heap bitmap for the value with type typ allocated at address ptr.
// base is the base address of the arena chunk.
func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan) {
	base := s.base()
	h := writeHeapBitsForAddr(uintptr(ptr))

	// Our last allocation might have ended right at a noMorePtrs mark,
	// which we would not have erased. We need to erase that mark here,
	// because we're going to start adding new heap bitmap bits.
	// We only need to clear one mark, because below we make sure to
	// pad out the bits with zeroes and only write one noMorePtrs bit
	// for each new object.
	// (This is only necessary at noMorePtrs boundaries, as noMorePtrs
	// marks within an object allocated with newAt will be erased by
	// the normal writeHeapBitsForAddr mechanism.)
	//
	// Note that we skip this if this is the first allocation in the
	// arena because there's definitely no previous noMorePtrs mark
	// (in fact, we *must* do this, because we're going to try to back
	// up a pointer to fix this up).
	if uintptr(ptr)%(8*goarch.PtrSize*goarch.PtrSize) == 0 && uintptr(ptr) != base {
		// Back up one pointer and rewrite that pointer. That will
		// cause the writeHeapBits implementation to clear the
		// noMorePtrs bit we need to clear.
		r := heapBitsForAddr(uintptr(ptr)-goarch.PtrSize, goarch.PtrSize)
		_, p := r.next()
		b := uintptr(0)
		if p == uintptr(ptr)-goarch.PtrSize {
			b = 1
		}
		h = writeHeapBitsForAddr(uintptr(ptr) - goarch.PtrSize)
		h = h.write(b, 1)
	}

	p := typ.GCData // start of 1-bit pointer mask (or GC program)
	var gcProgBits uintptr
	if typ.Kind_&kindGCProg != 0 {
		// Expand gc program, using the object itself for storage.
		gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr))
		p = (*byte)(ptr)
	}
	nb := typ.PtrBytes / goarch.PtrSize

	for i := uintptr(0); i < nb; i += ptrBits {
		k := nb - i
		if k > ptrBits {
			k = ptrBits
		}
		h = h.write(readUintptr(addb(p, i/8)), k)
	}
	// Note: we call pad here to ensure we emit explicit 0 bits
	// for the pointerless tail of the object. This ensures that
	// there's only a single noMorePtrs mark for the next object
	// to clear. We don't need to do this to clear stale noMorePtrs
	// markers from previous uses because arena chunk pointer bitmaps
	// are always fully cleared when reused.
	h = h.pad(typ.Size_ - typ.PtrBytes)
	h.flush(uintptr(ptr), typ.Size_)

	if typ.Kind_&kindGCProg != 0 {
		// Zero out temporary ptrmask buffer inside object.
		memclrNoHeapPointers(ptr, (gcProgBits+7)/8)
	}

	// Double-check that the bitmap was written out correctly.
	//
	// Derived from heapBitsSetType.
	const doubleCheck = false
	if doubleCheck {
		size := typ.Size_
		x := uintptr(ptr)
		h := heapBitsForAddr(x, size)
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			// Compute the pointer bit we want at offset i.
			want := false
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
			}
			if want {
				var addr uintptr
				h, addr = h.next()
				if addr != x+i {
					throw("userArenaHeapBitsSetType: pointer entry not correct")
				}
			}
		}
		if _, addr := h.next(); addr != 0 {
			throw("userArenaHeapBitsSetType: extra pointer")
		}
	}
}

// For goexperiment.AllocHeaders.
type typePointers struct {
	addr uintptr
}

// For goexperiment.AllocHeaders.
//
//go:nosplit
func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
	panic("not implemented")
}

// For goexperiment.AllocHeaders.
//
//go:nosplit
func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
	panic("not implemented")
}

// For goexperiment.AllocHeaders.
//
//go:nosplit
func (tp typePointers) nextFast() (typePointers, uintptr) {
	panic("not implemented")
}

// For goexperiment.AllocHeaders.
//
//go:nosplit
func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
	panic("not implemented")
}

// For goexperiment.AllocHeaders.
//
//go:nosplit
func (tp typePointers) fastForward(n, limit uintptr) typePointers {
	panic("not implemented")
}

// For goexperiment.AllocHeaders, to pass TestIntendedInlining.
func (s *mspan) writeUserArenaHeapBits() {
	panic("not implemented")
}

// For goexperiment.AllocHeaders, to pass TestIntendedInlining.
func heapBitsSlice() {
	panic("not implemented")
}