src/runtime/mbitmap.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// addb returns the byte pointer p+n.
//
//go:nowritebarrier
//go:nosplit
func addb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}

// subtractb returns the byte pointer p-n.
//
//go:nowritebarrier
//go:nosplit
func subtractb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, -n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}

// add1 returns the byte pointer p+1.
//
//go:nowritebarrier
//go:nosplit
func add1(p *byte) *byte {
	// Note: wrote out full expression instead of calling addb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}

// subtract1 returns the byte pointer p-1.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nowritebarrier
//go:nosplit
func subtract1(p *byte) *byte {
	// Note: wrote out full expression instead of calling subtractb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}

// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte with a single bit set that can be &ed with *bytep
// to see if the bit has been set.
// *m.byte&m.mask != 0 indicates the mark bit is set.
// index can be used along with span information to generate
// the address of the object in the heap.
// We maintain one set of mark bits per size class per span.
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}

// allocBitsForIndex returns a bit mask of the span's allocated bits for the given index.
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}

// refillAllocCache takes 8 bytes s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uint16) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}
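
// The sketch below is illustrative only (not part of the runtime): it shows
// the same little-endian packing refillAllocCache performs. Byte i of the
// bitmap lands in bits [8*i, 8*i+8) of the cache word, and the word is then
// inverted so that a 1 bit means "free slot" and ctz can find it.
func packAllocCacheSketch(bytes [8]uint8) uint64 {
	var aCache uint64
	for i := uint(0); i < 8; i++ {
		aCache |= uint64(bytes[i]) << (i * 8) // little-endian packing
	}
	return ^aCache // invert: 1 now means free
}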

// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uint16 {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.TrailingZeros64(aCache)
	for bitIndex == 64 {
		// Move index to start of next cached bits.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the next 64 alloc bits.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.TrailingZeros64(aCache)
		// Nothing was available in the cached bits;
		// grab the next 8 bytes and try again.
	}
	result := sfreeindex + uint16(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}
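
// An illustrative walk (not part of the runtime) showing how nextFreeIndex
// consumes a cache word: each TrailingZeros64 locates the next 1 (free) bit,
// and shifting by bitIndex+1 discards everything up to and including the
// slot just returned.
func freeIndicesSketch(cache uint64, base uint16) []uint16 {
	var out []uint16
	for cache != 0 {
		bit := sys.TrailingZeros64(cache)
		out = append(out, base+uint16(bit))
		cache >>= uint(bit + 1) // drop the bit just used (a shift of 64 yields 0)
		base += uint16(bit + 1)
	}
	return out
}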

// isFree reports whether the index'th object in s is unallocated.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool {
	if index < uintptr(s.freeIndexForScan) {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}

// divideByElemSize returns n/s.elemsize.
// n must be within [0, s.npages*_PageSize),
// or may be exactly s.npages*_PageSize
// if s.elemsize is from sizeclasses.go.
//
// nosplit, because it is called by objIndex, which is nosplit.
//
//go:nosplit
func (s *mspan) divideByElemSize(n uintptr) uintptr {
	const doubleCheck = false

	// See explanation in mksizeclasses.go's computeDivisionConstants.
	q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)

	if doubleCheck && q != n/s.elemsize {
		println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
		throw("bad magic division")
	}
	return q
}
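
// A worked example (illustrative only, with a hypothetical size class): one
// valid magic multiplier for elemsize 48 is ceil(2^32 / 48), and
// (n * divMul) >> 32 then equals n / 48 for every offset that fits in a
// span. The sketch below verifies that over a page-sized range.
func magicDivideSketch() bool {
	const elemsize = 48                              // hypothetical size class
	const divMul = (1<<32 + elemsize - 1) / elemsize // ceil(2^32 / 48)
	for n := uintptr(0); n < 1<<13; n++ {
		if uintptr((uint64(n)*divMul)>>32) != n/elemsize {
			return false
		}
	}
	return true
}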

// nosplit, because it is called by other nosplit code like findObject.
//
//go:nosplit
func (s *mspan) objIndex(p uintptr) uintptr {
	return s.divideByElemSize(p - s.base())
}

func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}

func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}

func (s *mspan) markBitsForBase() markBits {
	return markBits{&s.gcmarkBits.x, uint8(1), 0}
}

// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}

// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}

// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}
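
// For illustration only (not part of the runtime): a markBits cursor visits
// bit 0 of byte 0, bit 1 of byte 0, ..., bit 7 of byte 0, then bit 0 of
// byte 1, and so on. The sketch below performs the same traversal over a
// plain byte slice using the same step as (*markBits).advance.
func walkMarkBitsSketch(bitmap []uint8, nelems int) []bool {
	marked := make([]bool, 0, nelems)
	bytei, mask := 0, uint8(1)
	for i := 0; i < nelems; i++ {
		marked = append(marked, bitmap[bytei]&mask != 0)
		if mask == 1<<7 {
			bytei++ // move to the next bitmap byte
			mask = 1
		} else {
			mask <<= 1
		}
	}
	return marked
}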

// clobberdeadPtr is a special value that is used by the compiler to
// clobber dead stack slots, when -clobberdead flag is set.
const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))

// badPointer throws bad pointer in heap panic.
func badPointer(s *mspan, p, refBase, refOff uintptr) {
	// Typically this indicates an incorrect use
	// of unsafe or cgo to store a bad pointer in
	// the Go heap. It may also indicate a runtime
	// bug.
	//
	// TODO(austin): We could be more aggressive
	// and detect pointers to unallocated objects
	// in allocated spans.
	printlock()
	print("runtime: pointer ", hex(p))
	if s != nil {
		state := s.state.get()
		if state != mSpanInUse {
			print(" to unallocated span")
		} else {
			print(" to unused region of span")
		}
		print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
	}
	print("\n")
	if refBase != 0 {
		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
		gcDumpObject("object", refBase, refOff)
	}
	getg().m.traceback = 2
	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}

// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p is an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
//
// It is nosplit so it is safe for p to be a pointer to the current
// goroutine's stack. Since p is a uintptr, it would not be adjusted
// if the stack were to move.
//
//go:nosplit
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If s is nil, the virtual address has never been part of the heap.
	// This pointer may be to some mmap'd region, so we allow it.
	if s == nil {
		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
			// Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64
			// for now, as they are the only platforms where the compiler's
			// clobberdead mode is implemented. On these platforms
			// clobberdeadPtr cannot be a valid address.
			badPointer(s, p, refBase, refOff)
		}
		return
	}
	// If p is a bad pointer, it may not be in s's bounds.
	//
	// Check s.state to synchronize with span initialization
	// before checking other fields. See also spanOfHeap.
	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
		// Pointers into stacks are also ok, the runtime manages these explicitly.
		if state == mSpanManual {
			return
		}
		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			badPointer(s, p, refBase, refOff)
		}
		return
	}

	objIndex = s.objIndex(p)
	base = s.base() + objIndex*s.elemsize
	return
}

// reflect_verifyNotInHeapPtr reports whether converting the not-in-heap pointer into a unsafe.Pointer is ok.
//
//go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
func reflect_verifyNotInHeapPtr(p uintptr) bool {
	// Conversion to a pointer is ok as long as findObject above does not call badPointer.
	// Since we're already promised that p doesn't point into the heap, just disallow heap
	// pointers and the special clobbered pointer.
	return spanOf(p) == nil && p != clobberdeadPtr
}

const ptrBits = 8 * goarch.PtrSize

// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / goarch.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		if mask == 0 {
			bits = addb(bits, 1)
			if *bits == 0 {
				// Skip 8 words.
				i += 7 * goarch.PtrSize
				continue
			}
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				p := buf.get1()
				p[0] = *dstx
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				p := buf.get2()
				p[0] = *dstx
				p[1] = *srcx
			}
		}
		mask <<= 1
	}
}
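
// A minimal sketch (not runtime code) of the bitmap walk above: one mask bit
// per pointer-sized word, advancing through the mask a byte at a time and
// skipping a whole byte's worth of words when it contains no pointer bits.
// It assumes bits holds at least (nwords+7)/8 bytes.
func pointerWordsSketch(bits []uint8, nwords int) []int {
	var ptrs []int
	for w := 0; w < nwords; w++ {
		b := bits[w/8]
		if b == 0 {
			w += 7 // no pointers in these 8 words; the loop adds the 8th
			continue
		}
		if b&(1<<(w%8)) != 0 {
			ptrs = append(ptrs, w)
		}
	}
	return ptrs
}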

// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
// dst, src, and size must be pointer-aligned.
// The type typ must have a plain bitmap, not a GC program.
// The only use of this function is in channel sends, and the
// 64 kB channel element limit takes care of this.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe them as an atomic action.
//
// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.Size_ != size {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if typ.Kind_&kindGCProg != 0 {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " with GC prog")
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.enabled {
		return
	}
	ptrmask := typ.GCData
	buf := &getg().m.p.ptr().wbBuf
	var bits uint32
	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
		if i&(goarch.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}

// countAlloc returns the number of objects allocated in span s by
// scanning the mark bitmap.
func (s *mspan) countAlloc() int {
	count := 0
	bytes := divRoundUp(uintptr(s.nelems), 8)
	// Iterate over each 8-byte chunk and count allocations
	// with an intrinsic. Note that newMarkBits guarantees that
	// gcmarkBits will be 8-byte aligned, so we don't have to
	// worry about edge cases, irrelevant bits will simply be zero.
	for i := uintptr(0); i < bytes; i += 8 {
		// Extract 64 bits from the byte pointer and get a OnesCount of
		// the mark bits. Bits past nelems are never set, so the
		// trailing bits of the final word cannot inflate the count.
		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
		count += sys.OnesCount64(mrkBits)
	}
	return count
}
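
// An equivalent formulation (illustrative only, not runtime code) of the
// popcount loop above, operating on a byte slice whose length is a multiple
// of 8, as the span's 8-byte-aligned, zero-padded bitmap effectively is:
func countBitsSketch(markBytes []uint8) int {
	count := 0
	for i := 0; i+8 <= len(markBytes); i += 8 {
		var word uint64
		for j := uint(0); j < 8; j++ {
			word |= uint64(markBytes[i+int(j)]) << (j * 8) // little-endian word
		}
		count += sys.OnesCount64(word)
	}
	return count
}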

// readUintptr reads the bytes starting at the aligned pointer p into a uintptr.
// The bytes are interpreted as a little-endian word, so big-endian
// machines need a byte swap.
func readUintptr(p *byte) uintptr {
	x := *(*uintptr)(unsafe.Pointer(p))
	if goarch.BigEndian {
		if goarch.PtrSize == 8 {
			return uintptr(sys.Bswap64(uint64(x)))
		}
		return uintptr(sys.Bswap32(uint32(x)))
	}
	return x
}
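
// A small usage sketch (not part of the runtime): because the word is
// defined to be little-endian, the same byte pattern yields the same value
// regardless of the host byte order.
func readUintptrSketch() uintptr {
	var word uint64 // aligned backing store for the byte view
	p := (*[8]byte)(unsafe.Pointer(&word))
	p[0] = 0x01
	return readUintptr(&p[0]) // 1 on both little- and big-endian hosts
}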

var debugPtrmask struct {
	lock mutex
	data *byte
}

// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/goarch.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/goarch.PtrSize + 7) / 8
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, &x[0])
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}

// Packed GC pointer bitmaps, aka GC programs.
//
// For large types containing arrays, the type information has a
// natural repetition that can be encoded to save space in the
// binary and in the memory representation of the type information.
//
// The compact form is a sequence of (op, arg) pairs, where each op
// describes one instruction, either emitting some bits from the type
// info or repeating the last n bits.
//
// The possible instructions are:
//
//	00000000: stop
//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
//	10000000 n c: repeat the previous n bits c times; n, c are varints
//	1nnnnnnn c: repeat the previous n bits c times; c is a varint
//
// runGCProg returns the number of 1-bit entries written to memory.
func runGCProg(prog, dst *byte) uintptr {
	dstStart := dst

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}

		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 means end of program.
			if n == 0 {
				// Program is over.
				break Run
			}
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}

		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// Count is encoded in a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to copy

		// If the number of bits being repeated is small, load them
		// into a register and use that register for the possibly
		// many repetitions of that pattern. Each repetition then
		// shifts the pattern into the bit buffer and flushes full
		// bytes, instead of rereading the bits from memory.
		// For repeats whose pattern cannot fit in a register,
		// fall through to the memory-to-memory copy loop below.
		src := dst
		const maxBits = goarch.PtrSize*8 - 7
		if n <= maxBits {
			// Start with bits in output buffer.
			pattern := bits
			npattern := nbits

			// If we need more bits, fetch them from memory.
			src = subtract1(src)
			for npattern < n {
				pattern <<= 8
				pattern |= uintptr(*src)
				src = subtract1(src)
				npattern += 8
			}

			// We started with the whole bit output buffer,
			// and then we loaded bits from whole bytes.
			// We have a number of bits that is a multiple of 8,
			// but it may be more than n; trim the excess.
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			// Replicate pattern to at most maxBits.
			if npattern == 1 {
				// One bit being repeated.
				// If the bit is 1, make the pattern all 1s.
				// If the bit is 0, the pattern is all 0s,
				// so treat the whole repeat as a single run
				// of c zero bits: the flush below then emits
				// zero bytes without reloading anything.
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double pattern until the whole uintptr is filled.
					for nb <= goarch.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim away incomplete copy of original pattern in high bits.
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}

			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
			// Since pattern contains >8 bits, there will be full bytes to flush
			// on each iteration.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				for nbits >= 8 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
					nbits -= 8
				}
			}

			// Add final fragment to bit buffer.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}

		// Repeat; n too large to fit in a register.
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
		// Leading src fragment.
		src = subtractb(src, (off+7)/8)
		if frag := off & 7; frag != 0 {
			bits |= uintptr(*src) >> (8 - frag) << nbits
			src = add1(src)
			nbits += frag
			c -= frag
		}
		// Main loop: load one byte, write another.
		// The bits are rotating through the bit buffer.
		for i := c / 8; i > 0; i-- {
			bits |= uintptr(*src) << nbits
			src = add1(src)
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
		// Final src fragment.
		if c %= 8; c > 0 {
			bits |= (uintptr(*src) & (1<<c - 1)) << nbits
			nbits += c
		}
	}

	// Write any final bits out, using full-byte writes, even for the final byte.
	totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
	nbits += -nbits & 7
	for ; nbits > 0; nbits -= 8 {
		*dst = uint8(bits)
		dst = add1(dst)
		bits >>= 8
	}
	return totalBits
}
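
// A worked example with a hypothetical program (illustrative only): the byte
// sequence {0x02, 0x01, 0x82, 0x02, 0x00} decodes as "emit the 2 literal
// bits 01 (pointer word, then scalar word), repeat the previous 2 bits 2
// more times, stop", producing the 6-bit mask 010101, i.e. 0x15 in the
// first output byte.
func runGCProgSketch() bool {
	prog := []byte{0x02, 0x01, 0x82, 0x02, 0x00}
	var out [8]byte
	n := runGCProg(&prog[0], &out[0])
	return n == 6 && out[0] == 0x15
}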

// materializeGCProg allocates space for the (1-bit) pointer bitmask
// for an object of size ptrdata. Then it fills that space with the
// pointer bitmask specified by the program prog.
// The bitmask starts at s.startAddr.
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
	// Each word of ptrdata needs one bit in the bitmap.
	bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
	// Compute the number of pages needed for bitmapBytes.
	pages := divRoundUp(bitmapBytes, pageSize)
	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
	runGCProg(addb(prog, 4), (*byte)(unsafe.Pointer(s.startAddr)))
	return s
}

func dematerializeGCProg(s *mspan) {
	mheap_.freeManual(s, spanAllocPtrScalarBits)
}

// dumpGCProg prints the GC program prog for debugging.
func dumpGCProg(p *byte) {
	nptr := 0
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}

// Testing.

// reflect_gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x any) []byte {
	return getgcmask(x)
}