Source file: src/runtime/export_test.go (package runtime)
1 // Copyright 2010 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 // Export guts for testing.
6
7 package runtime
8
9 import (
10 "internal/abi"
11 "internal/goarch"
12 "internal/goexperiment"
13 "internal/goos"
14 "runtime/internal/atomic"
15 "runtime/internal/sys"
16 "unsafe"
17 )
18
19 var Fadd64 = fadd64
20 var Fsub64 = fsub64
21 var Fmul64 = fmul64
22 var Fdiv64 = fdiv64
23 var F64to32 = f64to32
24 var F32to64 = f32to64
25 var Fcmp64 = fcmp64
26 var Fintto64 = fintto64
27 var F64toint = f64toint
28
29 var Entersyscall = entersyscall
30 var Exitsyscall = exitsyscall
31 var LockedOSThread = lockedOSThread
32 var Xadduintptr = atomic.Xadduintptr
33
34 var ReadRandomFailed = &readRandomFailed
35
36 var Fastlog2 = fastlog2
37
38 var Atoi = atoi
39 var Atoi32 = atoi32
40 var ParseByteCount = parseByteCount
41
42 var Nanotime = nanotime
43 var NetpollBreak = netpollBreak
44 var Usleep = usleep
45
46 var PhysPageSize = physPageSize
47 var PhysHugePageSize = physHugePageSize
48
49 var NetpollGenericInit = netpollGenericInit
50
51 var Memmove = memmove
52 var MemclrNoHeapPointers = memclrNoHeapPointers
53
54 var CgoCheckPointer = cgoCheckPointer
55
56 const CrashStackImplemented = crashStackImplemented
57
58 const TracebackInnerFrames = tracebackInnerFrames
59 const TracebackOuterFrames = tracebackOuterFrames
60
61 var MapKeys = keys
62 var MapValues = values
63
64 var LockPartialOrder = lockPartialOrder
65
66 type LockRank lockRank
67
68 func (l LockRank) String() string {
69 return lockRank(l).String()
70 }
71
72 const PreemptMSupported = preemptMSupported
73
74 type LFNode struct {
75 Next uint64
76 Pushcnt uintptr
77 }
78
79 func LFStackPush(head *uint64, node *LFNode) {
80 (*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
81 }
82
83 func LFStackPop(head *uint64) *LFNode {
84 return (*LFNode)((*lfstack)(head).pop())
85 }
86 func LFNodeValidate(node *LFNode) {
87 lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
88 }
89
90 func Netpoll(delta int64) {
91 systemstack(func() {
92 netpoll(delta)
93 })
94 }
95
96 func GCMask(x any) (ret []byte) {
97 systemstack(func() {
98 ret = getgcmask(x)
99 })
100 return
101 }
102
103 func RunSchedLocalQueueTest() {
104 pp := new(p)
105 gs := make([]g, len(pp.runq))
106 Escape(gs)
107 for i := 0; i < len(pp.runq); i++ {
108 if g, _ := runqget(pp); g != nil {
109 throw("runq is not empty initially")
110 }
111 for j := 0; j < i; j++ {
112 runqput(pp, &gs[i], false)
113 }
114 for j := 0; j < i; j++ {
115 if g, _ := runqget(pp); g != &gs[i] {
116 print("bad element at iter ", i, "/", j, "\n")
117 throw("bad element")
118 }
119 }
120 if g, _ := runqget(pp); g != nil {
121 throw("runq is not empty afterwards")
122 }
123 }
124 }
125
126 func RunSchedLocalQueueStealTest() {
127 p1 := new(p)
128 p2 := new(p)
129 gs := make([]g, len(p1.runq))
130 Escape(gs)
131 for i := 0; i < len(p1.runq); i++ {
132 for j := 0; j < i; j++ {
133 gs[j].sig = 0
134 runqput(p1, &gs[j], false)
135 }
136 gp := runqsteal(p2, p1, true)
137 s := 0
138 if gp != nil {
139 s++
140 gp.sig++
141 }
142 for {
143 gp, _ = runqget(p2)
144 if gp == nil {
145 break
146 }
147 s++
148 gp.sig++
149 }
150 for {
151 gp, _ = runqget(p1)
152 if gp == nil {
153 break
154 }
155 gp.sig++
156 }
157 for j := 0; j < i; j++ {
158 if gs[j].sig != 1 {
159 print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
160 throw("bad element")
161 }
162 }
163 if s != i/2 && s != i/2+1 {
164 print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
165 throw("bad steal")
166 }
167 }
168 }
169
170 func RunSchedLocalQueueEmptyTest(iters int) {
171 // Test that runq is not spuriously reported as empty.
172 // Runq emptiness affects scheduling decisions and spurious emptiness
173 // can lead to underutilization (both runnable goroutines and idle Ps
174 // coexist for an arbitrarily long time).
175 done := make(chan bool, 1)
176 p := new(p)
177 gs := make([]g, 2)
178 Escape(gs)
179 ready := new(uint32)
180 for i := 0; i < iters; i++ {
181 *ready = 0
182 next0 := (i & 1) == 0
183 next1 := (i & 2) == 0
184 runqput(p, &gs[0], next0)
185 go func() {
186 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
187 }
188 if runqempty(p) {
189 println("next:", next0, next1)
190 throw("queue is empty")
191 }
192 done <- true
193 }()
194 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
195 }
196 runqput(p, &gs[1], next1)
197 runqget(p)
198 <-done
199 runqget(p)
200 }
201 }
202
203 var (
204 StringHash = stringHash
205 BytesHash = bytesHash
206 Int32Hash = int32Hash
207 Int64Hash = int64Hash
208 MemHash = memhash
209 MemHash32 = memhash32
210 MemHash64 = memhash64
211 EfaceHash = efaceHash
212 IfaceHash = ifaceHash
213 )
214
215 var UseAeshash = &useAeshash
216
217 func MemclrBytes(b []byte) {
218 s := (*slice)(unsafe.Pointer(&b))
219 memclrNoHeapPointers(s.array, uintptr(s.len))
220 }
221
222 const HashLoad = hashLoad
223
224 // entry point for testing
225 func GostringW(w []uint16) (s string) {
226 systemstack(func() {
227 s = gostringw(&w[0])
228 })
229 return
230 }
231
232 var Open = open
233 var Close = closefd
234 var Read = read
235 var Write = write
236
237 func Envs() []string { return envs }
238 func SetEnvs(e []string) { envs = e }
239
240 // For benchmarking.
241
242 // blockWrapper is a wrapper type that ensures a T is placed within a
243 // large object. This is necessary for safely benchmarking things
244 // that manipulate the heap bitmap, like heapBitsSetType.
245 //
246 // More specifically, allocating threads assume they're the sole writers
247 // to their span's heap bits, which allows those writes to be non-atomic.
248 // The heap bitmap is written byte-wise, so if one tried to call heapBitsSetType
249 // on an existing object in a small object span, we might corrupt that
250 // span's bitmap with a concurrent byte write to the heap bitmap. Large
251 // object spans contain exactly one object, so we can be sure no other P
252 // is going to be allocating from it concurrently, hence this wrapper type
253 // which ensures we have a T in a large object span.
254 type blockWrapper[T any] struct {
255 value T
256 _ [_MaxSmallSize]byte
257 }
258
259 func BenchSetType[T any](n int, resetTimer func()) {
260 x := new(blockWrapper[T])
261
262
263
264 Escape(x)
265
266
267 var i any = *new(T)
268 e := *efaceOf(&i)
269 t := e._type
270
271
272 benchSetType(n, resetTimer, 1, unsafe.Pointer(&x.value), t)
273 }
274
275 const maxArrayBlockWrapperLen = 32
276
277 // arrayBlockWrapper is like blockWrapper, but the interior value is
278 // intended to be used as a backing store for a slice.
279 type arrayBlockWrapper[T any] struct {
280 value [maxArrayBlockWrapperLen]T
281 _ [_MaxSmallSize]byte
282 }
283
284 // arrayLargeBlockWrapper is like arrayBlockWrapper, but the interior
285 // array accommodates many more elements.
286 type arrayLargeBlockWrapper[T any] struct {
287 value [1024]T
288 _ [_MaxSmallSize]byte
289 }
290
291 func BenchSetTypeSlice[T any](n int, resetTimer func(), len int) {
292 // We have two separate cases here because we want to avoid
293 // big types in combination with relatively small slices producing a
294 // really big allocation, which would likely force a GC and skew
295 // the benchmark results.
296 var y unsafe.Pointer
297 if len <= maxArrayBlockWrapperLen {
298 x := new(arrayBlockWrapper[T])
299
300
301 Escape(x)
302 y = unsafe.Pointer(&x.value[0])
303 } else {
304 x := new(arrayLargeBlockWrapper[T])
305 Escape(x)
306 y = unsafe.Pointer(&x.value[0])
307 }
308
309
310 var i any = *new(T)
311 e := *efaceOf(&i)
312 t := e._type
313
314
315
316 benchSetType(n, resetTimer, len, y, t)
317 }
318
319 // benchSetType is the implementation of the BenchSetType* functions.
320 // It benchmarks n calls to heapBitsSetType for len consecutive values of
321 // type t laid out starting at x.
322 //
323 // x must point into a large object span so that it is safe to rewrite the
324 // span's heap bitmap: a large object span holds exactly one object, so no
325 // other goroutine can be allocating from it (and writing its heap bits)
326 // concurrently.
327 //
328 // This benchmark is not compatible with the allocheaders experiment, which
329 // replaces heapBitsSetType; in that configuration the function panics and
330 // the test code is expected to skip it.
331 func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type) {
332 // This benchmark doesn't work with the allocheaders experiment. It sets up
333 // an elaborate scenario so it can benchmark heapBitsSetType safely, and
334 // reproducing that setup for the allocheaders variant would be much more
335 // complex. Fail loudly instead and rely on the test code to skip us.
336 if goexperiment.AllocHeaders {
337 panic("called benchSetType with allocheaders experiment enabled")
338 }
339
340
341 size := t.Size() * uintptr(len)
342
343
344 s := spanOfHeap(uintptr(x))
345 if s == nil {
346 panic("no heap span for input")
347 }
348 if s.spanclass.sizeclass() != 0 {
349 panic("span is not a large object span")
350 }
351
352
353
354 allocSize := roundupsize(size, t.PtrBytes == 0)
355 if s.npages*pageSize < allocSize {
356 panic("backing span not large enough for benchmark")
357 }
358
359
360
361 resetTimer()
362 systemstack(func() {
363 for i := 0; i < n; i++ {
364 heapBitsSetType(uintptr(x), allocSize, size, t)
365 }
366 })
367
368
369 KeepAlive(x)
370 }
371
372 const PtrSize = goarch.PtrSize
373
374 var ForceGCPeriod = &forcegcperiod
375
376 // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
377 // the "environment" traceback level, so later calls to
378 // debug.SetTraceback (e.g. from testing timeouts) can't lower it.
379 func SetTracebackEnv(level string) {
380 setTraceback(level)
381 traceback_env = traceback_cache
382 }
383
384 var ReadUnaligned32 = readUnaligned32
385 var ReadUnaligned64 = readUnaligned64
386
387 func CountPagesInUse() (pagesInUse, counted uintptr) {
388 stw := stopTheWorld(stwForTestCountPagesInUse)
389
390 pagesInUse = mheap_.pagesInUse.Load()
391
392 for _, s := range mheap_.allspans {
393 if s.state.get() == mSpanInUse {
394 counted += s.npages
395 }
396 }
397
398 startTheWorld(stw)
399
400 return
401 }
402
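// A note on the next three helpers (not in the original source): the
// legacy Fastrand entry points are kept for tests and are now thin
// wrappers around the runtime's current rand and randn functions.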
403 func Fastrand() uint32 { return uint32(rand()) }
404 func Fastrand64() uint64 { return rand() }
405 func Fastrandn(n uint32) uint32 { return randn(n) }
406
407 type ProfBuf profBuf
408
409 func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
410 return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
411 }
412
413 func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
414 (*profBuf)(p).write(tag, now, hdr, stk)
415 }
416
417 const (
418 ProfBufBlocking = profBufBlocking
419 ProfBufNonBlocking = profBufNonBlocking
420 )
421
422 func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
423 return (*profBuf)(p).read(mode)
424 }
425
426 func (p *ProfBuf) Close() {
427 (*profBuf)(p).close()
428 }
429
430 func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
431 stw := stopTheWorld(stwForTestReadMetricsSlow)
432
433 // Initialize the metrics beforehand because this could
434 // allocate and skew the stats.
435 metricsLock()
436 initMetrics()
437
438 systemstack(func() {
439
440
441 getg().racectx = getg().m.curg.racectx
442
443 // Read the metrics once before, in case it allocates and skews the metrics.
444 // readMetricsLocked is designed to only allocate the first time it is called
445 // with a given slice of samples. In effect, this extra read tests that this
446 // remains true, since otherwise the second readMetricsLocked call below could
447 // allocate before it returns.
448 readMetricsLocked(samplesp, len, cap)
449
450 // Read memstats before the real metrics read below. readmemstats_m
451 // flushes the mcaches, which readMetricsLocked does not, so reading
452 // them in the other order could produce inconsistent statistics
453 // between the two.
454 readmemstats_m(memStats)
455
456
457
458
459 readMetricsLocked(samplesp, len, cap)
460
461
462 getg().racectx = 0
463 })
464 metricsUnlock()
465
466 startTheWorld(stw)
467 }
468
469 var DoubleCheckReadMemStats = &doubleCheckReadMemStats
470
471 // ReadMemStatsSlow returns both the runtime-computed MemStats and
472 // MemStats accumulated by scanning the heap.
473 func ReadMemStatsSlow() (base, slow MemStats) {
474 stw := stopTheWorld(stwForTestReadMemStatsSlow)
475
476
477 systemstack(func() {
478
479 getg().m.mallocing++
480
481 readmemstats_m(&base)
482
483
484
485 slow = base
486 slow.Alloc = 0
487 slow.TotalAlloc = 0
488 slow.Mallocs = 0
489 slow.Frees = 0
490 slow.HeapReleased = 0
491 var bySize [_NumSizeClasses]struct {
492 Mallocs, Frees uint64
493 }
494
495
496 for _, s := range mheap_.allspans {
497 if s.state.get() != mSpanInUse {
498 continue
499 }
500 if s.isUnusedUserArenaChunk() {
501 continue
502 }
503 if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
504 slow.Mallocs++
505 slow.Alloc += uint64(s.elemsize)
506 } else {
507 slow.Mallocs += uint64(s.allocCount)
508 slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
509 bySize[sizeclass].Mallocs += uint64(s.allocCount)
510 }
511 }
512
513
514 var m heapStatsDelta
515 memstats.heapStats.unsafeRead(&m)
516
517
518 var smallFree uint64
519 for i := 0; i < _NumSizeClasses; i++ {
520 slow.Frees += m.smallFreeCount[i]
521 bySize[i].Frees += m.smallFreeCount[i]
522 bySize[i].Mallocs += m.smallFreeCount[i]
523 smallFree += m.smallFreeCount[i] * uint64(class_to_size[i])
524 }
525 slow.Frees += m.tinyAllocCount + m.largeFreeCount
526 slow.Mallocs += slow.Frees
527
528 slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree
529
530 for i := range slow.BySize {
531 slow.BySize[i].Mallocs = bySize[i].Mallocs
532 slow.BySize[i].Frees = bySize[i].Frees
533 }
534
535 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
536 chunk := mheap_.pages.tryChunkOf(i)
537 if chunk == nil {
538 continue
539 }
540 pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
541 slow.HeapReleased += uint64(pg) * pageSize
542 }
543 for _, p := range allp {
544 pg := sys.OnesCount64(p.pcache.scav)
545 slow.HeapReleased += uint64(pg) * pageSize
546 }
547
548 getg().m.mallocing--
549 })
550
551 startTheWorld(stw)
552 return
553 }
554
555 // ShrinkStackAndVerifyFramePointers shrinks the stack of the current
556 // goroutine and then verifies that frame pointer unwinding (FPCallers)
557 // still works on the moved stack.
558 func ShrinkStackAndVerifyFramePointers() {
559 before := stackPoisonCopy
560 defer func() { stackPoisonCopy = before }()
561 stackPoisonCopy = 1
562
563 gp := getg()
564 systemstack(func() {
565 shrinkstack(gp)
566 })
567
568
569 FPCallers(make([]uintptr, 1024))
570 }
571
572 // BlockOnSystemStack switches to the system stack, prints "x" to
573 // stderr, and blocks in a stack containing
574 // "runtime.blockOnSystemStackInternal".
575 func BlockOnSystemStack() {
576 systemstack(blockOnSystemStackInternal)
577 }
578
579 func blockOnSystemStackInternal() {
580 print("x\n")
581 lock(&deadlock)
582 lock(&deadlock)
583 }
584
585 type RWMutex struct {
586 rw rwmutex
587 }
588
589 func (rw *RWMutex) Init() {
590 rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
591 }
592
593 func (rw *RWMutex) RLock() {
594 rw.rw.rlock()
595 }
596
597 func (rw *RWMutex) RUnlock() {
598 rw.rw.runlock()
599 }
600
601 func (rw *RWMutex) Lock() {
602 rw.rw.lock()
603 }
604
605 func (rw *RWMutex) Unlock() {
606 rw.rw.unlock()
607 }
608
609 const RuntimeHmapSize = unsafe.Sizeof(hmap{})
610
611 func MapBucketsCount(m map[int]int) int {
612 h := *(**hmap)(unsafe.Pointer(&m))
613 return 1 << h.B
614 }
615
616 func MapBucketsPointerIsNil(m map[int]int) bool {
617 h := *(**hmap)(unsafe.Pointer(&m))
618 return h.buckets == nil
619 }
620
621 func OverLoadFactor(count int, B uint8) bool {
622 return overLoadFactor(count, B)
623 }
624
625 func LockOSCounts() (external, internal uint32) {
626 gp := getg()
627 if gp.m.lockedExt+gp.m.lockedInt == 0 {
628 if gp.lockedm != 0 {
629 panic("lockedm on non-locked goroutine")
630 }
631 } else {
632 if gp.lockedm == 0 {
633 panic("nil lockedm on locked goroutine")
634 }
635 }
636 return gp.m.lockedExt, gp.m.lockedInt
637 }
638
639
640 func TracebackSystemstack(stk []uintptr, i int) int {
641 if i == 0 {
642 pc, sp := getcallerpc(), getcallersp()
643 var u unwinder
644 u.initAt(pc, sp, 0, getg(), unwindJumpStack)
645 return tracebackPCs(&u, 0, stk)
646 }
647 n := 0
648 systemstack(func() {
649 n = TracebackSystemstack(stk, i-1)
650 })
651 return n
652 }
653
654 func KeepNArenaHints(n int) {
655 hint := mheap_.arenaHints
656 for i := 1; i < n; i++ {
657 hint = hint.next
658 if hint == nil {
659 return
660 }
661 }
662 hint.next = nil
663 }
664
665 // MapNextArenaHint reserves a page at the next arena growth hint,
666 // preventing the arena from growing there, and returns the range of
667 // addresses that are no longer viable arena candidates. It sets ok to
668 // false if the reservation could not be made at the hinted address
669 // (for example because that address range was already reserved), in
670 // which case whatever reservation it did get is released again.
671 func MapNextArenaHint() (start, end uintptr, ok bool) {
672 hint := mheap_.arenaHints
673 addr := hint.addr
674 if hint.down {
675 start, end = addr-heapArenaBytes, addr
676 addr -= physPageSize
677 } else {
678 start, end = addr, addr+heapArenaBytes
679 }
680 got := sysReserve(unsafe.Pointer(addr), physPageSize)
681 ok = (addr == uintptr(got))
682 if !ok {
683 // We were unable to get the requested reservation.
684 // Release what we did get and fail.
685 sysFreeOS(got, physPageSize)
686 }
687 return
688 }
689
690 func GetNextArenaHint() uintptr {
691 return mheap_.arenaHints.addr
692 }
693
694 type G = g
695
696 type Sudog = sudog
697
698 func Getg() *G {
699 return getg()
700 }
701
702 func Goid() uint64 {
703 return getg().goid
704 }
705
706 func GIsWaitingOnMutex(gp *G) bool {
707 return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
708 }
709
710 var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
711
712
713 func PanicForTesting(b []byte, i int) byte {
714 return unexportedPanicForTesting(b, i)
715 }
716
717
718 func unexportedPanicForTesting(b []byte, i int) byte {
719 return b[i]
720 }
721
722 func G0StackOverflow() {
723 systemstack(func() {
724 g0 := getg()
725 sp := getcallersp()
726 // The stack bounds for the g0 stack are not always precise.
727 // Use an artificially small stack, to trigger a stack overflow
728 // without actually running out of the system stack (which may seg fault).
729 g0.stack.lo = sp - 4096 - stackSystem
730 g0.stackguard0 = g0.stack.lo + stackGuard
731 g0.stackguard1 = g0.stackguard0
732
733 stackOverflow(nil)
734 })
735 }
736
737 func stackOverflow(x *byte) {
738 var buf [256]byte
739 stackOverflow(&buf[0])
740 }
741
742 func MapTombstoneCheck(m map[int]int) {
743 // Make sure emptyOne and emptyRest are distributed correctly.
744 // We should have a series of filled and emptyOne cells, followed by
745 // a series of emptyRest cells.
746 h := *(**hmap)(unsafe.Pointer(&m))
747 i := any(m)
748 t := *(**maptype)(unsafe.Pointer(&i))
749
750 for x := 0; x < 1<<h.B; x++ {
751 b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
752 n := 0
753 for b := b0; b != nil; b = b.overflow(t) {
754 for i := 0; i < bucketCnt; i++ {
755 if b.tophash[i] != emptyRest {
756 n++
757 }
758 }
759 }
760 k := 0
761 for b := b0; b != nil; b = b.overflow(t) {
762 for i := 0; i < bucketCnt; i++ {
763 if k < n && b.tophash[i] == emptyRest {
764 panic("early emptyRest")
765 }
766 if k >= n && b.tophash[i] != emptyRest {
767 panic("late non-emptyRest")
768 }
769 if k == n-1 && b.tophash[i] == emptyOne {
770 panic("last non-emptyRest entry is emptyOne")
771 }
772 k++
773 }
774 }
775 }
776 }
777
778 func RunGetgThreadSwitchTest() {
779 // Test that getg works correctly with thread switch.
780 // With gccgo, if we generate getg inlined, the backend
781 // may cache the address of the TLS variable, which
782 // will become invalid after a thread switch. This test
783 // checks that the bad caching doesn't happen.
784
785 ch := make(chan int)
786 go func(ch chan int) {
787 ch <- 5
788 LockOSThread()
789 }(ch)
790
791 g1 := getg()
792
793 // Block on a receive. This is likely to get us a thread
794 // switch: the sender goroutine locks itself to its OS thread
795 // after sending, so when this goroutine resumes it may well be
796 // running on a different thread than before.
797 <-ch
798
799 g2 := getg()
800 if g1 != g2 {
801 panic("g1 != g2")
802 }
803
804 // Also test getg after some control flow, as the
805 // backend is sensitive to control flow.
806 g3 := getg()
807 if g1 != g3 {
808 panic("g1 != g3")
809 }
810 }
811
812 const (
813 PageSize = pageSize
814 PallocChunkPages = pallocChunkPages
815 PageAlloc64Bit = pageAlloc64Bit
816 PallocSumBytes = pallocSumBytes
817 )
818
819
820 type PallocSum pallocSum
821
822 func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
823 func (m PallocSum) Start() uint { return pallocSum(m).start() }
824 func (m PallocSum) Max() uint { return pallocSum(m).max() }
825 func (m PallocSum) End() uint { return pallocSum(m).end() }
826
827
828 type PallocBits pallocBits
829
830 func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
831 return (*pallocBits)(b).find(npages, searchIdx)
832 }
833 func (b *PallocBits) AllocRange(i, n uint) { (*pallocBits)(b).allocRange(i, n) }
834 func (b *PallocBits) Free(i, n uint) { (*pallocBits)(b).free(i, n) }
835 func (b *PallocBits) Summarize() PallocSum { return PallocSum((*pallocBits)(b).summarize()) }
836 func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
837
838 // SummarizeSlow is a slow but more obviously correct implementation
839 // of (*pallocBits).summarize. Used for testing.
840 func SummarizeSlow(b *PallocBits) PallocSum {
841 var start, most, end uint
842
843 const N = uint(len(b)) * 64
844 for start < N && (*pageBits)(b).get(start) == 0 {
845 start++
846 }
847 for end < N && (*pageBits)(b).get(N-end-1) == 0 {
848 end++
849 }
850 run := uint(0)
851 for i := uint(0); i < N; i++ {
852 if (*pageBits)(b).get(i) == 0 {
853 run++
854 } else {
855 run = 0
856 }
857 most = max(most, run)
858 }
859 return PackPallocSum(start, most, end)
860 }
861
862
863 func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
864
865 // DiffPallocBits returns the set of bit ranges over which the bits
866 // in a and b differ.
867 func DiffPallocBits(a, b *PallocBits) []BitRange {
868 ba := (*pageBits)(a)
869 bb := (*pageBits)(b)
870
871 var d []BitRange
872 base, size := uint(0), uint(0)
873 for i := uint(0); i < uint(len(ba))*64; i++ {
874 if ba.get(i) != bb.get(i) {
875 if size == 0 {
876 base = i
877 }
878 size++
879 } else {
880 if size != 0 {
881 d = append(d, BitRange{base, size})
882 }
883 size = 0
884 }
885 }
886 if size != 0 {
887 d = append(d, BitRange{base, size})
888 }
889 return d
890 }
891
892 // StringifyPallocBits gets the bits in the bit range r from b,
893 // and returns a string containing the bits as ASCII 0 and 1
894 // characters.
895 func StringifyPallocBits(b *PallocBits, r BitRange) string {
896 str := ""
897 for j := r.I; j < r.I+r.N; j++ {
898 if (*pageBits)(b).get(j) != 0 {
899 str += "1"
900 } else {
901 str += "0"
902 }
903 }
904 return str
905 }
906
907
908 type PallocData pallocData
909
910 func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
911 return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
912 }
913 func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
914 func (d *PallocData) ScavengedSetRange(i, n uint) {
915 (*pallocData)(d).scavenged.setRange(i, n)
916 }
917 func (d *PallocData) PallocBits() *PallocBits {
918 return (*PallocBits)(&(*pallocData)(d).pallocBits)
919 }
920 func (d *PallocData) Scavenged() *PallocBits {
921 return (*PallocBits)(&(*pallocData)(d).scavenged)
922 }
923
924
925 func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
926
927
928 type PageCache pageCache
929
930 const PageCachePages = pageCachePages
931
932 func NewPageCache(base uintptr, cache, scav uint64) PageCache {
933 return PageCache(pageCache{base: base, cache: cache, scav: scav})
934 }
935 func (c *PageCache) Empty() bool { return (*pageCache)(c).empty() }
936 func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
937 func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
938 func (c *PageCache) Scav() uint64 { return (*pageCache)(c).scav }
939 func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
940 return (*pageCache)(c).alloc(npages)
941 }
942 func (c *PageCache) Flush(s *PageAlloc) {
943 cp := (*pageCache)(c)
944 sp := (*pageAlloc)(s)
945
946 systemstack(func() {
947
948
949 lock(sp.mheapLock)
950 cp.flush(sp)
951 unlock(sp.mheapLock)
952 })
953 }
954
955
956 type ChunkIdx chunkIdx
957
958
959
960 type PageAlloc pageAlloc
961
962 func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
963 pp := (*pageAlloc)(p)
964
965 var addr, scav uintptr
966 systemstack(func() {
967
968
969 lock(pp.mheapLock)
970 addr, scav = pp.alloc(npages)
971 unlock(pp.mheapLock)
972 })
973 return addr, scav
974 }
975 func (p *PageAlloc) AllocToCache() PageCache {
976 pp := (*pageAlloc)(p)
977
978 var c PageCache
979 systemstack(func() {
980
981
982 lock(pp.mheapLock)
983 c = PageCache(pp.allocToCache())
984 unlock(pp.mheapLock)
985 })
986 return c
987 }
988 func (p *PageAlloc) Free(base, npages uintptr) {
989 pp := (*pageAlloc)(p)
990
991 systemstack(func() {
992
993
994 lock(pp.mheapLock)
995 pp.free(base, npages)
996 unlock(pp.mheapLock)
997 })
998 }
999 func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
1000 return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
1001 }
1002 func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
1003 pp := (*pageAlloc)(p)
1004 systemstack(func() {
1005 r = pp.scavenge(nbytes, nil, true)
1006 })
1007 return
1008 }
1009 func (p *PageAlloc) InUse() []AddrRange {
1010 ranges := make([]AddrRange, 0, len(p.inUse.ranges))
1011 for _, r := range p.inUse.ranges {
1012 ranges = append(ranges, AddrRange{r})
1013 }
1014 return ranges
1015 }
1016
1017
1018 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
1019 ci := chunkIdx(i)
1020 return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
1021 }
1022
1023
1024 type AddrRange struct {
1025 addrRange
1026 }
1027
1028
1029 func MakeAddrRange(base, limit uintptr) AddrRange {
1030 return AddrRange{makeAddrRange(base, limit)}
1031 }
1032
1033
1034 func (a AddrRange) Base() uintptr {
1035 return a.addrRange.base.addr()
1036 }
1037
1038
1039 func (a AddrRange) Limit() uintptr {
1040 return a.addrRange.limit.addr()
1041 }
1042
1043
1044 func (a AddrRange) Equals(b AddrRange) bool {
1045 return a == b
1046 }
1047
1048
1049 func (a AddrRange) Size() uintptr {
1050 return a.addrRange.size()
1051 }
1052
1053 // testSysStat is the sysStat passed to test versions of various
1054 // runtime structures. We do actually have to keep track of this
1055 // because otherwise memstats.mappedReady won't actually line up
1056 // with other stats in the runtime during tests.
1057 var testSysStat = &memstats.other_sys
1058
1059
1060 type AddrRanges struct {
1061 addrRanges
1062 mutable bool
1063 }
1064
1065 // NewAddrRanges creates a new empty addrRanges.
1066 //
1067 // Note that this initializes addrRanges just like in the
1068 // runtime, so its memory is persistentalloc'd. Call this
1069 // function sparingly since the memory it allocates is
1070 // leaked.
1071 //
1072 // This AddrRanges is mutable, so we can test methods like
1073 // Add.
1074 func NewAddrRanges() AddrRanges {
1075 r := addrRanges{}
1076 r.init(testSysStat)
1077 return AddrRanges{r, true}
1078 }
1079
1080 // MakeAddrRanges creates a new addrRanges populated with
1081 // the ranges in a.
1082 //
1083 // The returned AddrRanges is immutable, so methods like
1084 // Add will fail.
1085 func MakeAddrRanges(a ...AddrRange) AddrRanges {
1086 // Methods that manipulate the backing store of addrRanges.ranges should
1087 // not be used on the result from this function (e.g. add) since they may
1088 // trigger reallocation. That would normally be fine, except the new
1089 // backing store won't come from the heap, but from persistentalloc, so
1090 // we'll leak some memory implicitly.
1091 ranges := make([]addrRange, 0, len(a))
1092 total := uintptr(0)
1093 for _, r := range a {
1094 ranges = append(ranges, r.addrRange)
1095 total += r.Size()
1096 }
1097 return AddrRanges{addrRanges{
1098 ranges: ranges,
1099 totalBytes: total,
1100 sysStat: testSysStat,
1101 }, false}
1102 }
1103
1104 // Ranges returns a copy of the ranges described by the
1105 // addrRanges.
1106 func (a *AddrRanges) Ranges() []AddrRange {
1107 result := make([]AddrRange, 0, len(a.addrRanges.ranges))
1108 for _, r := range a.addrRanges.ranges {
1109 result = append(result, AddrRange{r})
1110 }
1111 return result
1112 }
1113
1114 // FindSucc returns the successor to base. See addrRanges.findSucc
1115 // for more details.
1116 func (a *AddrRanges) FindSucc(base uintptr) int {
1117 return a.findSucc(base)
1118 }
1119
1120 // Add adds a new AddrRange to the AddrRanges.
1121 //
1122 // The AddrRange must be mutable (i.e. created by NewAddrRanges),
1123 // otherwise this method will throw.
1124 func (a *AddrRanges) Add(r AddrRange) {
1125 if !a.mutable {
1126 throw("attempt to mutate immutable AddrRanges")
1127 }
1128 a.add(r.addrRange)
1129 }
1130
1131 // TotalBytes returns the totalBytes field of the addrRanges.
1132 func (a *AddrRanges) TotalBytes() uintptr {
1133 return a.addrRanges.totalBytes
1134 }
1135
1136 // BitRange represents a range over a bitmap.
1137 type BitRange struct {
1138 I, N uint
1139 }
1140
1141 // NewPageAlloc creates a new page allocator for testing and
1142 // initializes it with the scav and chunks maps. Each key in these maps
1143 // represents a chunk index and each value is a series of bit ranges to
1144 // set within each bitmap's chunk.
1145 //
1146 // The initialization of the pageAlloc preserves the invariant that if a
1147 // scavenged bit is set the alloc bit is necessarily unset, so some
1148 // of the bits described by scav may be cleared in the final bitmap if
1149 // ranges in chunks overlap with them.
1150 //
1151 // scav is optional, and if nil, the scavenged bitmap will be cleared
1152 // (as opposed to all 1s, which it usually is). Furthermore, every
1153 // chunk index in scav must appear in chunks; ones that do not are
1154 // ignored.
1155 func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
1156 p := new(pageAlloc)
1157
1158
1159 p.init(new(mutex), testSysStat, true)
1160 lockInit(p.mheapLock, lockRankMheap)
1161 for i, init := range chunks {
1162 addr := chunkBase(chunkIdx(i))
1163
1164
1165 systemstack(func() {
1166 lock(p.mheapLock)
1167 p.grow(addr, pallocChunkBytes)
1168 unlock(p.mheapLock)
1169 })
1170
1171
1172 ci := chunkIndex(addr)
1173 chunk := p.chunkOf(ci)
1174
1175
1176 chunk.scavenged.clearRange(0, pallocChunkPages)
1177
1178 // Simulate the allocation and subsequent free of all pages in
1179 // the chunk for the scavenge index. This sets the state equivalent
1180 // with all pages within the index being free.
1181 p.scav.index.alloc(ci, pallocChunkPages)
1182 p.scav.index.free(ci, 0, pallocChunkPages)
1183
1184
1185 if scav != nil {
1186 if scvg, ok := scav[i]; ok {
1187 for _, s := range scvg {
1188
1189
1190 if s.N != 0 {
1191 chunk.scavenged.setRange(s.I, s.N)
1192 }
1193 }
1194 }
1195 }
1196
1197
1198 for _, s := range init {
1199
1200
1201 if s.N != 0 {
1202 chunk.allocRange(s.I, s.N)
1203
1204
1205 p.scav.index.alloc(ci, s.N)
1206 }
1207 }
1208
1209
1210 systemstack(func() {
1211 lock(p.mheapLock)
1212 p.update(addr, pallocChunkPages, false, false)
1213 unlock(p.mheapLock)
1214 })
1215 }
1216
1217 return (*PageAlloc)(p)
1218 }
1219
1220 // FreePageAlloc releases hard OS resources owned by the pageAlloc. Once
1221 // this is called, the pageAlloc may no longer be used. It is only safe
1222 // to call on a pageAlloc created by NewPageAlloc.
1223 func FreePageAlloc(pp *PageAlloc) {
1224 p := (*pageAlloc)(pp)
1225
1226
1227 if pageAlloc64Bit != 0 {
1228 for l := 0; l < summaryLevels; l++ {
1229 sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
1230 }
1231 } else {
1232 resSize := uintptr(0)
1233 for _, s := range p.summary {
1234 resSize += uintptr(cap(s)) * pallocSumBytes
1235 }
1236 sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
1237 }
1238
1239
1240 sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
1241
1242 // Subtract back out whatever we mapped for the summaries.
1243 // sysUsed adds to p.sysStat and memstats.mappedReady no matter what
1244 // (and in anger should actually be accounted for), and there's no other
1245 // way to figure out how much we actually mapped.
1246 gcController.mappedReady.Add(-int64(p.summaryMappedReady))
1247 testSysStat.add(-int64(p.summaryMappedReady))
1248
1249
1250 for i := range p.chunks {
1251 if x := p.chunks[i]; x != nil {
1252 p.chunks[i] = nil
1253
1254 sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
1255 }
1256 }
1257 }
1258
1259 // BaseChunkIdx is a convenient chunkIdx value which works on both
1260 // 64 bit and 32 bit platforms, allowing the tests to share code
1261 // between the two.
1262 //
1263 // This should not be higher than 0x100*pallocChunkBytes to support
1264 // mips and mipsle, which only have 31-bit address spaces.
1265 var BaseChunkIdx = func() ChunkIdx {
1266 var prefix uintptr
1267 if pageAlloc64Bit != 0 {
1268 prefix = 0xc000
1269 } else {
1270 prefix = 0x100
1271 }
1272 baseAddr := prefix * pallocChunkBytes
1273 if goos.IsAix != 0 {
1274 baseAddr += arenaBaseOffset
1275 }
1276 return ChunkIdx(chunkIndex(baseAddr))
1277 }()
1278
1279 // PageBase returns an address given a chunk index and a page index
1280 // relative to that chunk.
1281 func PageBase(c ChunkIdx, pageIdx uint) uintptr {
1282 return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
1283 }
1284
1285 type BitsMismatch struct {
1286 Base uintptr
1287 Got, Want uint64
1288 }
1289
1290 func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
1291 ok = true
1292
1293
1294 systemstack(func() {
1295 getg().m.mallocing++
1296
1297
1298 lock(&mheap_.lock)
1299 chunkLoop:
1300 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
1301 chunk := mheap_.pages.tryChunkOf(i)
1302 if chunk == nil {
1303 continue
1304 }
1305 for j := 0; j < pallocChunkPages/64; j++ {
1306 // Run over each 64-bit word of the bitmaps and check that no page
1307 // is marked both allocated and scavenged: the scavenged bit must be
1308 // cleared when a page is allocated. Record any mismatches, up to the
1309 // capacity of the provided mismatches slice, and report failure if
1310 // any were found.
1311 want := chunk.scavenged[j] &^ chunk.pallocBits[j]
1312 got := chunk.scavenged[j]
1313 if want != got {
1314 ok = false
1315 if n >= len(mismatches) {
1316 break chunkLoop
1317 }
1318 mismatches[n] = BitsMismatch{
1319 Base: chunkBase(i) + uintptr(j)*64*pageSize,
1320 Got: got,
1321 Want: want,
1322 }
1323 n++
1324 }
1325 }
1326 }
1327 unlock(&mheap_.lock)
1328
1329 getg().m.mallocing--
1330 })
1331 return
1332 }
1333
1334 func PageCachePagesLeaked() (leaked uintptr) {
1335 stw := stopTheWorld(stwForTestPageCachePagesLeaked)
1336
1337 // Walk over destroyed Ps and look for unflushed caches.
1338 deadp := allp[len(allp):cap(allp)]
1339 for _, p := range deadp {
1340
1341
1342 if p != nil {
1343 leaked += uintptr(sys.OnesCount64(p.pcache.cache))
1344 }
1345 }
1346
1347 startTheWorld(stw)
1348 return
1349 }
1350
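// A note on the next few declarations (not in the original source):
// they expose the runtime's internal mutex type and its lock/unlock
// primitives so tests can exercise them directly.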
1351 type Mutex = mutex
1352
1353 var Lock = lock
1354 var Unlock = unlock
1355
1356 var MutexContended = mutexContended
1357
1358 func SemRootLock(addr *uint32) *mutex {
1359 root := semtable.rootFor(addr)
1360 return &root.lock
1361 }
1362
1363 var Semacquire = semacquire
1364 var Semrelease1 = semrelease1
1365
1366 func SemNwait(addr *uint32) uint32 {
1367 root := semtable.rootFor(addr)
1368 return root.nwait.Load()
1369 }
1370
1371 const SemTableSize = semTabSize
1372
1373
1374 type SemTable struct {
1375 semTable
1376 }
1377
1378 // Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
1379 func (t *SemTable) Enqueue(addr *uint32) {
1380 s := acquireSudog()
1381 s.releasetime = 0
1382 s.acquiretime = 0
1383 s.ticket = 0
1384 t.semTable.rootFor(addr).queue(addr, s, false)
1385 }
1386
1387 // Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
1388 //
1389 // Returns true if there actually was a waiter to be dequeued.
1390 func (t *SemTable) Dequeue(addr *uint32) bool {
1391 s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
1392 if s != nil {
1393 releaseSudog(s)
1394 return true
1395 }
1396 return false
1397 }
1398
1399
1400 type MSpan mspan
1401
1402
1403 func AllocMSpan() *MSpan {
1404 var s *mspan
1405 systemstack(func() {
1406 lock(&mheap_.lock)
1407 s = (*mspan)(mheap_.spanalloc.alloc())
1408 unlock(&mheap_.lock)
1409 })
1410 return (*MSpan)(s)
1411 }
1412
1413
1414 func FreeMSpan(s *MSpan) {
1415 systemstack(func() {
1416 lock(&mheap_.lock)
1417 mheap_.spanalloc.free(unsafe.Pointer(s))
1418 unlock(&mheap_.lock)
1419 })
1420 }
1421
1422 func MSpanCountAlloc(ms *MSpan, bits []byte) int {
1423 s := (*mspan)(ms)
1424 s.nelems = uint16(len(bits) * 8)
1425 s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
1426 result := s.countAlloc()
1427 s.gcmarkBits = nil
1428 return result
1429 }
1430
1431 const (
1432 TimeHistSubBucketBits = timeHistSubBucketBits
1433 TimeHistNumSubBuckets = timeHistNumSubBuckets
1434 TimeHistNumBuckets = timeHistNumBuckets
1435 TimeHistMinBucketBits = timeHistMinBucketBits
1436 TimeHistMaxBucketBits = timeHistMaxBucketBits
1437 )
1438
1439 type TimeHistogram timeHistogram
1440
1441 // Count returns the counts for the given bucket and subBucket indices.
1442 // It returns true if the indices were in range; otherwise it returns
1443 // the underflow bucket's count (bucket < 0) or the overflow bucket's
1444 // count (index out of range), and false.
1445 func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
1446 t := (*timeHistogram)(th)
1447 if bucket < 0 {
1448 return t.underflow.Load(), false
1449 }
1450 i := bucket*TimeHistNumSubBuckets + subBucket
1451 if i >= len(t.counts) {
1452 return t.overflow.Load(), false
1453 }
1454 return t.counts[i].Load(), true
1455 }
1456
1457 func (th *TimeHistogram) Record(duration int64) {
1458 (*timeHistogram)(th).record(duration)
1459 }
1460
1461 var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
1462
1463 func SetIntArgRegs(a int) int {
1464 lock(&finlock)
1465 old := intArgRegs
1466 if a >= 0 {
1467 intArgRegs = a
1468 }
1469 unlock(&finlock)
1470 return old
1471 }
1472
1473 func FinalizerGAsleep() bool {
1474 return fingStatus.Load()&fingWait != 0
1475 }
1476
1477 // For GCTestMoveStackOnNextCall, it's important not to introduce an
1478 // extra layer of call (and hence an extra frame that would be copied),
1479 // so the internal function is exported directly as a variable.
1480 var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
1481
1482
1483
1484 func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
1485 return gcTestIsReachable(ptrs...)
1486 }
1487
1488 // GCTestPointerClass reports the category of the pointer p as one of
1489 // "heap", "stack", "data", "bss", or "other".
1490 //
1491 // It is important that this is a real call: that lets escape analysis
1492 // see exactly what the caller passes in, so the reported class matches
1493 // what the test constructed.
1494 func GCTestPointerClass(p unsafe.Pointer) string {
1495 return gcTestPointerClass(p)
1496 }
1497
1498 const Raceenabled = raceenabled
1499
1500 const (
1501 GCBackgroundUtilization = gcBackgroundUtilization
1502 GCGoalUtilization = gcGoalUtilization
1503 DefaultHeapMinimum = defaultHeapMinimum
1504 MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
1505 MemoryLimitMinHeapGoalHeadroom = memoryLimitMinHeapGoalHeadroom
1506 )
1507
1508 type GCController struct {
1509 gcControllerState
1510 }
1511
1512 func NewGCController(gcPercent int, memoryLimit int64) *GCController {
1513 // Force the controller to escape. We're going to
1514 // do 64-bit atomics on it, and if it gets stack-allocated
1515 // on a 32-bit architecture, it may get allocated unaligned
1516 // space.
1517 g := Escape(new(GCController))
1518 g.gcControllerState.test = true
1519 g.init(int32(gcPercent), memoryLimit)
1520 return g
1521 }
1522
1523 func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
1524 trigger, _ := c.trigger()
1525 if c.heapMarked > trigger {
1526 trigger = c.heapMarked
1527 }
1528 c.maxStackScan.Store(stackSize)
1529 c.globalsScan.Store(globalsSize)
1530 c.heapLive.Store(trigger)
1531 c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
1532 c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
1533 }
1534
1535 func (c *GCController) AssistWorkPerByte() float64 {
1536 return c.assistWorkPerByte.Load()
1537 }
1538
1539 func (c *GCController) HeapGoal() uint64 {
1540 return c.heapGoal()
1541 }
1542
1543 func (c *GCController) HeapLive() uint64 {
1544 return c.heapLive.Load()
1545 }
1546
1547 func (c *GCController) HeapMarked() uint64 {
1548 return c.heapMarked
1549 }
1550
1551 func (c *GCController) Triggered() uint64 {
1552 return c.triggered
1553 }
1554
1555 type GCControllerReviseDelta struct {
1556 HeapLive int64
1557 HeapScan int64
1558 HeapScanWork int64
1559 StackScanWork int64
1560 GlobalsScanWork int64
1561 }
1562
1563 func (c *GCController) Revise(d GCControllerReviseDelta) {
1564 c.heapLive.Add(d.HeapLive)
1565 c.heapScan.Add(d.HeapScan)
1566 c.heapScanWork.Add(d.HeapScanWork)
1567 c.stackScanWork.Add(d.StackScanWork)
1568 c.globalsScanWork.Add(d.GlobalsScanWork)
1569 c.revise()
1570 }
1571
1572 func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
1573 c.assistTime.Store(assistTime)
1574 c.endCycle(elapsed, gomaxprocs, false)
1575 c.resetLive(bytesMarked)
1576 c.commit(false)
1577 }
1578
1579 func (c *GCController) AddIdleMarkWorker() bool {
1580 return c.addIdleMarkWorker()
1581 }
1582
1583 func (c *GCController) NeedIdleMarkWorker() bool {
1584 return c.needIdleMarkWorker()
1585 }
1586
1587 func (c *GCController) RemoveIdleMarkWorker() {
1588 c.removeIdleMarkWorker()
1589 }
1590
1591 func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
1592 c.setMaxIdleMarkWorkers(max)
1593 }
1594
1595 var alwaysFalse bool
1596 var escapeSink any
1597
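// A note on Escape (not in the original source): it forces x to escape
// to the heap. Because alwaysFalse is a mutable package-level variable,
// the compiler cannot prove the assignment to escapeSink never happens,
// so it must assume x may be stored in an interface and escape.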
1598 func Escape[T any](x T) T {
1599 if alwaysFalse {
1600 escapeSink = x
1601 }
1602 return x
1603 }
1604
1605
1606 func Acquirem() {
1607 acquirem()
1608 }
1609
1610 func Releasem() {
1611 releasem(getg().m)
1612 }
1613
1614 var Timediv = timediv
1615
1616 type PIController struct {
1617 piController
1618 }
1619
1620 func NewPIController(kp, ti, tt, min, max float64) *PIController {
1621 return &PIController{piController{
1622 kp: kp,
1623 ti: ti,
1624 tt: tt,
1625 min: min,
1626 max: max,
1627 }}
1628 }
1629
1630 func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
1631 return c.piController.next(input, setpoint, period)
1632 }
1633
1634 const (
1635 CapacityPerProc = capacityPerProc
1636 GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
1637 )
1638
1639 type GCCPULimiter struct {
1640 limiter gcCPULimiterState
1641 }
1642
1643 func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
1644 // Force the limiter to escape. We're going to
1645 // do 64-bit atomics on it, and if it gets stack-allocated
1646 // on a 32-bit architecture, it may get allocated unaligned
1647 // space.
1648 l := Escape(new(GCCPULimiter))
1649 l.limiter.test = true
1650 l.limiter.resetCapacity(now, gomaxprocs)
1651 return l
1652 }
1653
1654 func (l *GCCPULimiter) Fill() uint64 {
1655 return l.limiter.bucket.fill
1656 }
1657
1658 func (l *GCCPULimiter) Capacity() uint64 {
1659 return l.limiter.bucket.capacity
1660 }
1661
1662 func (l *GCCPULimiter) Overflow() uint64 {
1663 return l.limiter.overflow
1664 }
1665
1666 func (l *GCCPULimiter) Limiting() bool {
1667 return l.limiter.limiting()
1668 }
1669
1670 func (l *GCCPULimiter) NeedUpdate(now int64) bool {
1671 return l.limiter.needUpdate(now)
1672 }
1673
1674 func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
1675 l.limiter.startGCTransition(enableGC, now)
1676 }
1677
1678 func (l *GCCPULimiter) FinishGCTransition(now int64) {
1679 l.limiter.finishGCTransition(now)
1680 }
1681
1682 func (l *GCCPULimiter) Update(now int64) {
1683 l.limiter.update(now)
1684 }
1685
1686 func (l *GCCPULimiter) AddAssistTime(t int64) {
1687 l.limiter.addAssistTime(t)
1688 }
1689
1690 func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
1691 l.limiter.resetCapacity(now, nprocs)
1692 }
1693
1694 const ScavengePercent = scavengePercent
1695
1696 type Scavenger struct {
1697 Sleep func(int64) int64
1698 Scavenge func(uintptr) (uintptr, int64)
1699 ShouldStop func() bool
1700 GoMaxProcs func() int32
1701
1702 released atomic.Uintptr
1703 scavenger scavengerState
1704 stop chan<- struct{}
1705 done <-chan struct{}
1706 }
1707
1708 func (s *Scavenger) Start() {
1709 if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
1710 panic("must populate all stubs")
1711 }
1712
1713
1714 s.scavenger.sleepStub = s.Sleep
1715 s.scavenger.scavenge = s.Scavenge
1716 s.scavenger.shouldStop = s.ShouldStop
1717 s.scavenger.gomaxprocs = s.GoMaxProcs
1718
1719
1720 stop := make(chan struct{})
1721 s.stop = stop
1722 done := make(chan struct{})
1723 s.done = done
1724 go func() {
1725
1726 s.scavenger.init()
1727 s.scavenger.park()
1728 for {
1729 select {
1730 case <-stop:
1731 close(done)
1732 return
1733 default:
1734 }
1735 released, workTime := s.scavenger.run()
1736 if released == 0 {
1737 s.scavenger.park()
1738 continue
1739 }
1740 s.released.Add(released)
1741 s.scavenger.sleep(workTime)
1742 }
1743 }()
1744 if !s.BlockUntilParked(1e9 /* 1 second */) {
1745 panic("timed out waiting for scavenger to get ready")
1746 }
1747 }
1748
1749 // BlockUntilParked blocks until the scavenger parks, or until
1750 // timeout is exceeded. Returns true if the scavenger parked.
1751 //
1752 // Note that in testing, parked means something slightly different.
1753 // In anger, the scavenger parks to sleep, too, but in testing,
1754 // it only parks when it actually has no work to do.
1755 func (s *Scavenger) BlockUntilParked(timeout int64) bool {
1756
1757
1758
1759
1760
1761 start := nanotime()
1762 for nanotime()-start < timeout {
1763 lock(&s.scavenger.lock)
1764 parked := s.scavenger.parked
1765 unlock(&s.scavenger.lock)
1766 if parked {
1767 return true
1768 }
1769 Gosched()
1770 }
1771 return false
1772 }
1773
1774 // Released returns how many bytes the scavenger released.
1775 func (s *Scavenger) Released() uintptr {
1776 return s.released.Load()
1777 }
1778
1779 // Wake wakes up a parked scavenger to keep running.
1780 func (s *Scavenger) Wake() {
1781 s.scavenger.wake()
1782 }
1783
1784 // Stop cleans up the scavenger's resources. The scavenger
1785 // must be parked for this to work.
1786 func (s *Scavenger) Stop() {
1787 lock(&s.scavenger.lock)
1788 parked := s.scavenger.parked
1789 unlock(&s.scavenger.lock)
1790 if !parked {
1791 panic("tried to clean up scavenger that is not parked")
1792 }
1793 close(s.stop)
1794 s.Wake()
1795 <-s.done
1796 }
1797
1798 type ScavengeIndex struct {
1799 i scavengeIndex
1800 }
1801
1802 func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
1803 s := new(ScavengeIndex)
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815 s.i.chunks = make([]atomicScavChunkData, max)
1816 s.i.min.Store(uintptr(min))
1817 s.i.max.Store(uintptr(max))
1818 s.i.minHeapIdx.Store(uintptr(min))
1819 s.i.test = true
1820 return s
1821 }
1822
1823 func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
1824 ci, off := s.i.find(force)
1825 return ChunkIdx(ci), off
1826 }
1827
1828 func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
1829 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1830 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1831
1832 if sc == ec {
1833
1834 s.i.alloc(sc, ei+1-si)
1835 } else {
1836
1837 s.i.alloc(sc, pallocChunkPages-si)
1838 for c := sc + 1; c < ec; c++ {
1839 s.i.alloc(c, pallocChunkPages)
1840 }
1841 s.i.alloc(ec, ei+1)
1842 }
1843 }
1844
1845 func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
1846 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1847 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1848
1849 if sc == ec {
1850
1851 s.i.free(sc, si, ei+1-si)
1852 } else {
1853
1854 s.i.free(sc, si, pallocChunkPages-si)
1855 for c := sc + 1; c < ec; c++ {
1856 s.i.free(c, 0, pallocChunkPages)
1857 }
1858 s.i.free(ec, 0, ei+1)
1859 }
1860 }
1861
1862 func (s *ScavengeIndex) ResetSearchAddrs() {
1863 for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
1864 addr, marked := a.Load()
1865 if marked {
1866 a.StoreUnmark(addr, addr)
1867 }
1868 a.Clear()
1869 }
1870 s.i.freeHWM = minOffAddr
1871 }
1872
1873 func (s *ScavengeIndex) NextGen() {
1874 s.i.nextGen()
1875 }
1876
1877 func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
1878 s.i.setEmpty(chunkIdx(ci))
1879 }
1880
1881 func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
1882 sc0 := scavChunkData{
1883 gen: gen,
1884 inUse: inUse,
1885 lastInUse: lastInUse,
1886 scavChunkFlags: scavChunkFlags(flags),
1887 }
1888 scp := sc0.pack()
1889 sc1 := unpackScavChunkData(scp)
1890 return sc0 == sc1
1891 }
1892
1893 const GTrackingPeriod = gTrackingPeriod
1894
1895 var ZeroBase = unsafe.Pointer(&zerobase)
1896
1897 const UserArenaChunkBytes = userArenaChunkBytes
1898
1899 type UserArena struct {
1900 arena *userArena
1901 }
1902
1903 func NewUserArena() *UserArena {
1904 return &UserArena{newUserArena()}
1905 }
1906
1907 func (a *UserArena) New(out *any) {
1908 i := efaceOf(out)
1909 typ := i._type
1910 if typ.Kind_&kindMask != kindPtr {
1911 panic("new result of non-ptr type")
1912 }
1913 typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
1914 i.data = a.arena.new(typ)
1915 }
1916
1917 func (a *UserArena) Slice(sl any, cap int) {
1918 a.arena.slice(sl, cap)
1919 }
1920
1921 func (a *UserArena) Free() {
1922 a.arena.free()
1923 }
1924
1925 func GlobalWaitingArenaChunks() int {
1926 n := 0
1927 systemstack(func() {
1928 lock(&mheap_.lock)
1929 for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
1930 n++
1931 }
1932 unlock(&mheap_.lock)
1933 })
1934 return n
1935 }
1936
1937 func UserArenaClone[T any](s T) T {
1938 return arena_heapify(s).(T)
1939 }
1940
1941 var AlignUp = alignUp
1942
1943 func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
1944 return blockUntilEmptyFinalizerQueue(timeout)
1945 }
1946
1947 func FrameStartLine(f *Frame) int {
1948 return f.startLine
1949 }
1950
1951 // PersistentAlloc allocates some memory that lives outside the Go heap.
1952 // This memory will never be freed; use it sparingly.
1953 func PersistentAlloc(n uintptr) unsafe.Pointer {
1954 return persistentalloc(n, 0, &memstats.other_sys)
1955 }
1956
1957 // FPCallers works like Callers and uses frame pointer unwinding to populate
1958 // pcBuf with the return addresses of the physical frames on the stack.
1959 func FPCallers(pcBuf []uintptr) int {
1960 return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
1961 }
1962
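// A note on the next constant (not in the original source):
// FramePointerEnabled reports whether the runtime was built with frame
// pointers enabled; frame pointer unwinding (FPCallers above) is only
// meaningful when this is true.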
1963 const FramePointerEnabled = framepointer_enabled
1964
1965 var (
1966 IsPinned = isPinned
1967 GetPinCounter = pinnerGetPinCounter
1968 )
1969
1970 func SetPinnerLeakPanic(f func()) {
1971 pinnerLeakPanic = f
1972 }
1973 func GetPinnerLeakPanic() func() {
1974 return pinnerLeakPanic
1975 }
1976
1977 var testUintptr uintptr
1978
1979 func MyGenericFunc[T any]() {
1980 systemstack(func() {
1981 testUintptr = 4
1982 })
1983 }
1984
1985 func UnsafePoint(pc uintptr) bool {
1986 fi := findfunc(pc)
1987 v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
1988 switch v {
1989 case abi.UnsafePointUnsafe:
1990 return true
1991 case abi.UnsafePointSafe:
1992 return false
1993 case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
1994 // These are all interruptible; they just encode a nonstandard
1995 // way of resuming when interrupted.
1996 return false
1997 default:
1998 var buf [20]byte
1999 panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
2000 }
2001 }
2002