// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"internal/goexperiment"
	"math/rand"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	t.Skip("skipping known-flaky test; golang.org/issue/37331")
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)
	// Prevent the compiler from applying escape analysis.
	// This makes sure new(T) is allocated on heap, not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

func TestGcMapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

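// TestGcRescan links each node both through a typed pointer (nexty) and
// through an interior pointer to its embedded X (nextx), so the collector
// must keep every node, and each node's heap-allocated p, alive via either path.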
func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on windows
	// or on virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

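// hugeSink is deliberately nil at run time, so the allocations in
// TestHugeGCInfo never execute; assigning to it merely forces the compiler
// to generate GC metadata for the huge types involved.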
var hugeSink any

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that compiler can chew these huge types even on weakest machines.
	// The types are not allocated at runtime.
	if hugeSink != nil {
		// This branch never runs; it only has to compile.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

func TestGcZombieReporting(t *testing.T) {
	// The testprog constructs a pointer to an object the GC has already
	// freed (a "zombie"). Such pointers may cross spans, so invalidptr=0
	// disables the runtime's bad-pointer check and lets the GC's own
	// zombie detection report the problem instead.
	got := runTestProg(t, "testprog", "GCZombie", "GODEBUG=invalidptr=0")
	want := "found pointer to free object"
	if !strings.Contains(got, want) {
		t.Fatalf("expected %q in output, but got %q", want, got)
	}
}

func TestGCTestMoveStackOnNextCall(t *testing.T) {
	t.Parallel()
	var onStack int
	// GCTestMoveStackOnNextCall can fail in rare cases if there's
	// a preemption. This won't happen many times in quick
	// succession, so just retry a few times.
	for retry := 0; retry < 5; retry++ {
		runtime.GCTestMoveStackOnNextCall()
		if moveStackCheck(t, &onStack, uintptr(unsafe.Pointer(&onStack))) {
			// Passed.
			return
		}
	}
	t.Fatal("stack did not move")
}

// This must not be inlined because the point is to force a stack
// copy to look at a specific (function-relative) stack offset for a
// mismatch.
//
//go:noinline
func moveStackCheck(t *testing.T, new *int, old uintptr) bool {
	// new should have been updated by the stack copy, but old
	// should not have.

	// Capture new's value before doing anything that could
	// further move the stack.
	new2 := uintptr(unsafe.Pointer(new))

	t.Logf("old stack pointer %x, new stack pointer %x", old, new2)
	if new2 == old {
		// Check that we didn't screw up the test's escape analysis:
		// new must still be a stack pointer.
		if cls := runtime.GCTestPointerClass(unsafe.Pointer(new)); cls != "stack" {
			t.Fatalf("test bug: new (%#x) should be a stack pointer, not %s", new2, cls)
		}
		// The stack did not move; the caller will retry.
		return false
	}
	return true
}

func TestGCTestMoveStackRepeatedly(t *testing.T) {
	// Move the stack repeatedly to make sure it's not doubling
	// every time.
	for i := 0; i < 100; i++ {
		runtime.GCTestMoveStackOnNextCall()
		moveStack1(false)
	}
}

//go:noinline
func moveStack1(x bool) {
	// Make sure this function doesn't get auto-nosplit.
	if x {
		println("x")
	}
}

func TestGCTestIsReachable(t *testing.T) {
	var all, half []unsafe.Pointer
	var want uint64
	for i := 0; i < 16; i++ {
		// The tiny allocator muddies things, so we use a
		// scannable type.
		p := unsafe.Pointer(new(*int))
		all = append(all, p)
		if i%2 == 0 {
			half = append(half, p)
			want |= 1 << i
		}
	}

	got := runtime.GCTestIsReachable(all...)
	if want != got {
		t.Fatalf("did not get expected reachable set; want %b, got %b", want, got)
	}
	runtime.KeepAlive(half)
}

var pointerClassBSS *int
var pointerClassData = 42

func TestGCTestPointerClass(t *testing.T) {
	t.Parallel()
	check := func(p unsafe.Pointer, want string) {
		t.Helper()
		got := runtime.GCTestPointerClass(p)
		if got != want {
			// Convert the pointer to uintptr to avoid
			// escaping it.
			t.Errorf("for %#x, want class %s, got %s", uintptr(p), want, got)
		}
	}
	var onStack int
	var notOnStack int
	check(unsafe.Pointer(&onStack), "stack")
	check(unsafe.Pointer(runtime.Escape(&notOnStack)), "heap")
	check(unsafe.Pointer(&pointerClassBSS), "bss")
	check(unsafe.Pointer(&pointerClassData), "data")
	check(nil, "other")
}

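// The BenchmarkSetType* benchmarks below measure the cost of recording
// type/pointer metadata for freshly allocated values of various shapes and
// sizes, via the runtime's BenchSetType test hook (see benchSetType).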
func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType[*byte](b)
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType[[8]*byte](b)
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType[[16]*byte](b)
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType[[32]*byte](b)
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType[[64]*byte](b)
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType[[126]*byte](b)
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType[[128]*byte](b)
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetTypeSlice[*byte](b, 1<<10)
}

type Node1 struct {
	Value       [1]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType[Node1](b)
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetTypeSlice[Node1](b, 32)
}

type Node8 struct {
	Value       [8]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType[Node8](b)
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetTypeSlice[Node8](b, 32)
}

type Node64 struct {
	Value       [64]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType[Node64](b)
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetTypeSlice[Node64](b, 32)
}

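// Node64Dead places its pointers first, so everything after the first two
// words of the object is pointer-free ("dead" in the heap bitmap).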
type Node64Dead struct {
	Left, Right *byte
	Value       [64]uintptr
}

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType[Node64Dead](b)
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetTypeSlice[Node64Dead](b, 32)
}

type Node124 struct {
	Value       [124]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType[Node124](b)
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetTypeSlice[Node124](b, 32)
}

type Node126 struct {
	Value       [126]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType[Node126](b)
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetTypeSlice[Node126](b, 32)
}

type Node128 struct {
	Value       [128]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType[Node128](b)
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetTypeSlice[Node128](b, 32)
}

type Node130 struct {
	Value       [130]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType[Node130](b)
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetTypeSlice[Node130](b, 32)
}

type Node1024 struct {
	Value       [1024]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType[Node1024](b)
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetTypeSlice[Node1024](b, 32)
}

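// benchSetType and benchSetTypeSlice are skipped under the allocation
// headers experiment, which changes how the runtime records type metadata
// for heap allocations.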
func benchSetType[T any](b *testing.B) {
	if goexperiment.AllocHeaders {
		b.Skip("not supported with allocation headers experiment")
	}
	b.SetBytes(int64(unsafe.Sizeof(*new(T))))
	runtime.BenchSetType[T](b.N, b.ResetTimer)
}

func benchSetTypeSlice[T any](b *testing.B, len int) {
	if goexperiment.AllocHeaders {
		b.Skip("not supported with allocation headers experiment")
	}
	b.SetBytes(int64(unsafe.Sizeof(*new(T)) * uintptr(len)))
	runtime.BenchSetTypeSlice[T](b.N, b.ResetTimer, len)
}

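// BenchmarkAllocation measures parallel allocation throughput: GOMAXPROCS
// goroutines each allocate small pointer-containing objects in batches of
// 1000 until the work channel drains.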
func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

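// testTypeSwitch, testAssert, and testAssertVar exercise the three source
// forms of interface type assertion; testIfaceEqual below exercises
// comparing an interface against a constant.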
func testTypeSwitch(x any) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x any) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x any) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x any) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// Grow the heap and verify that the runtime's page accounting
	// agrees with a direct count of the pages in use. Allocate
	// 64 MB in 64 KB blocks so the count is reasonably large.
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

func init() {
	// Enable ReadMemStats' double-check mode.
	*runtime.DoubleCheckReadMemStats = true
}

func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

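// logDiff recursively walks two values of the same type and logs every
// leaf field where got and want differ.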
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	runtime.KeepAlive(x)
}

func applyGCLoad(b *testing.B) func() {
	// We'll apply load to the runtime with maxProcs-1 goroutines
	// and use one more to actually benchmark. It doesn't make sense
	// to try to run this test with only 1 P (that's what
	// BenchmarkReadMemStats is for).
	maxProcs := runtime.GOMAXPROCS(-1)
	if maxProcs == 1 {
		b.Skip("This benchmark can only be run with GOMAXPROCS > 1")
	}

	// Code to build a big tree with lots of pointers.
	type node struct {
		children [16]*node
	}
	var buildTree func(depth int) *node
	buildTree = func(depth int) *node {
		tree := new(node)
		if depth != 0 {
			for i := range tree.children {
				tree.children[i] = buildTree(depth - 1)
			}
		}
		return tree
	}

	// Keep the GC busy by continuously generating large trees.
	done := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < maxProcs-1; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var hold *node
		loop:
			for {
				hold = buildTree(5)
				select {
				case <-done:
					break loop
				default:
				}
			}
			runtime.KeepAlive(hold)
		}()
	}
	return func() {
		close(done)
		wg.Wait()
	}
}

func BenchmarkReadMemStatsLatency(b *testing.B) {
	stop := applyGCLoad(b)

	// Record the latency of each ReadMemStats call.
	latencies := make([]time.Duration, 0, 1024)

	// Hit ReadMemStats continuously and measure how long each call takes.
	b.ResetTimer()
	var ms runtime.MemStats
	for i := 0; i < b.N; i++ {
		// Sleep for a bit, otherwise we're just going to keep
		// stopping the world and no one will get to do anything.
		time.Sleep(100 * time.Millisecond)
		start := time.Now()
		runtime.ReadMemStats(&ms)
		latencies = append(latencies, time.Since(start))
	}

	// Make sure to stop the timer before we wait! The load created above
	// is very heavy-weight and not easy to stop, so we could end up
	// confusing the benchmarking framework for small b.N.
	b.StopTimer()
	stop()

	// Disable the default */op metrics.
	// ns/op doesn't mean anything because it's an average, but we
	// have a sleep in our b.N loop above which skews this significantly.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(0, "B/op")
	b.ReportMetric(0, "allocs/op")

	// Sort the latencies and report the percentiles.
	sort.Slice(latencies, func(i, j int) bool {
		return latencies[i] < latencies[j]
	})
	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
}

func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even if GOGC=off.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

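// writeBarrierBenchmark runs f while a background goroutine keeps the GC
// cycling continuously, so the write barrier is enabled for effectively
// the entire measured interval.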
func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// writeBarrierBenchmark needs a second P for the GC goroutine.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so the mutations below have plenty of pointers to rewrite.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Keep a set of mid-level nodes as extra roots so the
			// whole tree stays reachable (see the KeepAlive below).
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22
	root := mkTree(22)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i += 2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops.
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// See BenchmarkWriteBarrier.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate block.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

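// BenchmarkScanStackNoLocals measures GC stack scanning with ten goroutines
// parked at the bottom of very deep (100000-frame) stacks that hold no live
// locals.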
func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

func BenchmarkMSpanCountAlloc(b *testing.B) {
	// Allocate one dummy mspan for the whole benchmark.
	s := runtime.AllocMSpan()
	defer runtime.FreeMSpan(s)

	// n is the number of bytes to benchmark against.
	// n must always be a multiple of 8, since gcBits is
	// always rounded up 8 bytes.
	for _, n := range []int{8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
			// Initialize a new byte slice with pseudo-random data.
			bits := make([]byte, n)
			rand.Read(bits)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				runtime.MSpanCountAlloc(s, bits)
			}
		})
	}
}

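// countpwg recurses *n times to grow its goroutine's stack, then signals
// readiness and blocks until teardown, keeping the deep stack alive for
// BenchmarkScanStackNoLocals.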
func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}

func TestMemoryLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimit")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMemoryLimitNoGCPercent(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimitNoGCPercent")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMyGenericFunc(t *testing.T) {
	runtime.MyGenericFunc[int]()
}