src/runtime/trace.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace is captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvSTWStart          = 9  // STW start [timestamp, kind]
	traceEvSTWDone           = 10 // STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // gcController.heapLive change [timestamp, heap_alloc]
	traceEvHeapGoal          = 34 // gcController.heapGoal() change [timestamp, heap goal in bytes]
	traceEvTimerGoroutine    = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // not currently used; denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewTask [timestamp, internal task id, internal parent task id, name string, stack]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), name string, stack]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCPUSample         = 49 // CPU profiling sample [timestamp, real timestamp, real P id (-1 when absent), goroutine id, stack]
	traceEvCount             = 50
	// Byte is used but only 6 bits are available for event type.
	// The remaining 2 bits are used to specify the number of arguments.
	// That means, the max event type value is 63.
)

// traceBlockReason is an enumeration of reasons a goroutine might block.
// This is the interface the rest of the runtime uses to tell the
// tracer why a goroutine blocked. The tracer then propagates this information
// into the trace however it sees fit.
//
// Note that traceBlockReasons should not be compared, since reasons that are
// distinct by name may *not* be distinct by value.
type traceBlockReason uint8

// For maximal efficiency, just map the trace block reason directly to a trace
// event.
const (
	traceBlockGeneric         traceBlockReason = traceEvGoBlock
	traceBlockForever                          = traceEvGoStop
	traceBlockNet                              = traceEvGoBlockNet
	traceBlockSelect                           = traceEvGoBlockSelect
	traceBlockCondWait                         = traceEvGoBlockCond
	traceBlockSync                             = traceEvGoBlockSync
	traceBlockChanSend                         = traceEvGoBlockSend
	traceBlockChanRecv                         = traceEvGoBlockRecv
	traceBlockGCMarkAssist                     = traceEvGoBlockGC
	traceBlockGCSweep                          = traceEvGoBlock
	traceBlockSystemGoroutine                  = traceEvGoBlock
	traceBlockPreempted                        = traceEvGoBlock
	traceBlockDebugCall                        = traceEvGoBlock
	traceBlockUntilGCEnds                      = traceEvGoBlock
	traceBlockSleep                            = traceEvGoSleep
)

const (
	// Timestamps in trace are cputicks/traceTimeDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in less number of bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	traceTimeDiv = 16 + 48*(goarch.Is386|goarch.IsAmd64)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
)
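
// For example, on 386 and amd64 traceTimeDiv is 16+48*1 = 64, so a raw
// cputicks() value of 6_400_000 is recorded as timestamp 100_000; on all
// other architectures traceTimeDiv is 16. Dividing first shrinks timestamp
// deltas, which the varint encoding below then stores in fewer bytes.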

// trace is global tracing context.
var trace struct {
	// trace.lock must only be acquired on the system stack where
	// stack splits cannot happen while it is held.
	lock          mutex       // protects the following members
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	startTicks    int64       // cputicks when tracing was started
	endTicks      int64       // cputicks when tracing was stopped
	startNanotime int64       // nanotime when tracing was started
	endNanotime   int64       // nanotime when tracing was stopped
	startTime     traceTime   // traceClockNow when tracing started
	endTime       traceTime   // traceClockNow when tracing stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	stackTab      traceStackTable // maps stack traces to unique ids

	// cpuLogRead accepts CPU profile samples from the signal handler where
	// they're generated. It uses a two-word header to hold the IDs of the P
	// and G (respectively) that were active at the time of the sample. Because
	// profBuf uses a record with all zeros in its header to indicate overflow,
	// we make sure to make the P field always non-zero: The ID of a real P will
	// start at bit 1, and bit 0 will be set. Samples that arrive while no P is
	// running (such as near syscalls) will set the first header field to 0b10.
	// This careful handling of the first header field allows us to store ID of
	// the active G directly in the second field, even though that will be 0
	// when sampling g0.
	cpuLogRead *profBuf

	// cpuLogBuf is a trace buffer to hold events corresponding to CPU profile
	// samples, which arrive out of band and not directly connected to a
	// specific P.
	cpuLogBuf traceBufPtr

	reader atomic.Pointer[g] // goroutine that called ReadTrace, or nil

	signalLock  atomic.Uint32 // protects use of the following member, only usable in signal handlers
	cpuLogWrite *profBuf      // copy of cpuLogRead for use in signal handlers, set without signalLock

	// Dictionary for traceEvString.
	//
	// TODO: central lock to access the map is not ideal.
	//   option: pre-assign ids to all user annotation region names and tags
	//   option: per-P cache
	//   option: sync.Map like structure
	stringsLock mutex
	strings     map[string]uint64
	stringSeq   uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// gTraceState is per-G state for the tracer.
type gTraceState struct {
	sysExitTime        traceTime // timestamp when syscall has returned
	tracedSyscallEnter bool      // syscall or cgo was entered while trace was enabled or StartTrace has emitted EvGoInSyscall about this goroutine
	seq                uint64    // trace event sequencer
	lastP              puintptr  // last P emitted an event for this goroutine
}

// reset resets the gTraceState. It's a no-op in the old tracer: per-G state
// is (re)initialized explicitly by StartTrace and GoCreate.
func (s *gTraceState) reset() {}

// mTraceState is per-M state for the tracer.
type mTraceState struct {
	startingTrace  bool // this M is in StartTrace, potentially before traceEnabled is true
	tracedSTWStart bool // this M traced a STW start, so it should trace an end
}

// pTraceState is per-P state for the tracer.
type pTraceState struct {
	buf traceBufPtr

	// inSweep indicates the sweep events should be traced.
	// This is used to defer the sweep start event until a span
	// has actually been swept.
	inSweep bool

	// swept and reclaimed track the number of bytes swept and reclaimed
	// by sweeping in the current sweep loop (while inSweep was true).
	swept, reclaimed uintptr
}

// traceLockInit initializes global trace locks.
func traceLockInit() {
	lockInit(&trace.bufLock, lockRankTraceBuf)
	lockInit(&trace.stringsLock, lockRankTraceStrings)
	lockInit(&trace.lock, lockRankTrace)
	lockInit(&trace.stackTab.lock, lockRankTraceStackTab)
}

// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link     traceBufPtr             // in trace.empty/full
	lastTime traceTime               // when we wrote the last event
	pos      int                     // next write offset in arr
	stk      [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
type traceBuf struct {
	_ sys.NotInHeap
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf is now embedded runtime/internal/sys.NotInHeap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// traceEnabled returns true if the trace is currently enabled.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceEnabled() bool {
	return trace.enabled
}

// traceShuttingDown returns true if the trace is currently shutting down.
//
//go:nosplit
func traceShuttingDown() bool {
	return trace.shutdown
}

// traceLocker represents an M writing trace events. While a traceLocker value
// is valid, the tracer observes all operations on the G/M/P or trace events
// being written as happening atomically.
//
// Beyond recording whether tracing is enabled, this is a no-op in the current
// tracer; it exists for compatibility with the upcoming new tracer.
type traceLocker struct {
	enabled bool
}

// traceAcquire prepares this M for writing one or more trace events.
//
// This exists for compatibility with the upcoming new tracer; in the current
// tracer it only checks whether tracing is enabled.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceAcquire() traceLocker {
	if !traceEnabled() {
		return traceLocker{false}
	}
	return traceLocker{true}
}

// ok returns true if the traceLocker is valid (i.e. tracing is enabled).
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func (tl traceLocker) ok() bool {
	return tl.enabled
}

// traceRelease indicates that this M is done writing trace events.
//
// This exists for compatibility with the upcoming new tracer; it's a no-op
// in the current tracer.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceRelease(tl traceLocker) {
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via [ReadTrace].
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the [runtime/trace] package or the [testing]
// package's -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	// Do not stop the world during GC so we ensure we always see
	// a consistent view of GC-related events (e.g. a start is always
	// paired with an end).
	stw := stopTheWorldGC(stwStartTrace)

	// Prevent sysmon from running any code that could generate events.
	lock(&sched.sysmonlock)

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC(stw)
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set trace.startingTrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	mp := getg().m
	mp.trace.startingTrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)

	profBuf := newProfBuf(2, profBufWordCount, profBufTagCount) // after the timestamp, header is [pp.id, gp.goid]
	trace.cpuLogRead = profBuf

	// We must not acquire trace.signalLock outside of a signal handler: a
	// profiling signal may arrive at any time and try to acquire it, leading to
	// deadlock. Because we can't use that lock to protect updates to
	// trace.cpuLogWrite (only use of the structure it references), reads and
	// writes of the pointer must be atomic. (And although this field is never
	// the sole pointer to the profBuf value, it's best to allow a write barrier
	// here.)
	atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), unsafe.Pointer(profBuf))

	// World is stopped, no need to lock.
	forEachGRace(func(gp *g) {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.trace.seq = 0
			gp.trace.lastP = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{logicalStackSentinel, startPCforTrace(gp.startpc) + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, gp.goid, uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.trace.seq++
			traceEvent(traceEvGoWaiting, -1, gp.goid)
		}
		if status == _Gsyscall {
			gp.trace.seq++
			gp.trace.tracedSyscallEnter = true
			traceEvent(traceEvGoInSyscall, -1, gp.goid)
		} else if status == _Gdead && gp.m != nil && gp.m.isextra {
			// Trigger two trace events for the dead g in the extra m,
			// since the next event of the g will be traceEvGoSysExit in exitsyscall,
			// while calling from C thread to Go.
			gp.trace.seq = 0
			gp.trace.lastP = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{logicalStackSentinel, startPCforTrace(0) + sys.PCQuantum}) // no start pc
			traceEvent(traceEvGoCreate, -1, gp.goid, uint64(id), stackID)
			gp.trace.seq++
			gp.trace.tracedSyscallEnter = true
			traceEvent(traceEvGoInSyscall, -1, gp.goid)
		} else {
			// We need to explicitly clear the flag. A previous trace might have
			// ended while this goroutine was in a syscall, leaving the flag set
			// from that trace. If we don't clear it, a GoSysExit could be emitted
			// in the new trace without any matching GoInSyscall or GoSysBlock,
			// which would make the trace inconsistent.
			gp.trace.tracedSyscallEnter = false
		}
	})
	// Use a dummy traceLocker; tracing isn't flagged enabled yet, but
	// startingTrace lets these preliminary events through.
	tl := traceLocker{}
	tl.ProcStart()
	tl.GoStart()
	// Note: startTicks needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysExitTime after startTicks but before traceEvGoInSyscall timestamp.
	trace.startTime = traceClockNow()
	trace.startTicks = cputicks()
	trace.startNanotime = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	mp.trace.startingTrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(mp, pid)

	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	// Record the current state of HeapGoal to avoid information loss in trace.
	//
	// N.B. This in particular must happen *after* we set trace.enabled, otherwise
	// this event would be dropped by traceEvent.
	tl.HeapGoal()

	startTheWorldGC(stw)
	return nil
}

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers consistently
	// and so that no new events are generated while we flush.
	stw := stopTheWorldGC(stwStopTrace)

	// See the comment in StartTrace.
	lock(&sched.sysmonlock)

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC(stw)
		return
	}

	// Trace GoSched for us, and use a dummy locker. The world is stopped
	// and we control whether the trace is enabled, so this is safe.
	tl := traceLocker{}
	tl.GoSched()

	atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), nil)
	trace.cpuLogRead.close()
	traceReadCPU()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.trace.buf
		if buf != 0 {
			traceFullQueue(buf)
			p.trace.buf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}
	if trace.cpuLogBuf != 0 {
		buf := trace.cpuLogBuf
		trace.cpuLogBuf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	// Wait for startNanotime != endNanotime. On Windows the default interval between
	// system clock ticks is typically between 1 and 15 milliseconds, which may not
	// have passed since the trace started. Without nanotime moving forward, trace
	// tooling has no way of identifying how much real time each cputicks time delta
	// represents.
	for {
		trace.endTime = traceClockNow()
		trace.endTicks = cputicks()
		trace.endNanotime = nanotime()

		if trace.endNanotime != trace.startNanotime || faketime != 0 {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC(stw)

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	systemstack(func() {
		// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
		lock(&trace.lock)
		for _, p := range allp[:cap(allp)] {
			if p.trace.buf != 0 {
				throw("trace: non-empty trace buffer in proc")
			}
		}
		if trace.buf != 0 {
			throw("trace: non-empty global trace buffer")
		}
		if trace.fullHead != 0 || trace.fullTail != 0 {
			throw("trace: non-empty full trace buffer")
		}
		if trace.reading != 0 || trace.reader.Load() != nil {
			throw("trace: reading after shutdown")
		}
		for trace.empty != 0 {
			buf := trace.empty
			trace.empty = buf.ptr().link
			sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
		}
		trace.strings = nil
		trace.shutdown = false
		trace.cpuLogRead = nil
		unlock(&trace.lock)
	})
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
top:
	var buf []byte
	var park bool
	systemstack(func() {
		buf, park = readTrace0()
	})
	if park {
		gopark(func(gp *g, _ unsafe.Pointer) bool {
			if !trace.reader.CompareAndSwapNoWB(nil, gp) {
				// We're racing with another reader.
				// Wake up and handle this case.
				return false
			}

			if g2 := traceReader(); gp == g2 {
				// New data arrived between unlocking
				// trace.lock and the CAS and we won the wake-up
				// race, so wake up directly.
				return false
			} else if g2 != nil {
				printlock()
				println("runtime: got trace reader", g2, g2.goid)
				throw("unexpected trace reader")
			}

			return true
		}, nil, waitReasonTraceReaderBlocked, traceBlockSystemGoroutine, 2)
		goto top
	}

	return buf
}
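
// A typical consumer runs a loop like the following on its own goroutine,
// copying each chunk out before the next call, while some other goroutine
// eventually calls StopTrace. This is a sketch of what the runtime/trace
// package does; w stands in for an io.Writer supplied by the caller:
//
//	for {
//		data := runtime.ReadTrace()
//		if data == nil {
//			break // tracing stopped and all buffered data was returned
//		}
//		w.Write(data)
//	}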

// readTrace0 is ReadTrace's continuation on g0. This must run on the
// system stack because it acquires trace.lock.
//
//go:systemstack
func readTrace0() (buf []byte, park bool) {
	if raceenabled {
		// g0 doesn't have a race context. Borrow the user G's.
		if getg().racectx != 0 {
			throw("expected racectx == 0")
		}
		getg().racectx = getg().m.curg.racectx
		// (This defer should get open-coded, which is safe on the
		// system stack.)
		defer func() { getg().racectx = 0 }()
	}

	// Optimistically look for CPU profile samples. This may write new stack
	// records, and may write new tracing buffers. This must be done with the
	// trace lock not held.
	if !trace.footerWritten && !trace.shutdown {
		traceReadCPU()
	}

	// This function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)

	if trace.reader.Load() != nil {
		// More than one goroutine reads trace. This is bad.
		// But we rather do not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil, false
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		unlock(&trace.lock)
		return []byte("go 1.21 trace\x00\x00\x00"), false
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		// We don't simply use a note because the scheduler
		// executes this goroutine directly when it wakes up
		// (also a note would consume an M).
		unlock(&trace.lock)
		return nil, true
	}
newFull:
	assertLockHeld(&trace.lock)
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos], false
	}

	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		freq := (float64(trace.endTicks-trace.startTicks) / traceTimeDiv) / (float64(trace.endNanotime-trace.startNanotime) / 1e9)
		if freq <= 0 {
			throw("trace: ReadTrace got invalid frequency")
		}
		unlock(&trace.lock)

		// Write frequency event.
		bufp := traceFlush(0, 0)
		buf := bufp.ptr()
		buf.byte(traceEvFrequency | 0<<traceArgCountShift)
		buf.varint(uint64(freq))

		// Dump stack table.
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		bufp = trace.stackTab.dump(bufp)

		// Flush final buffer.
		lock(&trace.lock)
		traceFullQueue(bufp)
		goto newFull // trace.lock should be held at newFull
	}
	// Done.
	if trace.shutdown {
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil, false
	}
	// Also bad, but see the comment above.
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil, false
}

// traceReader returns the trace reader that should be woken up, if any.
// Callers should first check that trace.enabled or trace.shutdown is set.
//
// This must run on the system stack because it acquires trace.lock.
//
//go:systemstack
func traceReader() *g {
	// Optimistic check first.
	if traceReaderAvailable() == nil {
		return nil
	}
	lock(&trace.lock)
	gp := traceReaderAvailable()
	if gp == nil || !trace.reader.CompareAndSwapNoWB(gp, nil) {
		unlock(&trace.lock)
		return nil
	}
	unlock(&trace.lock)
	return gp
}

// traceReaderAvailable returns the trace reader if it is not currently
// blocked waiting for trace data. It returns nil otherwise.
func traceReaderAvailable() *g {
	if trace.fullHead != 0 || trace.shutdown {
		return trace.reader.Load()
	}
	return nil
}

// traceProcFree frees trace buffer associated with pp.
//
// This must run on the system stack because it acquires trace.lock.
//
//go:systemstack
func traceProcFree(pp *p) {
	buf := pp.trace.buf
	pp.trace.buf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceThreadDestroy is called when a thread is destroyed. It's a no-op
// in the old tracer; only the new tracer keeps per-M trace state to flush.
func traceThreadDestroy(_ *m) {
	// No-op in the old tracer.
}

// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}
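
// Taken together, these queues give each traceBuf a simple lifecycle:
// traceFlush hands out a buffer from trace.empty (or allocates one), writers
// fill it, traceFlush then appends it to the full queue via traceFullQueue,
// ReadTrace dequeues it with traceFullDequeue and parks it in trace.reading
// while the caller consumes it, and finally readTrace0 pushes it back onto
// trace.empty for reuse.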

// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.
	//
	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
	//
	// Note trace_userTaskCreate runs the same check.
	if !trace.enabled && !mp.trace.startingTrace {
		traceReleaseBuffer(mp, pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, 0, skip, args...)
	traceReleaseBuffer(mp, pid)
}

// traceEventLocked writes a single event of type ev to the trace buffer bufp,
// flushing the buffer if necessary. pid is the id of the current P, or
// traceGlobProc if we're tracing without a real P.
//
// Preemption is disabled, and if running without a real P the global tracing
// buffer is locked.
//
// Event types that do not include a stack set skip to -1. Event types that
// include a stack may explicitly reference a stackID from the trace.stackTab
// (obtained via traceStackID). Without an explicit stackID, this
// function will automatically capture the stack of the goroutine currently
// running on mp, skipping skip top frames or, if skip is 0, writing out an
// empty stack record.
//
// It records the event's args to the traced buffer, even if the buffer becomes
// full during the process.
func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, stackID uint32, skip int, args ...uint64) {
	buf := bufp.ptr()
	// TODO: test on non-zero extraBytes param.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type/length, timestamp, stack id and two add params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		systemstack(func() {
			buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		})
		bufp.set(buf)
	}

	ts := traceClockNow()
	if ts <= buf.lastTime {
		ts = buf.lastTime + 1
	}
	tsDiff := uint64(ts - buf.lastTime)
	buf.lastTime = ts
	narg := byte(len(args))
	if stackID != 0 || skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tsDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if stackID != 0 {
		buf.varint(uint64(stackID))
	} else if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}
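
// Concretely, the first byte of every event packs the event type in its low
// six bits and the (capped) argument count in its top two. For example, the
// batch header written by traceFlush is traceEvBatch | 1<<traceArgCountShift,
// i.e. 0x41: event type 1 with one counted argument (the P id) preceding the
// timestamp. A count of 3 means "three or more" and switches the encoding to
// the length-prefixed form handled via lenp above.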

// traceCPUSample writes a CPU profile sample stack to the execution tracer's
// profiling buffer. It is called from a signal handler, so is limited in what
// it can do.
func traceCPUSample(gp *g, _ *m, pp *p, stk []uintptr) {
	if !traceEnabled() {
		// Tracing is usually turned off; don't spend time acquiring the signal
		// lock unless it's turned on.
		return
	}

	// Match the clock used in traceEventLocked.
	now := traceClockNow()
	// The "header" here is the ID of the P that was running the profiled code,
	// followed by the ID of the goroutine. (For normal CPU profiling, it's
	// usually the number of samples with the given stack.) Near syscalls, pp
	// may be nil. Reporting goid of 0 is fine for either g0 or a nil gp.
	var hdr [2]uint64
	if pp != nil {
		// Overflow records in profBuf have all header values set to zero. Make
		// sure that real headers have at least one bit set.
		hdr[0] = uint64(pp.id)<<1 | 0b1
	} else {
		hdr[0] = 0b10
	}
	if gp != nil {
		hdr[1] = gp.goid
	}

	// Allow only one writer at a time.
	for !trace.signalLock.CompareAndSwap(0, 1) {
		// TODO: Is it safe to osyield here?
		osyield()
	}

	if log := (*profBuf)(atomic.Loadp(unsafe.Pointer(&trace.cpuLogWrite))); log != nil {
		// Note: we don't pass a tag pointer here (how should profiling tags
		// interact with the execution tracer?), but if we did we'd need to be
		// careful about write barriers. See the long comment in profBuf.write.
		log.write(nil, int64(now), hdr[:], stk)
	}

	trace.signalLock.Store(0)
}

// traceReadCPU attempts to read from the CPU profile buffer and write the
// samples it finds into the trace's CPU-sample buffer, flushing if necessary.
func traceReadCPU() {
	bufp := &trace.cpuLogBuf

	for {
		data, tags, _ := trace.cpuLogRead.read(profBufNonBlocking)
		if len(data) == 0 {
			break
		}
		for len(data) > 0 {
			if len(data) < 4 || data[0] > uint64(len(data)) {
				break // truncated profile
			}
			if data[0] < 4 || tags != nil && len(tags) < 1 {
				break // malformed profile
			}
			if len(tags) < 1 {
				break // mismatched profile records and tags
			}
			timestamp := data[1]
			ppid := data[2] >> 1
			if hasP := (data[2] & 0b1) != 0; !hasP {
				ppid = ^uint64(0)
			}
			goid := data[3]
			stk := data[4:data[0]]
			empty := len(stk) == 1 && data[2] == 0 && data[3] == 0
			data = data[data[0]:]
			// No support here for reporting goroutine tags at the moment; if
			// that information is to be part of the execution trace, we'd
			// probably want to see when the tags are applied and when they
			// change, instead of only seeing them when we get a CPU sample.
			tags = tags[1:]

			if empty {
				// Looks like an overflow record from the profBuf. Not much to
				// do here, we only want to report full records.
				continue
			}

			buf := bufp.ptr()
			if buf == nil {
				systemstack(func() {
					*bufp = traceFlush(*bufp, 0)
				})
				buf = bufp.ptr()
			}
			nstk := 1
			buf.stk[0] = logicalStackSentinel
			for ; nstk < len(buf.stk) && nstk-1 < len(stk); nstk++ {
				buf.stk[nstk] = uintptr(stk[nstk-1])
			}
			stackID := trace.stackTab.put(buf.stk[:nstk])

			traceEventLocked(0, nil, 0, bufp, traceEvCPUSample, stackID, 1, timestamp, ppid, goid)
		}
	}
}

// logicalStackSentinel is a sentinel value at pcBuf[0] signifying that
// pcBuf[1:] holds a logical stack requiring no further processing. Any other
// value at pcBuf[0] represents a skip value to apply to the physical stack in
// pcBuf[1:] after inline expansion.
const logicalStackSentinel = ^uintptr(0)

// traceStackID captures a stack trace into pcBuf, registers it in the trace
// stack table and returns its unique ID. pcBuf should have a length equal to
// traceStackSize. skip controls the number of leaf frames to omit in order to
// hide tracer internals from stack traces.
func traceStackID(mp *m, pcBuf []uintptr, skip int) uint64 {
	gp := getg()
	curgp := mp.curg
	nstk := 1
	if tracefpunwindoff() || mp.hasCgoOnStack() {
		// Slow path: Unwind using default unwinder. Used when frame pointer
		// unwinding is unavailable or disabled (tracefpunwindoff), or might
		// produce incomplete results or crashes (hasCgoOnStack). Note that no
		// cgo callback related crashes have been observed yet. The main
		// motivation is to take advantage of a potentially registered cgo
		// symbolizer.
		pcBuf[0] = logicalStackSentinel
		if curgp == gp {
			nstk += callers(skip+1, pcBuf[1:])
		} else if curgp != nil {
			nstk += gcallers(curgp, skip, pcBuf[1:])
		}
	} else {
		// Fast path: Unwind using frame pointers.
		pcBuf[0] = uintptr(skip)
		if curgp == gp {
			nstk += fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf[1:])
		} else if curgp != nil {
			// We're called on the g0 stack through mcall(fn) or systemstack(fn). To
			// behave like gcallers above, we start unwinding from sched.bp, which
			// points to the caller frame of the leaf frame on g's stack. The return
			// address of the leaf frame is stored in sched.pc, which we manually
			// capture here.
			pcBuf[1] = curgp.sched.pc
			nstk += 1 + fpTracebackPCs(unsafe.Pointer(curgp.sched.bp), pcBuf[2:])
		}
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && curgp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(pcBuf[:nstk])
	return uint64(id)
}

// tracefpunwindoff returns true if frame pointer unwinding for the tracer is
// disabled via GODEBUG or not supported by the architecture.
func tracefpunwindoff() bool {
	return debug.tracefpunwindoff != 0 || (goarch.ArchFamily != goarch.AMD64 && goarch.ArchFamily != goarch.ARM64) || goos.IsPlan9 == 1
}

// fpTracebackPCs populates pcBuf with the return addresses for each frame and
// returns the number of PCs written to pcBuf. The returned PCs correspond to
// "physical frames" rather than "logical frames"; that is if A is inlined into
// B, this will return a PC for only B.
func fpTracebackPCs(fp unsafe.Pointer, pcBuf []uintptr) (i int) {
	for i = 0; i < len(pcBuf) && fp != nil; i++ {
		// The return address sits one word above the frame pointer.
		pcBuf[i] = *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
		// Follow the saved frame pointer to the caller's frame.
		fp = unsafe.Pointer(*(*uintptr)(fp))
	}
	return i
}
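
// The loop above relies on the Go ABI's frame layout on amd64 and arm64: the
// saved frame pointer of the caller lives at [fp] and the return address at
// [fp+PtrSize], so each iteration reads one return address and then follows
// the saved link to the caller's frame, until the chain ends at a nil fp.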

// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	// Any time we acquire a buffer, we may end up flushing it,
	// but flushes are rare. Record the lock edge even if it
	// doesn't happen this time.
	lockRankMayTraceFlush()

	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.trace.buf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(mp *m, pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(mp)
}

// lockRankMayTraceFlush records the lock ranking effects of a
// potential call to traceFlush.
func lockRankMayTraceFlush() {
	lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
}

// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
//
// This must run on the system stack because it acquires trace.lock.
//
//go:systemstack
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	lock(&trace.lock)
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ts := traceClockNow()
	if ts <= bufp.lastTime {
		ts = bufp.lastTime + 1
	}
	bufp.lastTime = ts
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(uint64(ts))

	unlock(&trace.lock)
	return buf
}

// traceString adds a string to the trace.strings and returns the id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// memory allocation in above may trigger tracing and
	// cause *bufp changes. Following code now works with *bufp,
	// so there must be no memory allocation or any activities
	// that causes tracing after this point.

	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		systemstack(func() {
			buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
			bufp.set(buf)
		})
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// double-check the string and the length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}
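
// For example, v = 300 (binary 100101100) is written as two bytes: first
// 0xAC (the low seven bits 0101100 with the 0x80 continuation bit set),
// then 0x02 (the remaining bits 10). Values below 0x80 take a single byte,
// which is why dividing timestamps by traceTimeDiv pays off.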

// varintAt writes varint v at byte position pos in buf. This always
// consumes traceBytesPerNumber bytes. This is intended for when the caller
// needs to reserve space for a varint but can't populate it until later.
func (buf *traceBuf) varintAt(pos int, v uint64) {
	for i := 0; i < traceBytesPerNumber; i++ {
		if i < traceBytesPerNumber-1 {
			buf.arr[pos] = 0x80 | byte(v)
		} else {
			buf.arr[pos] = byte(v)
		}
		v >>= 7
		pos++
	}
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}

// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex // Must be acquired on the system stack
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	// Switch to the system stack so we can acquire tab.lock.
	var id uint32
	systemstack(func() {
		lock(&tab.lock)
		if id = tab.find(pcs, hash); id != 0 {
			unlock(&tab.lock)
			return
		}
		// Create new record.
		tab.seq++
		stk := tab.newStack(len(pcs))
		stk.hash = hash
		stk.id = tab.seq
		id = stk.id
		stk.n = len(pcs)
		stkpc := stk.stack()
		copy(stkpc, pcs)
		part := int(hash % uintptr(len(tab.tab)))
		stk.link = tab.tab[part]
		atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
		unlock(&tab.lock)
	})
	return id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
}

// traceFrames returns the frames corresponding to pcs. It may
// allocate and may emit trace events.
func traceFrames(bufp traceBufPtr, pcs []uintptr) ([]traceFrame, traceBufPtr) {
	frames := make([]traceFrame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		var frame traceFrame
		f, more := ci.Next()
		frame, bufp = traceFrameForPC(bufp, 0, f)
		frames = append(frames, frame)
		if !more {
			return frames, bufp
		}
	}
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
//
// This must run on the system stack because it calls traceFlush.
//
//go:systemstack
func (tab *traceStackTable) dump(bufp traceBufPtr) traceBufPtr {
	for i := range tab.tab {
		stk := tab.tab[i].ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			var frames []traceFrame
			frames, bufp = traceFrames(bufp, fpunwindExpand(stk.stack()))

			// An upper bound on the size of this record: the event byte, the
			// reserved length varint, the stack id, the frame count, and four
			// varints per frame, each at most traceBytesPerNumber bytes.
			maxSize := 1 + traceBytesPerNumber + (2+4*len(frames))*traceBytesPerNumber

			if buf := bufp.ptr(); len(buf.arr)-buf.pos < maxSize {
				bufp = traceFlush(bufp, 0)
			}

			// Emit the header, reserving space for the record length.
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			lenPos := buf.pos
			buf.pos += traceBytesPerNumber

			// Emit the stack record itself.
			recPos := buf.pos
			buf.varint(uint64(stk.id))
			buf.varint(uint64(len(frames)))
			for _, frame := range frames {
				buf.varint(uint64(frame.PC))
				buf.varint(frame.funcID)
				buf.varint(frame.fileID)
				buf.varint(frame.line)
			}

			// Fill in the length.
			buf.varintAt(lenPos, uint64(buf.pos-recPos))
		}
	}

	tab.mem.drop()
	*tab = traceStackTable{}
	lockInit(&((*tab).lock), lockRankTraceStackTab)

	return bufp
}

// fpunwindExpand checks if pcBuf contains logical frames (which include inlined
// frames) or physical frames (produced by frame pointer unwinding) using a
// sentinel value in pcBuf[0]. Logical frames are simply returned without the
// sentinel. Physical frames are turned into logical frames via inline unwinding
// and by applying the skip value that's stored in pcBuf[0].
func fpunwindExpand(pcBuf []uintptr) []uintptr {
	if len(pcBuf) > 0 && pcBuf[0] == logicalStackSentinel {
		// pcBuf contains logical rather than inlined frames, skip has already been
		// applied, just return it without the sentinel value in pcBuf[0]
		return pcBuf[1:]
	}

	var (
		lastFuncID = abi.FuncIDNormal
		newPCBuf   = make([]uintptr, 0, traceStackSize)
		skip       = pcBuf[0]
		// skipOrAdd skips or appends retPC to newPCBuf and returns true if more
		// pcs can be added.
		skipOrAdd = func(retPC uintptr) bool {
			if skip > 0 {
				skip--
			} else {
				newPCBuf = append(newPCBuf, retPC)
			}
			return len(newPCBuf) < cap(newPCBuf)
		}
	)

outer:
	for _, retPC := range pcBuf[1:] {
		callPC := retPC - 1
		fi := findfunc(callPC)
		if !fi.valid() {
			// There is no funcInfo if callPC belongs to a C function. In this case
			// we still keep the pc, but don't attempt to expand inlined frames.
			if more := skipOrAdd(retPC); !more {
				break outer
			}
			continue
		}

		u, uf := newInlineUnwinder(fi, callPC)
		for ; uf.valid(); uf = u.next(uf) {
			sf := u.srcFunc(uf)
			if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(lastFuncID) {
				// ignore wrappers
			} else if more := skipOrAdd(uf.pc + 1); !more {
				break outer
			}
			lastFuncID = sf.funcID
		}
	}
	return newPCBuf
}

type traceFrame struct {
	PC     uintptr
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame
	frame.PC = f.PC

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
type traceAllocBlock struct {
	_    sys.NotInHeap
	next traceAllocBlockPtr
	data [64<<10 - goarch.PtrSize]byte
}

// TODO: Since traceAllocBlock is now embedded runtime/internal/sys.NotInHeap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = alignUp(n, goarch.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to trace.

func (_ traceLocker) Gomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func (_ traceLocker) ProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func (_ traceLocker) ProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
	// to handle this we temporary employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func (_ traceLocker) GCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func (_ traceLocker) GCDone() {
	traceEvent(traceEvGCDone, -1)
}

func (_ traceLocker) STWStart(reason stwReason) {
	// Don't trace if this STW is for trace start/stop, since tracing
	// switches on and off during such a STW.
	if reason == stwStartTrace || reason == stwStopTrace {
		return
	}
	getg().m.trace.tracedSTWStart = true
	traceEvent(traceEvSTWStart, -1, uint64(reason))
}

func (_ traceLocker) STWDone() {
	mp := getg().m
	if !mp.trace.tracedSTWStart {
		return
	}
	mp.trace.tracedSTWStart = false
	traceEvent(traceEvSTWDone, -1)
}

// GCSweepStart prepares to trace a sweep loop. This does not
// emit any events until GCSweepSpan is called.
//
// GCSweepStart must be paired with GCSweepDone and there
// must be no preemption points between these two calls.
func (_ traceLocker) GCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep, so that a sweep loop that sweeps nothing emits nothing.
	pp := getg().m.p.ptr()
	if pp.trace.inSweep {
		throw("double traceGCSweepStart")
	}
	pp.trace.inSweep, pp.trace.swept, pp.trace.reclaimed = true, 0, 0
}

// GCSweepSpan traces the sweep of a single page.
//
// This may be called outside a GCSweepStart/GCSweepDone pair, in which
// case it is a no-op; otherwise it must fall between such a pair.
func (_ traceLocker) GCSweepSpan(bytesSwept uintptr) {
	pp := getg().m.p.ptr()
	if pp.trace.inSweep {
		if pp.trace.swept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		pp.trace.swept += bytesSwept
	}
}

func (_ traceLocker) GCSweepDone() {
	pp := getg().m.p.ptr()
	if !pp.trace.inSweep {
		throw("missing traceGCSweepStart")
	}
	if pp.trace.swept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(pp.trace.swept), uint64(pp.trace.reclaimed))
	}
	pp.trace.inSweep = false
}

func (_ traceLocker) GCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func (_ traceLocker) GCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func (_ traceLocker) GoCreate(newg *g, pc uintptr) {
	newg.trace.seq = 0
	newg.trace.lastP = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{logicalStackSentinel, startPCforTrace(pc) + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, newg.goid, uint64(id))
}

func (_ traceLocker) GoStart() {
	gp := getg().m.curg
	pp := gp.m.p
	gp.trace.seq++
	if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		traceEvent(traceEvGoStartLabel, -1, gp.goid, gp.trace.seq, trace.markWorkerLabels[pp.ptr().gcMarkWorkerMode])
	} else if gp.trace.lastP == pp {
		traceEvent(traceEvGoStartLocal, -1, gp.goid)
	} else {
		gp.trace.lastP = pp
		traceEvent(traceEvGoStart, -1, gp.goid, gp.trace.seq)
	}
}

func (_ traceLocker) GoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func (_ traceLocker) GoSched() {
	gp := getg()
	gp.trace.lastP = gp.m.p
	traceEvent(traceEvGoSched, 1)
}

func (_ traceLocker) GoPreempt() {
	gp := getg()
	gp.trace.lastP = gp.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func (_ traceLocker) GoPark(reason traceBlockReason, skip int) {
	// Convert the block reason directly to a trace event type.
	// See traceBlockReason for more information.
	traceEvent(byte(reason), skip)
}

func (_ traceLocker) GoUnpark(gp *g, skip int) {
	pp := getg().m.p
	gp.trace.seq++
	if gp.trace.lastP == pp {
		traceEvent(traceEvGoUnblockLocal, skip, gp.goid)
	} else {
		gp.trace.lastP = pp
		traceEvent(traceEvGoUnblock, skip, gp.goid, gp.trace.seq)
	}
}

func (_ traceLocker) GoSysCall() {
	var skip int
	switch {
	case tracefpunwindoff():
		// Unwind by skipping 1 frame relative to gp.syscallsp which is captured 3
		// frames above this frame. For frame pointer unwinding we delegate the
		// skip operation to fpunwindExpand instead, hard coding the number of
		// frames in between our caller and the actual syscall, see cases below.
		skip = 1
	case GOOS == "solaris" || GOOS == "illumos":
		// These platforms don't use a libc_read_trampoline.
		skip = 3
	default:
		// Skip the extra trampoline frame used on most systems.
		skip = 4
	}
	getg().m.curg.trace.tracedSyscallEnter = true
	traceEvent(traceEvGoSysCall, skip)
}

func (_ traceLocker) GoSysExit(lostP bool) {
	if !lostP {
		throw("lostP must always be true in the old tracer for GoSysExit")
	}
	gp := getg().m.curg
	if !gp.trace.tracedSyscallEnter {
		// There was no syscall entry traced for us at all, so there's definitely
		// no EvGoSysBlock or EvGoInSyscall before us, which EvGoSysExit requires.
		return
	}
	gp.trace.tracedSyscallEnter = false
	ts := gp.trace.sysExitTime
	if ts != 0 && ts < trace.startTime {
		// There is a race between the code that initializes sysExitTimes
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysExitTime must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater precision. But if they're not valid for
		// this trace, assume that the trace was started after the actual
		// syscall exit (but before we actually managed to start the
		// goroutine, aka right now), and assign a fresh time stamp.
		ts = 0
	}
	gp.trace.sysExitTime = 0
	gp.trace.seq++
	gp.trace.lastP = gp.m.p
	traceEvent(traceEvGoSysExit, -1, gp.goid, gp.trace.seq, uint64(ts))
}

// RecordSyscallExitedTime takes a syscall-exited timestamp for gp and
// saves it for GoSysExit.
func (_ traceLocker) RecordSyscallExitedTime(gp *g, oldp *p) {
	// Wait till traceGoSysBlock event is emitted.
	// This ensures consistency of the trace (the goroutine is started after it is blocked).
	for oldp != nil && oldp.syscalltick == gp.m.syscalltick {
		osyield()
	}
	// We can't trace the syscall exit right now because we don't have a P.
	// Remember the exit time instead; GoSysExit will consume it once the
	// goroutine is running with a P again.
	gp.trace.sysExitTime = traceClockNow()
}

func (_ traceLocker) GoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked,
	// to handle this we temporary employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func (t traceLocker) ProcSteal(pp *p, forMe bool) {
	t.ProcStop(pp)
}

func (_ traceLocker) HeapAlloc(live uint64) {
	traceEvent(traceEvHeapAlloc, -1, live)
}

func (_ traceLocker) HeapGoal() {
	heapGoal := gcController.heapGoal()
	if heapGoal == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvHeapGoal, -1, 0)
	} else {
		traceEvent(traceEvHeapGoal, -1, heapGoal)
	}
}

// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.trace.startingTrace {
		traceReleaseBuffer(mp, pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 0, 3, id, parentID, typeStringID)
	traceReleaseBuffer(mp, pid)
}

//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}

//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.trace.startingTrace {
		traceReleaseBuffer(mp, pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 0, 3, id, mode, nameStringID)
	traceReleaseBuffer(mp, pid)
}

//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.trace.startingTrace {
		traceReleaseBuffer(mp, pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	// The log message is recorded after all of the normal trace event
	// arguments, including the task, category, and stack IDs. We must ask
	// traceEventLocked to reserve extra space for the length of the message
	// and the message itself.
	extraSpace := traceBytesPerNumber + len(message)
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 0, 3, id, categoryID)
	buf := bufp.ptr()

	// double-check the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(mp, pid)
}

// If the argument pc is the PC of a wrapper function (autogenerated code),
// startPCforTrace returns the PC of the wrapped function. Otherwise it returns pc.
func startPCforTrace(pc uintptr) uintptr {
	f := findfunc(pc)
	if !f.valid() {
		return pc // may happen for locked g in extra M since its pc is 0.
	}
	w := funcdata(f, abi.FUNCDATA_WrapInfo)
	if w == nil {
		return pc // not a wrapper
	}
	return f.datap.textAddr(*(*uint32)(w))
}

// OneNewExtraM registers the fact that a new extra M was created with
// the tracer. This matters if the M (which has an attached G) is used while
// the trace is still active because if it is, we need the fact that it exists
// to show up in the final trace.
func (tl traceLocker) OneNewExtraM(gp *g) {
	// Trigger two trace events for the locked g in the extra m,
	// since the next event of the g will be traceEvGoSysExit in exitsyscall,
	// while calling from C thread to Go.
	tl.GoCreate(gp, 0) // no start pc
	gp.trace.seq++
	traceEvent(traceEvGoInSyscall, -1, gp.goid)
}

// Not used in the old tracer. Solely for API compatibility with the new tracer.
func (tl traceLocker) GoCreateSyscall(gp *g) {
}

// Not used in the old tracer. Solely for API compatibility with the new tracer.
func (tl traceLocker) GoDestroySyscall() {
}

// traceTime represents a timestamp for the trace.
type traceTime uint64

// traceClockNow returns a monotonic timestamp. The clock this function gets
// the timestamp from is specific to tracing, and shouldn't be mixed with other
// clock sources.
//
// nosplit because it's called from exitsyscall, which is nosplit.
//
//go:nosplit
func traceClockNow() traceTime {
	return traceTime(cputicks() / traceTimeDiv)
}

// Not used in the old tracer. Solely for API compatibility with the new tracer.
func traceExitingSyscall() {
}

// Not used in the old tracer. Solely for API compatibility with the new tracer.
func traceExitedSyscall() {
}

// defaultTraceAdvancePeriod is only meaningful to the new tracer; the old
// tracer doesn't age its trace data in generations, so the period is zero.
const defaultTraceAdvancePeriod = 0