Source file
src/runtime/mprof.go

// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.

package runtime

import (
	"internal/abi"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var (
	// profInsertLock protects changes to the start of all *bucket linked lists.
	profInsertLock mutex
	// profBlockLock protects the contents of every blockRecord struct.
	profBlockLock mutex
	// profMemActiveLock protects the active field of every memRecord struct.
	profMemActiveLock mutex
	// profMemFutureLock is a set of locks that protect the respective elements
	// of the future array of every memRecord struct.
	profMemFutureLock [len(memRecord{}.future)]mutex
)

const (
	// profile types
	memProfile bucketType = 1 + iota
	blockProfile
	mutexProfile

	// size of bucket hash table
	buckHashSize = 179999

	// maxStack is the maximum stack depth recorded in a bucket.
	maxStack = 32
)

type bucketType int

// A bucket holds per-call-stack profiling information.
//
// This struct defines the bucket header. It is followed in memory by
// the stack words and then the actual record data, either a memRecord
// or a blockRecord (see bucket.stk, bucket.mp, and bucket.bp).
//
// Buckets are looked up by hashing the call stack into a linked-list
// hash table (buckhash), and they are never freed.
//
// No heap pointers.
type bucket struct {
	_       sys.NotInHeap
	next    *bucket
	allnext *bucket
	typ     bucketType // memProfile, blockProfile, or mutexProfile
	hash    uintptr
	size    uintptr
	nstk    uintptr
}

// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
//
// Heap profile counts are published with a delay: an allocation is
// counted toward cycle C+2 and a free toward cycle C+1 (relative to the
// current global heap profile cycle C), and a cycle is only folded into
// the published active profile once a full GC cycle has completed. This
// keeps an allocation and its eventual free in the same published
// snapshot, so the profile is not skewed toward allocations whose frees
// simply have not been swept yet.
type memRecord struct {
	// active is the currently published profile. A profiling
	// cycle can be accumulated into active once it is complete.
	active memRecordCycle

	// future records the profile events we're counting for cycles
	// that have not yet been published. This is a ring buffer
	// indexed by the global heap profile cycle C and stores cycles
	// C, C+1, and C+2. Unlike active, these counts are only for a
	// single cycle; they are not cumulative across cycles.
	//
	// We store cycle C here because there's a window between when
	// C becomes the active cycle and when we've flushed it to
	// active.
	future [3]memRecordCycle
}

// memRecordCycle holds the totals for a single heap profile cycle.
type memRecordCycle struct {
	allocs, frees           uintptr
	alloc_bytes, free_bytes uintptr
}

// add accumulates b into a. It does not zero b.
func (a *memRecordCycle) add(b *memRecordCycle) {
	a.allocs += b.allocs
	a.frees += b.frees
	a.alloc_bytes += b.alloc_bytes
	a.free_bytes += b.free_bytes
}

// A blockRecord is the bucket data for a bucket of type blockProfile,
// which is used in blocking and mutex profiles.
type blockRecord struct {
	count  float64
	cycles int64
}

var (
	mbuckets atomic.UnsafePointer // *bucket, memory profile buckets
	bbuckets atomic.UnsafePointer // *bucket, blocking profile buckets
	xbuckets atomic.UnsafePointer // *bucket, mutex profile buckets
	buckhash atomic.UnsafePointer // *buckhashArray

	mProfCycle mProfCycleHolder
)

type buckhashArray [buckHashSize]atomic.UnsafePointer

const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)

// mProfCycleHolder holds the global heap profile cycle number (wrapped at
// mProfCycleWrap, stored starting at bit 1), and a flag (stored at bit 0)
// indicating whether future[cycle] in all buckets has been queued to flush
// into the active profile.
type mProfCycleHolder struct {
	value atomic.Uint32
}

// read returns the current cycle count.
func (c *mProfCycleHolder) read() (cycle uint32) {
	v := c.value.Load()
	cycle = v >> 1
	return cycle
}

// setFlushed sets the flushed flag. It returns the current cycle count and the
// previous value of the flushed flag.
func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool) {
	for {
		prev := c.value.Load()
		cycle = prev >> 1
		alreadyFlushed = (prev & 0x1) != 0
		next := prev | 0x1
		if c.value.CompareAndSwap(prev, next) {
			return cycle, alreadyFlushed
		}
	}
}

// increment increases the cycle count by one, wrapping the value at
// mProfCycleWrap. It clears the flushed flag.
func (c *mProfCycleHolder) increment() {
	// We explicitly wrap mProfCycle rather than depending on
	// uint wraparound because the memRecord.future ring does not
	// itself wrap at a power of two.
	for {
		prev := c.value.Load()
		cycle := prev >> 1
		cycle = (cycle + 1) % mProfCycleWrap
		next := cycle << 1
		if c.value.CompareAndSwap(prev, next) {
			break
		}
	}
}

// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
	switch typ {
	default:
		throw("invalid profile bucket type")
	case memProfile:
		size += unsafe.Sizeof(memRecord{})
	case blockProfile, mutexProfile:
		size += unsafe.Sizeof(blockRecord{})
	}

	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
	b.typ = typ
	b.nstk = uintptr(nstk)
	return b
}

// stk returns the slice in b holding the stack.
func (b *bucket) stk() []uintptr {
	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
	return stk[:b.nstk:b.nstk]
}

// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile {
		throw("bad use of bucket.mp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*memRecord)(data)
}

// bp returns the blockRecord associated with the blockProfile or
// mutexProfile bucket b.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile && b.typ != mutexProfile {
		throw("bad use of bucket.bp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*blockRecord)(data)
}

// stkbucket returns the bucket for stk, allocating a new bucket if needed
// and alloc is true.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
	bh := (*buckhashArray)(buckhash.Load())
	if bh == nil {
		lock(&profInsertLock)
		// check again under the lock
		bh = (*buckhashArray)(buckhash.Load())
		if bh == nil {
			bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
			if bh == nil {
				throw("runtime: cannot allocate memory")
			}
			buckhash.StoreNoWB(unsafe.Pointer(bh))
		}
		unlock(&profInsertLock)
	}

	// Hash stack.
	var h uintptr
	for _, pc := range stk {
		h += pc
		h += h << 10
		h ^= h >> 6
	}
	// hash in size
	h += size
	h += h << 10
	h ^= h >> 6
	// finalize
	h += h << 3
	h ^= h >> 11

	i := int(h % buckHashSize)
	// first check optimistically, without the lock
	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			return b
		}
	}

	if !alloc {
		return nil
	}

	lock(&profInsertLock)
	// check again under the insertion lock
	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			unlock(&profInsertLock)
			return b
		}
	}

	// Create new bucket.
	b := newBucket(typ, len(stk))
	copy(b.stk(), stk)
	b.hash = h
	b.size = size

	var allnext *atomic.UnsafePointer
	if typ == memProfile {
		allnext = &mbuckets
	} else if typ == mutexProfile {
		allnext = &xbuckets
	} else {
		allnext = &bbuckets
	}

	b.next = (*bucket)(bh[i].Load())
	b.allnext = (*bucket)(allnext.Load())

	bh[i].StoreNoWB(unsafe.Pointer(b))
	allnext.StoreNoWB(unsafe.Pointer(b))

	unlock(&profInsertLock)
	return b
}

func eqslice(x, y []uintptr) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xi := range x {
		if xi != y[i] {
			return false
		}
	}
	return true
}

// mProf_NextCycle publishes the next heap profile cycle and creates a
// fresh heap profile cycle. This operation is fast and can be done
// during STW. The caller must call mProf_Flush before calling
// mProf_NextCycle again.
//
// This is called by mark termination during STW so allocations and
// frees after the world is started again count towards a new heap
// profiling cycle.
func mProf_NextCycle() {
	mProfCycle.increment()
}

// mProf_Flush flushes the events from the current heap profiling
// cycle into the active profile. After this it is safe to start a new
// heap profiling cycle with mProf_NextCycle.
//
// This is called by GC after mark termination starts the world. In
// contrast with mProf_NextCycle, this is somewhat expensive, but safe
// to do concurrently.
func mProf_Flush() {
	cycle, alreadyFlushed := mProfCycle.setFlushed()
	if alreadyFlushed {
		return
	}

	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	unlock(&profMemActiveLock)
}

// mProf_FlushLocked flushes the events from the heap profiling cycle at index
// into the active profile. The caller must hold the lock for the active
// profile (profMemActiveLock) and for the profiling cycle at index
// (profMemFutureLock[index]).
func mProf_FlushLocked(index uint32) {
	assertLockHeld(&profMemActiveLock)
	assertLockHeld(&profMemFutureLock[index])
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()

		// Flush the cycle at index into the published profile and
		// clear it for reuse.
		mpc := &mp.future[index]
		mp.active.add(mpc)
		*mpc = memRecordCycle{}
	}
}

// mProf_PostSweep records that all sweep frees for this GC cycle have
// completed. This has the effect of publishing the heap profile
// snapshot as of the last mark termination without advancing the heap
// profile cycle.
func mProf_PostSweep() {
	// Flush cycle C+1 to the active profile so everything as of
	// the last mark termination becomes visible. *Don't* advance
	// the cycle, since we're still accumulating allocs in cycle
	// C+2, which have to become C+1 in the next cycle.
	cycle := mProfCycle.read() + 1

	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	unlock(&profMemActiveLock)
}

// mProf_Malloc is called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
	var stk [maxStack]uintptr
	nstk := callers(4, stk[:])

	index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))

	b := stkbucket(memProfile, size, stk[:nstk], true)
	mp := b.mp()
	mpc := &mp.future[index]

	lock(&profMemFutureLock[index])
	mpc.allocs++
	mpc.alloc_bytes += size
	unlock(&profMemFutureLock[index])

	// setprofilebucket locks a number of other mutexes, so call it outside
	// of the profiler locks to reduce contention and the chance of deadlock.
	// The object must be alive during the call to mProf_Malloc, so this is
	// safe to do without further synchronization.
	systemstack(func() {
		setprofilebucket(p, b)
	})
}

// mProf_Free is called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr) {
	index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future))

	mp := b.mp()
	mpc := &mp.future[index]

	lock(&profMemFutureLock[index])
	mpc.frees++
	mpc.free_bytes += size
	unlock(&profMemFutureLock[index])
}

var blockprofilerate uint64 // in CPU ticks

// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
	var r int64
	if rate <= 0 {
		r = 0 // disable profiling
	} else if rate == 1 {
		r = 1 // profile everything
	} else {
		// convert ns to cycles, use float64 to prevent overflow during multiplication
		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
		if r == 0 {
			r = 1
		}
	}

	atomic.Store64(&blockprofilerate, uint64(r))
}
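
// A minimal usage sketch (not part of this file; assumes a caller importing
// "runtime", "runtime/pprof", and "os"): enable block profiling, run the
// workload, then write the profile collected so far in text form.
//
//	runtime.SetBlockProfileRate(1) // sample every blocking event
//	defer runtime.SetBlockProfileRate(0)
//	runWorkload() // hypothetical workload
//	pprof.Lookup("block").WriteTo(os.Stderr, 1)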

func blockevent(cycles int64, skip int) {
	if cycles <= 0 {
		cycles = 1
	}

	rate := int64(atomic.Load64(&blockprofilerate))
	if blocksampled(cycles, rate) {
		saveblockevent(cycles, rate, skip+1, blockProfile)
	}
}

// blocksampled returns true for all events where cycles >= rate. Shorter
// events have a cycles/rate random chance of returning true.
func blocksampled(cycles, rate int64) bool {
	if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
		return false
	}
	return true
}

func saveblockevent(cycles, rate int64, skip int, which bucketType) {
	gp := getg()
	var nstk int
	var stk [maxStack]uintptr
	if gp.m.curg == nil || gp.m.curg == gp {
		nstk = callers(skip, stk[:])
	} else {
		nstk = gcallers(gp.m.curg, skip, stk[:])
	}
	b := stkbucket(which, 0, stk[:nstk], true)
	bp := b.bp()

	lock(&profBlockLock)
	// Scale the sample so the profile approximates the full population:
	// for the block profile, an event shorter than rate was sampled with
	// probability cycles/rate; for the mutex profile, an event was sampled
	// with probability 1/rate.
	if which == blockProfile && cycles < rate {
		// Remove sampling bias: count this short event as rate/cycles events.
		bp.count += float64(rate) / float64(cycles)
		bp.cycles += rate
	} else if which == mutexProfile {
		bp.count += float64(rate)
		bp.cycles += rate * cycles
	} else {
		bp.count++
		bp.cycles += cycles
	}
	unlock(&profBlockLock)
}

var mutexprofilerate uint64 // fraction sampled

// SetMutexProfileFraction controls the fraction of mutex contention events
// that are reported in the mutex profile. On average 1/rate events are
// reported. The previous rate is returned.
//
// To turn off profiling entirely, pass rate 0.
// To just read the current rate, pass rate < 0.
// (For n>1 the details of sampling may change.)
func SetMutexProfileFraction(rate int) int {
	if rate < 0 {
		return int(mutexprofilerate)
	}
	old := mutexprofilerate
	atomic.Store64(&mutexprofilerate, uint64(rate))
	return int(old)
}
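
// A minimal usage sketch (not part of this file; assumes a caller importing
// "runtime", "runtime/pprof", and "os"): sample roughly 1 in 5 contention
// events, then write out the mutex profile.
//
//	prev := runtime.SetMutexProfileFraction(5)
//	defer runtime.SetMutexProfileFraction(prev)
//	runWorkload() // hypothetical workload
//	pprof.Lookup("mutex").WriteTo(os.Stderr, 1)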

// mutexevent is called when a goroutine is delayed for cycles by mutex
// contention; it samples roughly 1/rate of the events.
func mutexevent(cycles int64, skip int) {
	if cycles < 0 {
		cycles = 0
	}
	rate := int64(atomic.Load64(&mutexprofilerate))
	// Sample roughly one in rate events.
	if rate > 0 && int64(fastrand())%rate == 0 {
		saveblockevent(cycles, rate, skip+1, mutexProfile)
	}
}

// Go interface to profile data.

// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024

// disableMemoryProfiling is set by the linker if memory profiling
// is not used and the link type guarantees nobody else could use it
// elsewhere.
var disableMemoryProfiling bool

// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}

// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }

// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
	return r.AllocObjects - r.FreeObjects
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// The returned profile may be up to two garbage collection cycles old.
// This is to avoid skewing the profile toward allocations; because
// allocations happen in real time but frees are delayed until the garbage
// collector performs sweeping, the profile only accounts for allocations
// that have had a chance to be freed by the garbage collector.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
	cycle := mProfCycle.read()
	// If we're between mProf_NextCycle and mProf_Flush, take care
	// of flushing to the active profile so we only have to look
	// at the active profile below.
	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	clear := true
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()
		if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
			n++
		}
		if mp.active.allocs != 0 || mp.active.frees != 0 {
			clear = false
		}
	}
	if clear {
		// Absolutely no data, suggesting that a garbage collection
		// has not yet happened. In order to allow profiling when
		// garbage collection is disabled from the beginning of execution,
		// accumulate all of the cycles, and recount buckets.
		n = 0
		for b := head; b != nil; b = b.allnext {
			mp := b.mp()
			for c := range mp.future {
				lock(&profMemFutureLock[c])
				mp.active.add(&mp.future[c])
				mp.future[c] = memRecordCycle{}
				unlock(&profMemFutureLock[c])
			}
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				n++
			}
		}
	}
	if n <= len(p) {
		ok = true
		idx := 0
		for b := head; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				record(&p[idx], b)
				idx++
			}
		}
	}
	unlock(&profMemActiveLock)
	return
}
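
// A minimal usage sketch (not part of this file; assumes a caller importing
// "runtime"): size the slice with a first call, then retry with headroom,
// following the n/ok contract described in the doc comment above.
//
//	n, _ := runtime.MemProfile(nil, true)
//	p := make([]runtime.MemProfileRecord, n+50) // headroom for new records
//	if n, ok := runtime.MemProfile(p, true); ok {
//		p = p[:n]
//		// inspect p[i].InUseBytes(), p[i].Stack(), ...
//	}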

// record writes the data for bucket b into the MemProfileRecord r.
func record(r *MemProfileRecord, b *bucket) {
	mp := b.mp()
	r.AllocBytes = int64(mp.active.alloc_bytes)
	r.FreeBytes = int64(mp.active.free_bytes)
	r.AllocObjects = int64(mp.active.allocs)
	r.FreeObjects = int64(mp.active.frees)
	if raceenabled {
		racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	if asanenabled {
		asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	copy(r.Stack0[:], b.stk())
	for i := int(b.nstk); i < len(r.Stack0); i++ {
		r.Stack0[i] = 0
	}
}

func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
	lock(&profMemActiveLock)
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()
		fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
	}
	unlock(&profMemActiveLock)
}

// BlockProfileRecord describes blocking events originated
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64
	Cycles int64
	StackRecord
}

// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&profBlockLock)
	head := (*bucket)(bbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := head; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			// Prevent callers from having to worry about division by zero:
			// a sampled count is never published as zero.
			if r.Count == 0 {
				r.Count = 1
			}
			r.Cycles = bp.cycles
			if raceenabled {
				racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
			}
			if msanenabled {
				msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			if asanenabled {
				asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&profBlockLock)
	return
}

// MutexProfile returns n, the number of records in the current mutex profile.
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
// Otherwise, MutexProfile does not change p, and returns n, false.
//
// Most clients should use the runtime/pprof package
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&profBlockLock)
	head := (*bucket)(xbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := head; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			r.Cycles = bp.cycles
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&profBlockLock)
	return
}

// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
	for mp := first; mp != nil; mp = mp.alllink {
		n++
	}
	if n <= len(p) {
		ok = true
		i := 0
		for mp := first; mp != nil; mp = mp.alllink {
			p[i].Stack0 = mp.createstack
			i++
		}
	}
	return
}

// runtime_goroutineProfileWithLabels is linked into the runtime/pprof package
// (via go:linkname) as its entry point for collecting a labeled goroutine profile.
func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	return goroutineProfileWithLabels(p, labels)
}

// labels may be nil. If labels is non-nil, it must have the same length as p.
func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	if labels != nil && len(labels) != len(p) {
		labels = nil
	}

	return goroutineProfileWithLabelsConcurrent(p, labels)
}

var goroutineProfile = struct {
	sema    uint32
	active  bool
	offset  atomic.Int64
	records []StackRecord
	labels  []unsafe.Pointer
}{
	sema: 1,
}

// goroutineProfileState indicates the status of a goroutine's stack for the
// current in-progress goroutine profile. Goroutines' stacks are initially
// "Absent" from the profile, and end up "Satisfied" by the time the profile
// is complete. While a goroutine's stack is being captured, its
// goroutineProfileState will be "InProgress" and it will not be able to run
// until the capture completes and the state moves to "Satisfied".
//
// Some goroutines (the finalizer goroutine, which at various times can be
// either a "system" or a "user" goroutine; the goroutine that is
// coordinating the profile; and any goroutines created during the profile)
// move directly to the Satisfied state.
type goroutineProfileState uint32

const (
	goroutineProfileAbsent goroutineProfileState = iota
	goroutineProfileInProgress
	goroutineProfileSatisfied
)

type goroutineProfileStateHolder atomic.Uint32

func (p *goroutineProfileStateHolder) Load() goroutineProfileState {
	return goroutineProfileState((*atomic.Uint32)(p).Load())
}

func (p *goroutineProfileStateHolder) Store(value goroutineProfileState) {
	(*atomic.Uint32)(p).Store(uint32(value))
}

func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileState) bool {
	return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
}

func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	semacquire(&goroutineProfile.sema)

	ourg := getg()

	stopTheWorld(stwGoroutineProfile)
	// Using gcount while the world is stopped should give us a consistent view
	// of the number of live goroutines, minus the number of goroutines that are
	// alive and permanently marked as "system". But to make this count agree
	// with what we'd get from isSystemGoroutine, we need special handling for
	// goroutines that can vary between user and system to ensure that the
	// count doesn't change during the collection. So, check the finalizer
	// goroutine in particular.
	n = int(gcount())
	if fingStatus.Load()&fingRunningFinalizer != 0 {
		n++
	}

	if n > len(p) {
		// There's not enough space in p to store the whole profile, so (per
		// the contract of runtime.GoroutineProfile) we're not allowed to write
		// to p at all and must return n, false.
		startTheWorld()
		semrelease(&goroutineProfile.sema)
		return n, false
	}

	// Save current goroutine.
	sp := getcallersp()
	pc := getcallerpc()
	systemstack(func() {
		saveg(pc, sp, ourg, &p[0])
	})
	ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
	goroutineProfile.offset.Store(1)

	// Prepare for all other goroutines to enter the profile. Aside from ourg,
	// every goroutine struct in the allgs list has its goroutineProfiled field
	// cleared. Any goroutine created from this point on (while
	// goroutineProfile.active is set) will start with its goroutineProfiled
	// field set to goroutineProfileSatisfied.
	goroutineProfile.active = true
	goroutineProfile.records = p
	goroutineProfile.labels = labels
	// The finalizer goroutine needs special handling because it can vary over
	// time between being a "user" goroutine (eligible for this profile) and a
	// "system" goroutine (to be excluded). Pick one before restarting the world.
	if fing != nil {
		fing.goroutineProfiled.Store(goroutineProfileSatisfied)
		if readgstatus(fing) != _Gdead && !isSystemGoroutine(fing, false) {
			doRecordGoroutineProfile(fing)
		}
	}
	startTheWorld()

	// Visit each goroutine that existed as of the startTheWorld call above.
	//
	// New goroutines may not be in this list, but we didn't want to know about
	// them anyway. If they do appear in this list (via reusing a dead goroutine
	// struct, or racing to launch between the world restarting and us getting
	// the list), they will already have their goroutineProfiled field set to
	// goroutineProfileSatisfied before their state transitions out of _Gdead.
	//
	// Any goroutine that the scheduler tries to execute concurrently with this
	// call will start by adding itself to the profile (before the act of
	// executing can cause any changes in its stack).
	forEachGRace(func(gp1 *g) {
		tryRecordGoroutineProfile(gp1, Gosched)
	})

	stopTheWorld(stwGoroutineProfileCleanup)
	endOffset := goroutineProfile.offset.Swap(0)
	goroutineProfile.active = false
	goroutineProfile.records = nil
	goroutineProfile.labels = nil
	startTheWorld()

	// Restore the invariant that every goroutine struct in allgs has its
	// goroutineProfiled field cleared.
	forEachGRace(func(gp1 *g) {
		gp1.goroutineProfiled.Store(goroutineProfileAbsent)
	})

	if raceenabled {
		raceacquire(unsafe.Pointer(&labelSync))
	}

	if n != int(endOffset) {
		// It's a big surprise that the number of goroutines changed while we
		// were collecting the profile. But probably better to return a
		// truncated profile than to crash the whole process.
		//
		// For instance, needm moves a goroutine out of the _Gdead state and so
		// might be able to change the goroutine count without interacting with
		// the scheduler. For code like that, the race windows are small and
		// the combination of features is uncommon, so it's hard to be (and
		// remain) sure we've caught them all.
	}

	semrelease(&goroutineProfile.sema)
	return n, true
}

// tryRecordGoroutineProfileWB asserts that write barriers are allowed and calls
// tryRecordGoroutineProfile.
//
//go:yeswritebarrierrec
func tryRecordGoroutineProfileWB(gp1 *g) {
	if getg().m.p.ptr() == nil {
		throw("no P available, write barriers are forbidden")
	}
	tryRecordGoroutineProfile(gp1, osyield)
}

// tryRecordGoroutineProfile ensures that gp1 has the appropriate representation
// in the current goroutine profile: either that it should not be profiled, or
// that a snapshot of its call stack and labels are now in the profile.
func tryRecordGoroutineProfile(gp1 *g, yield func()) {
	if readgstatus(gp1) == _Gdead {
		// Dead goroutines should not appear in the profile. Goroutines that
		// start while profile collection is active will get goroutineProfiled
		// set to goroutineProfileSatisfied before transitioning out of _Gdead,
		// so here we check _Gdead first.
		return
	}
	if isSystemGoroutine(gp1, true) {
		// System goroutines should not appear in the profile. (The finalizer
		// goroutine is marked as "already profiled".)
		return
	}

	for {
		prev := gp1.goroutineProfiled.Load()
		if prev == goroutineProfileSatisfied {
			// This goroutine is already in the profile (or is new since the
			// start of collection, so shouldn't appear in the profile).
			break
		}
		if prev == goroutineProfileInProgress {
			// Something else is adding gp1 to the goroutine profile right now.
			// Give that a moment to finish.
			yield()
			continue
		}

		// While we have gp1.goroutineProfiled set to
		// goroutineProfileInProgress, gp1 may appear _Grunnable but will not
		// actually be able to run. Disable preemption for ourselves, to make
		// sure we finish profiling gp1 right away instead of leaving it stuck
		// in this limbo.
		mp := acquirem()
		if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) {
			doRecordGoroutineProfile(gp1)
			gp1.goroutineProfiled.Store(goroutineProfileSatisfied)
		}
		releasem(mp)
	}
}

// doRecordGoroutineProfile writes gp1's call stack and labels to the current
// goroutine profile. Preemption is disabled.
//
// This may be called via tryRecordGoroutineProfile in two ways: by the
// goroutine that is coordinating the goroutine profile (running on its own
// stack), or from the scheduler in preparation to execute gp1 (running on the
// system stack).
func doRecordGoroutineProfile(gp1 *g) {
	if readgstatus(gp1) == _Grunning {
		print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
		throw("cannot read stack of running goroutine")
	}

	offset := int(goroutineProfile.offset.Add(1)) - 1

	if offset >= len(goroutineProfile.records) {
		// Should be impossible, but better to return a truncated profile than
		// to crash the entire process at this point. Instead, deal with it in
		// goroutineProfileWithLabelsConcurrent where we have more context.
		return
	}

	// saveg calls gentraceback, which may call cgo traceback functions. Run it
	// on the system stack so there is plenty of room for that machinery; when
	// called from the scheduler we are already on the system stack.
	systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset]) })

	if goroutineProfile.labels != nil {
		goroutineProfile.labels[offset] = gp1.labels
	}
}

func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	gp := getg()

	isOK := func(gp1 *g) bool {
		// Checking isSystemGoroutine here makes GoroutineProfile
		// consistent with both NumGoroutine and Stack.
		return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
	}

	stopTheWorld(stwGoroutineProfile)

	// World is stopped, no locking required.
	n = 1
	forEachGRace(func(gp1 *g) {
		if isOK(gp1) {
			n++
		}
	})

	if n <= len(p) {
		ok = true
		r, lbl := p, labels

		// Save current goroutine.
		sp := getcallersp()
		pc := getcallerpc()
		systemstack(func() {
			saveg(pc, sp, gp, &r[0])
		})
		r = r[1:]

		// If we have a place to put our goroutine labelmap, insert it there.
		if labels != nil {
			lbl[0] = gp.labels
			lbl = lbl[1:]
		}

		// Save other goroutines.
		forEachGRace(func(gp1 *g) {
			if !isOK(gp1) {
				return
			}

			if len(r) == 0 {
				// Should be impossible, but better to return a
				// truncated profile than to crash the entire process.
				return
			}

			// saveg calls gentraceback, which may call cgo traceback
			// functions; run it on the system stack.
			systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) })
			if labels != nil {
				lbl[0] = gp1.labels
				lbl = lbl[1:]
			}
			r = r[1:]
		})
	}

	if raceenabled {
		raceacquire(unsafe.Pointer(&labelSync))
	}

	startTheWorld()
	return n, ok
}

// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {

	return goroutineProfileWithLabels(p, nil)
}
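
// A minimal usage sketch (not part of this file; assumes a caller importing
// "runtime"): grow the slice until the profile fits, since goroutines may be
// created between calls.
//
//	p := make([]runtime.StackRecord, runtime.NumGoroutine()+10)
//	n, ok := runtime.GoroutineProfile(p)
//	for !ok {
//		p = make([]runtime.StackRecord, n+10)
//		n, ok = runtime.GoroutineProfile(p)
//	}
//	p = p[:n]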

// saveg records gp's stack, starting at pc/sp, into r. It zero-terminates
// r.Stack0 if the trace is shorter than len(r.Stack0).
func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
	var u unwinder
	u.initAt(pc, sp, 0, gp, unwindSilentErrors)
	n := tracebackPCs(&u, 0, r.Stack0[:])
	if n < len(r.Stack0) {
		r.Stack0[n] = 0
	}
}

// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
	if all {
		stopTheWorld(stwAllGoroutinesStack)
	}

	n := 0
	if len(buf) > 0 {
		gp := getg()
		sp := getcallersp()
		pc := getcallerpc()
		systemstack(func() {
			g0 := getg()
			// Force traceback=1 to override GOTRACEBACK setting,
			// so that Stack's results are consistent.
			// GOTRACEBACK is only about crash dumps.
			g0.m.traceback = 1
			g0.writebuf = buf[0:0:len(buf)]
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
			if all {
				tracebackothers(gp)
			}
			g0.m.traceback = 0
			n = len(g0.writebuf)
			g0.writebuf = nil
		})
	}

	if all {
		startTheWorld()
	}
	return n
}
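
// A minimal usage sketch (not part of this file; assumes a caller importing
// "runtime" and "os"): Stack never grows buf, so pick a generous size and
// retry with a larger buffer if the output fills it.
//
//	buf := make([]byte, 1<<16)
//	n := runtime.Stack(buf, true) // true: include all goroutines
//	os.Stderr.Write(buf[:n])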

// Tracing of alloc/free/gc.

var tracelock mutex

// tracealloc prints a trace of an allocation of size bytes with type typ at p.
func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	if typ == nil {
		print("tracealloc(", p, ", ", hex(size), ")\n")
	} else {
		print("tracealloc(", p, ", ", hex(size), ", ", toRType(typ).string(), ")\n")
	}
	if gp.m.curg == nil || gp == gp.m.curg {
		goroutineheader(gp)
		pc := getcallerpc()
		sp := getcallersp()
		systemstack(func() {
			traceback(pc, sp, 0, gp)
		})
	} else {
		goroutineheader(gp.m.curg)
		traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
	}
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

// tracefree prints a trace of a free of the size-byte block at p.
func tracefree(p unsafe.Pointer, size uintptr) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracefree(", p, ", ", hex(size), ")\n")
	goroutineheader(gp)
	pc := getcallerpc()
	sp := getcallersp()
	systemstack(func() {
		traceback(pc, sp, 0, gp)
	})
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

// tracegc prints the stacks of all goroutines at a garbage collection.
func tracegc() {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracegc()\n")

	tracebackothers(gp)
	print("end tracegc\n")
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}