-
Notifications
You must be signed in to change notification settings - Fork 100
/
fgprof.go
328 lines (294 loc) · 8.85 KB
/
fgprof.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
// fgprof is a sampling Go profiler that allows you to analyze On-CPU as well
// as [Off-CPU](http://www.brendangregg.com/offcpuanalysis.html) (e.g. I/O)
// time together.
package fgprof
import (
"fmt"
"io"
"math"
"runtime"
"sort"
"strings"
"time"
"github.com/google/pprof/profile"
)
// Format decides how the output is rendered to the user.
type Format string

const (
	// FormatFolded is used by Brendan Gregg's FlameGraph utility, see
	// https://github.com/brendangregg/FlameGraph#2-fold-stacks.
	// One line per unique stack: "fn1;fn2;... <count>".
	FormatFolded Format = "folded"
	// FormatPprof is used by Google's pprof utility, see
	// https://github.com/google/pprof/blob/master/proto/README.md.
	// Output is the gzipped protobuf produced by profile.Profile.Write.
	FormatPprof Format = "pprof"
)
// Start begins profiling the goroutines of the program and returns a function
// that needs to be invoked by the caller to stop the profiling and write the
// results to w using the given format.
func Start(w io.Writer, format Format) func() error {
	startTime := time.Now()

	// Go's CPU profiler uses 100hz, but 99hz might be less likely to result in
	// accidental synchronization with the program we're profiling.
	const hz = 99

	ticker := time.NewTicker(time.Second / hz)
	stopCh := make(chan struct{})

	prof := &profiler{}
	wallclock := newWallclockProfile()
	var samples int64

	// Sample the stacks of all goroutines on every tick until stopped.
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-stopCh:
				return
			case <-ticker.C:
				samples++
				wallclock.Add(prof.GoroutineProfile())
			}
		}
	}()

	return func() error {
		stopCh <- struct{}{}
		endTime := time.Now()
		wallclock.Ignore(prof.SelfFrames()...)

		// Compute actual sample rate in case, due to performance issues, we
		// were not actually able to sample at the given hz. Converting
		// everything to float avoids integers being rounded in the wrong
		// direction and improves the correctness of times in profiles.
		elapsed := endTime.Sub(startTime)
		actualHz := float64(samples) / (float64(elapsed) / 1e9)
		return wallclock.Export(w, format, int(math.Round(actualHz)), startTime, endTime)
	}
}
// profiler provides a convenient and performant way to access
// runtime.GoroutineProfile().
type profiler struct {
	// stacks is a reusable buffer passed to runtime.GoroutineProfile to
	// avoid re-allocating it on every sample.
	stacks []runtime.StackRecord
	// selfFrame is the frame of GoroutineProfile itself, resolved lazily on
	// the first call so it can be hidden from the profiling output.
	selfFrame *runtime.Frame
}

// nullTerminationWorkaround deals with a regression in go1.23, see:
// - https://github.com/felixge/fgprof/issues/33
// - https://go-review.googlesource.com/c/go/+/609815
// Only go1.23.0 is affected; the check is an exact version match.
var nullTerminationWorkaround = runtime.Version() == "go1.23.0"
// GoroutineProfile returns the stacks of all goroutines currently managed by
// the scheduler. This includes both goroutines that are currently running
// (On-CPU), as well as waiting (Off-CPU).
func (p *profiler) GoroutineProfile() []runtime.StackRecord {
	if p.selfFrame == nil {
		// Determine the runtime.Frame of this func so we can hide it from our
		// profiling output.
		rpc := make([]uintptr, 1)
		n := runtime.Callers(1, rpc)
		if n < 1 {
			panic("could not determine selfFrame")
		}
		selfFrame, _ := runtime.CallersFrames(rpc).Next()
		p.selfFrame = &selfFrame
	}

	// We don't know how many goroutines exist, so we have to grow p.stacks
	// dynamically. We overshoot by 10% since it's possible that more goroutines
	// are launched in between two calls to GoroutineProfile. Once p.stacks
	// reaches the maximum number of goroutines used by the program, it will get
	// reused indefinitely, eliminating GoroutineProfile calls and allocations.
	//
	// TODO(fg) There might be workloads where it would be nice to shrink
	// p.stacks dynamically as well, but let's not over-engineer this until we
	// understand those cases better.
	for {
		if nullTerminationWorkaround {
			// Zero out the reused records before each call so that stale PCs
			// from a previous, deeper stack can't leak into this sample; see
			// the nullTerminationWorkaround var for the go1.23.0 regression
			// this works around.
			for i := range p.stacks {
				p.stacks[i].Stack0 = [32]uintptr{}
			}
		}
		// n is the number of goroutines; ok is false when p.stacks was too
		// small to hold all of them, in which case we grow and retry.
		n, ok := runtime.GoroutineProfile(p.stacks)
		if !ok {
			p.stacks = make([]runtime.StackRecord, int(float64(n)*1.1))
		} else {
			return p.stacks[0:n]
		}
	}
}
// SelfFrames returns frames that belong to the profiler so that we can ignore
// them when exporting the final profile. It returns nil if no profiling call
// has resolved the profiler's own frame yet.
func (p *profiler) SelfFrames() []*runtime.Frame {
	if p.selfFrame == nil {
		return nil
	}
	return []*runtime.Frame{p.selfFrame}
}
// newWallclockProfile returns an empty wallclock profile that is ready to
// have stack samples added to it.
func newWallclockProfile() *wallclockProfile {
	p := &wallclockProfile{}
	p.stacks = make(map[[32]uintptr]*wallclockStack)
	return p
}
// wallclockProfile holds a wallclock profile that can be exported in different
// formats.
type wallclockProfile struct {
	// stacks maps a raw stack trace (array of program counters) to its
	// symbolized frames and the number of times it has been observed.
	stacks map[[32]uintptr]*wallclockStack
	// ignore lists frames whose presence excludes a stack from export, see
	// Ignore().
	ignore []*runtime.Frame
}

// wallclockStack holds the symbolized frames of a stack trace and the number
// of times it has been seen.
type wallclockStack struct {
	frames []*runtime.Frame // symbolized frames of the stack trace
	count  int              // number of samples that observed this stack
}
// Ignore sets a list of frames that should be ignored when exporting the
// profile. Each call replaces any previously set list.
func (p *wallclockProfile) Ignore(frames ...*runtime.Frame) {
	p.ignore = frames
}
// Add records the given stack traces in the profile. Each unique stack is
// symbolized and cached the first time it is seen; its sample count is then
// incremented on every occurrence.
func (p *wallclockProfile) Add(stackRecords []runtime.StackRecord) {
	for _, record := range stackRecords {
		ws, seen := p.stacks[record.Stack0]
		if !seen {
			ws = &wallclockStack{}
			// Symbolize the raw program counters into frames once; later
			// samples of the same stack reuse this work via the cache.
			frames := runtime.CallersFrames(record.Stack())
			for {
				frame, more := frames.Next()
				ws.frames = append(ws.frames, &frame)
				if !more {
					break
				}
			}
			p.stacks[record.Stack0] = ws
		}
		ws.count++
	}
}
// Export renders the profile to w in the given format f. hz is the sample
// rate; together with startTime and endTime it is only consumed by the pprof
// format. An error is returned for unknown formats.
func (p *wallclockProfile) Export(w io.Writer, f Format, hz int, startTime, endTime time.Time) error {
	switch f {
	case FormatFolded:
		return p.exportFolded(w)
	case FormatPprof:
		return p.exportPprof(hz, startTime, endTime).Write(w)
	}
	return fmt.Errorf("unknown format: %q", f)
}
// exportStacks returns the stacks in this profile except those that have been
// set to Ignore().
func (p *wallclockProfile) exportStacks() []*wallclockStack {
	kept := make([]*wallclockStack, 0, len(p.stacks))
	for _, ws := range p.stacks {
		if !p.containsIgnoredFrame(ws) {
			kept = append(kept, ws)
		}
	}
	return kept
}

// containsIgnoredFrame reports whether ws contains any frame from the ignore
// list, comparing frames by their function entry address.
func (p *wallclockProfile) containsIgnoredFrame(ws *wallclockStack) bool {
	for _, frame := range ws.frames {
		for _, ign := range p.ignore {
			if frame.Entry == ign.Entry {
				return true
			}
		}
	}
	return false
}
// exportFolded writes the profile to w in Brendan Gregg's folded stack
// format: one line per unique stack of the form "fn1;fn2;... <count>".
// Lines are sorted to make the output deterministic.
func (p *wallclockProfile) exportFolded(w io.Writer) error {
	stacks := p.exportStacks()
	lines := make([]string, 0, len(stacks))
	for _, ws := range stacks {
		names := make([]string, 0, len(ws.frames))
		for _, frame := range ws.frames {
			names = append(names, frame.Function)
		}
		lines = append(lines, fmt.Sprintf("%s %d", strings.Join(names, ";"), ws.count))
	}
	sort.Strings(lines)
	_, err := io.WriteString(w, strings.Join(lines, "\n")+"\n")
	return err
}
// exportPprof converts the wallclock profile into the pprof protobuf format,
// see https://github.com/google/pprof/blob/master/proto/README.md.
//
// hz is the actual sample rate, so each sample accounts for 1e9/hz
// nanoseconds of wallclock time. startTime and endTime describe the
// profiling session and populate the profile's time and duration fields.
func (p *wallclockProfile) exportPprof(hz int, startTime, endTime time.Time) *profile.Profile {
	// Guard against division by zero below: hz can be 0 when profiling was
	// stopped before the first sample was taken (the computed actual sample
	// rate then rounds down to 0).
	if hz <= 0 {
		hz = 1
	}

	prof := &profile.Profile{}
	m := &profile.Mapping{ID: 1, HasFunctions: true}
	prof.Period = int64(1e9 / hz) // Number of nanoseconds between samples.
	prof.TimeNanos = startTime.UnixNano()
	prof.DurationNanos = int64(endTime.Sub(startTime))
	prof.Mapping = []*profile.Mapping{m}
	prof.SampleType = []*profile.ValueType{
		{
			Type: "samples",
			Unit: "count",
		},
		{
			Type: "time",
			Unit: "nanoseconds",
		},
	}
	prof.PeriodType = &profile.ValueType{
		Type: "wallclock",
		Unit: "nanoseconds",
	}

	// Functions are deduplicated by name+filename and locations by
	// function+line, so frames repeated across stacks share a single table
	// entry each, as required by the pprof format.
	type functionKey struct {
		Name     string
		Filename string
	}
	funcIdx := map[functionKey]*profile.Function{}

	type locationKey struct {
		Function functionKey
		Line     int
	}
	locationIdx := map[locationKey]*profile.Location{}
	for _, ws := range p.exportStacks() {
		sample := &profile.Sample{
			Value: []int64{
				int64(ws.count),
				// Each sample stands for 1e9/hz ns of observed wallclock time.
				int64(1000 * 1000 * 1000 / hz * ws.count),
			},
		}

		for _, frame := range ws.frames {
			fnKey := functionKey{Name: frame.Function, Filename: frame.File}
			function, ok := funcIdx[fnKey]
			if !ok {
				function = &profile.Function{
					ID:         uint64(len(prof.Function)) + 1,
					Name:       frame.Function,
					SystemName: frame.Function,
					Filename:   frame.File,
				}
				funcIdx[fnKey] = function
				prof.Function = append(prof.Function, function)
			}

			locKey := locationKey{Function: fnKey, Line: frame.Line}
			location, ok := locationIdx[locKey]
			if !ok {
				location = &profile.Location{
					ID:      uint64(len(prof.Location)) + 1,
					Mapping: m,
					Line: []profile.Line{{
						Function: function,
						Line:     int64(frame.Line),
					}},
				}
				locationIdx[locKey] = location
				prof.Location = append(prof.Location, location)
			}
			sample.Location = append(sample.Location, location)
		}
		prof.Sample = append(prof.Sample, sample)
	}
	return prof
}
// symbolizedStacks maps a raw stack trace (array of program counters) to the
// symbolized frames recorded for it, each paired with the stack's sample count.
type symbolizedStacks map[[32]uintptr][]frameCount

// Symbolize resolves the program counters of every recorded stack into
// runtime.Frames, dropping any stack that contains the exclude frame
// (compared by function entry address).
//
// NOTE(review): unlike the other methods this uses a value receiver; the
// copied struct still shares the underlying stacks map with the original,
// so reads reflect the same data — confirm the asymmetry is intentional.
func (w wallclockProfile) Symbolize(exclude *runtime.Frame) symbolizedStacks {
	m := make(symbolizedStacks)

outer:
	for stack0, ws := range w.stacks {
		frames := runtime.CallersFrames((&runtime.StackRecord{Stack0: stack0}).Stack())

		for {
			frame, more := frames.Next()
			if frame.Entry == exclude.Entry {
				// Skip the entire stack if it contains the excluded frame.
				continue outer
			}
			m[stack0] = append(m[stack0], frameCount{Frame: &frame, Count: ws.count})
			if !more {
				break
			}
		}
	}
	return m
}

// frameCount pairs a symbolized stack frame with the number of times the
// stack it belongs to was observed.
type frameCount struct {
	*runtime.Frame
	Count int
}