From 715fba1d908db317602034529227beea9bb31b23 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Wed, 21 Dec 2022 19:15:19 +0000
Subject: [PATCH] sweet: add support for execution traces and measuring trace overhead

For golang/go#57175.

Change-Id: I999773e8be28c46fb5d4f6a79a94d542491e3754
---
 sweet/benchmarks/go-build/main.go          | 42 +++++++++----
 sweet/benchmarks/gvisor/common.go          |  5 +-
 sweet/benchmarks/internal/driver/driver.go | 39 ++++++++++--
 sweet/benchmarks/tile38/main.go            | 69 +++++++++++++++++++--
 sweet/cmd/sweet/benchmark.go               | 70 ++++++++++++++++------
 sweet/cmd/sweet/run.go                     | 41 ++++++++++++-
 sweet/common/profile/profile.go            |  8 +--
 7 files changed, 226 insertions(+), 48 deletions(-)

diff --git a/sweet/benchmarks/go-build/main.go b/sweet/benchmarks/go-build/main.go
index c49ad0b..6045cd4 100644
--- a/sweet/benchmarks/go-build/main.go
+++ b/sweet/benchmarks/go-build/main.go
@@ -99,27 +99,27 @@ func run(pkgPath string) error {
 	// Handle any CPU profiles produced, and merge them.
 	// Then, write them out to the canonical profiles above.
 	if driver.ProfilingEnabled(driver.ProfileCPU) {
-		compileProfile, err := mergeProfiles(tmpDir, profilePrefix("compile", driver.ProfileCPU))
+		compileProfile, err := mergePprofProfiles(tmpDir, profilePrefix("compile", driver.ProfileCPU))
 		if err != nil {
 			return err
 		}
-		if err := driver.WriteProfile(compileProfile, driver.ProfileCPU, name+"Compile"); err != nil {
+		if err := driver.WritePprofProfile(compileProfile, driver.ProfileCPU, name+"Compile"); err != nil {
 			return err
 		}
-		linkProfile, err := mergeProfiles(tmpDir, profilePrefix("link", driver.ProfileCPU))
+		linkProfile, err := mergePprofProfiles(tmpDir, profilePrefix("link", driver.ProfileCPU))
 		if err != nil {
 			return err
 		}
-		if err := driver.WriteProfile(linkProfile, driver.ProfileCPU, name+"Link"); err != nil {
+		if err := driver.WritePprofProfile(linkProfile, driver.ProfileCPU, name+"Link"); err != nil {
 			return err
 		}
 	}
 
 	if driver.ProfilingEnabled(driver.ProfileMem) {
-		if err := copyProfiles(tmpDir, "compile", driver.ProfileMem, name+"Compile"); err != nil {
+		if err := copyPprofProfiles(tmpDir, "compile", driver.ProfileMem, name+"Compile"); err != nil {
 			return err
 		}
-		if err := copyProfiles(tmpDir, "link", driver.ProfileMem, name+"Link"); err != nil {
+		if err := copyPprofProfiles(tmpDir, "link", driver.ProfileMem, name+"Link"); err != nil {
 			return err
 		}
 	}
@@ -128,11 +128,25 @@ func run(pkgPath string) error {
 			return err
 		}
 	}
+	if driver.ProfilingEnabled(driver.ProfileTrace) {
+		entries, err := os.ReadDir(tmpDir)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			if !strings.HasPrefix(entry.Name(), profilePrefix("compile", driver.ProfileTrace)) {
+				continue
+			}
+			if err := driver.CopyProfile(filepath.Join(tmpDir, entry.Name()), driver.ProfileTrace, name+"Compile"); err != nil {
+				return err
+			}
+		}
+	}
 	return printOtherResults(tmpResultsDir())
 }
 
-func mergeProfiles(dir, prefix string) (*profile.Profile, error) {
-	profiles, err := sprofile.ReadDir(dir, func(name string) bool {
+func mergePprofProfiles(dir, prefix string) (*profile.Profile, error) {
+	profiles, err := sprofile.ReadDirPprof(dir, func(name string) bool {
 		return strings.HasPrefix(name, prefix)
 	})
 	if err != nil {
@@ -141,16 +155,16 @@ func mergeProfiles(dir, prefix string) (*profile.Profile, error) {
 	return profile.Merge(profiles)
 }
 
-func copyProfiles(dir, bin string, typ driver.ProfileType, finalPrefix string) error {
+func copyPprofProfiles(dir, bin string, typ driver.ProfileType, finalPrefix string) error {
 	prefix := profilePrefix(bin, typ)
-	profiles, err := sprofile.ReadDir(dir, func(name string) bool {
+	profiles, err := sprofile.ReadDirPprof(dir, func(name string) bool {
 		return strings.HasPrefix(name, prefix)
 	})
 	if err != nil {
 		return err
 	}
 	for _, profile := range profiles {
-		if err := driver.WriteProfile(profile, typ, finalPrefix); err != nil {
+		if err := driver.WritePprofProfile(profile, typ, finalPrefix); err != nil {
 			return err
 		}
 	}
@@ -200,8 +214,12 @@ func runToolexec() error {
 		return cmd.Run()
 	}
 	var extraFlags []string
-	for _, typ := range []driver.ProfileType{driver.ProfileCPU, driver.ProfileMem} {
+	for _, typ := range []driver.ProfileType{driver.ProfileCPU, driver.ProfileMem, driver.ProfileTrace} {
 		if driver.ProfilingEnabled(typ) {
+			if bin == "link" && typ == driver.ProfileTrace {
+				// TODO(mknyszek): Traces are not supported for the linker.
+				continue
+			}
 			// Stake a claim for a filename.
 			f, err := os.CreateTemp(tmpDir, profilePrefix(bin, typ))
 			if err != nil {
diff --git a/sweet/benchmarks/gvisor/common.go b/sweet/benchmarks/gvisor/common.go
index aeb2879..427fae1 100644
--- a/sweet/benchmarks/gvisor/common.go
+++ b/sweet/benchmarks/gvisor/common.go
@@ -30,7 +30,7 @@ func (c *config) profilePath(typ driver.ProfileType) string {
 func (cfg *config) runscCmd(arg ...string) *exec.Cmd {
 	var cmd *exec.Cmd
 	goProfiling := false
-	for _, typ := range []driver.ProfileType{driver.ProfileCPU, driver.ProfileMem} {
+	for _, typ := range []driver.ProfileType{driver.ProfileCPU, driver.ProfileMem, driver.ProfileTrace} {
 		if driver.ProfilingEnabled(typ) {
 			goProfiling = true
 			break
@@ -45,6 +45,9 @@ func (cfg *config) runscCmd(arg ...string) *exec.Cmd {
 	if driver.ProfilingEnabled(driver.ProfileMem) {
 		arg = append([]string{"-profile-heap", cfg.profilePath(driver.ProfileMem)}, arg...)
 	}
+	if driver.ProfilingEnabled(driver.ProfileTrace) {
+		arg = append([]string{"-trace", cfg.profilePath(driver.ProfileTrace)}, arg...)
+	}
 	if driver.ProfilingEnabled(driver.ProfilePerf) {
 		cmd = exec.Command("perf", append([]string{"record", "-o", cfg.profilePath(driver.ProfilePerf), cfg.runscPath}, arg...)...)
 	} else {
diff --git a/sweet/benchmarks/internal/driver/driver.go b/sweet/benchmarks/internal/driver/driver.go
index bb6e3eb..ea639fd 100644
--- a/sweet/benchmarks/internal/driver/driver.go
+++ b/sweet/benchmarks/internal/driver/driver.go
@@ -13,6 +13,7 @@ import (
 	"os/exec"
 	"path/filepath"
 	"runtime/pprof"
+	"runtime/trace"
 	"sort"
 	"strconv"
 	"strings"
@@ -28,6 +29,7 @@ var (
 	memProfileDir string
 	perfDir       string
 	perfFlags     string
+	traceDir      string
 	short         bool
 )
 
@@ -37,6 +39,7 @@ func SetFlags(f *flag.FlagSet) {
 	f.StringVar(&memProfileDir, "memprofile", "", "write a memory profile to the given directory after every benchmark run")
 	f.StringVar(&perfDir, "perf", "", "write a Linux perf data file to the given directory after every benchmark run")
 	f.StringVar(&perfFlags, "perf-flags", "", "pass the following additional flags to Linux perf")
+	f.StringVar(&traceDir, "trace", "", "write an execution trace to the given directory after every benchmark run")
 }
 
 const (
@@ -104,6 +107,12 @@ func DoPerf(v bool) RunOption {
 	}
 }
 
+func DoTrace(v bool) RunOption {
+	return func(b *B) {
+		b.doProfile[ProfileTrace] = v
+	}
+}
+
 func BenchmarkPID(pid int) RunOption {
 	return func(b *B) {
 		b.pid = pid
@@ -111,6 +120,7 @@ func BenchmarkPID(pid int) RunOption {
 			b.doProfile[ProfileCPU] = false
 			b.doProfile[ProfileMem] = false
 			b.doProfile[ProfilePerf] = false
+			b.doProfile[ProfileTrace] = false
 		}
 	}
 }
@@ -136,6 +146,7 @@ var InProcessMeasurementOptions = []RunOption{
 	DoCPUProfile(true),
 	DoMemProfile(true),
 	DoPerf(true),
+	DoTrace(true),
 }
 
 type B struct {
@@ -184,6 +195,10 @@ func (b *B) shouldProfile(typ ProfileType) bool {
 	return b.doProfile[typ] && ProfilingEnabled(typ)
 }
 
+func (b *B) Name() string {
+	return b.name
+}
+
 func (b *B) StartTimer() {
 	if b.shouldProfile(ProfileCPU) {
 		pprof.StartCPUProfile(b.profiles[ProfileCPU])
@@ -449,6 +464,12 @@ func RunBenchmark(name string, f func(*B) error, opts ...RunOption) error {
 		}
 	}
 
+	if b.shouldProfile(ProfileTrace) {
+		if err := trace.Start(b.profiles[ProfileTrace]); err != nil {
+			return err
+		}
+	}
+
 	b.StartTimer()
 
 	// Run the benchmark itself.
@@ -513,6 +534,9 @@ func RunBenchmark(name string, f func(*B) error, opts ...RunOption) error {
 				return err
 			}
 		}
+		if typ == ProfileTrace {
+			trace.Stop()
+		}
 		f.Close()
 	}
 
@@ -524,15 +548,17 @@
 type ProfileType string
 
 const (
-	ProfileCPU  ProfileType = "cpu"
-	ProfileMem  ProfileType = "mem"
-	ProfilePerf ProfileType = "perf"
+	ProfileCPU   ProfileType = "cpu"
+	ProfileMem   ProfileType = "mem"
+	ProfilePerf  ProfileType = "perf"
+	ProfileTrace ProfileType = "trace"
 )
 
 var ProfileTypes = []ProfileType{
 	ProfileCPU,
 	ProfileMem,
 	ProfilePerf,
+	ProfileTrace, // TODO(mknyszek): Consider renaming ProfileType.
 }
 
 func ProfilingEnabled(typ ProfileType) bool {
@@ -543,11 +569,13 @@ func ProfilingEnabled(typ ProfileType) bool {
 		return memProfileDir != ""
 	case ProfilePerf:
 		return perfDir != ""
+	case ProfileTrace:
+		return traceDir != ""
 	}
 	panic("bad profile type")
 }
 
-func WriteProfile(prof *profile.Profile, typ ProfileType, pattern string) error {
+func WritePprofProfile(prof *profile.Profile, typ ProfileType, pattern string) error {
 	if !ProfilingEnabled(typ) {
 		return fmt.Errorf("this type of profile is not currently enabled")
 	}
@@ -589,6 +617,9 @@ func newProfileFile(typ ProfileType, pattern string) (*os.File, error) {
 	case ProfilePerf:
 		outDir = perfDir
 		patternSuffix = ".perf"
+	case ProfileTrace:
+		outDir = traceDir
+		patternSuffix = ".trace"
 	}
 	return os.CreateTemp(outDir, pattern+patternSuffix)
 }
diff --git a/sweet/benchmarks/tile38/main.go b/sweet/benchmarks/tile38/main.go
index ea3027a..49ee1d7 100644
--- a/sweet/benchmarks/tile38/main.go
+++ b/sweet/benchmarks/tile38/main.go
@@ -11,12 +11,14 @@ import (
 	"fmt"
 	"io"
 	"math/rand"
+	"net/http"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"runtime"
 	"sort"
 	"strconv"
+	"sync"
 	"sync/atomic"
 	"time"
 
@@ -48,6 +50,8 @@ func (c *config) profilePath(typ driver.ProfileType) string {
 		fname = "mem.prof"
 	case driver.ProfilePerf:
 		fname = "perf.data"
+	case driver.ProfileTrace:
+		fname = "runtime.trace"
 	default:
 		panic("unsupported profile type " + string(typ))
 	}
@@ -58,7 +62,7 @@ var cliCfg config
 
 func init() {
 	driver.SetFlags(flag.CommandLine)
-	flag.StringVar(&cliCfg.host, "host", "", "hostname of tile38 server")
+	flag.StringVar(&cliCfg.host, "host", "127.0.0.1", "hostname of tile38 server")
 	flag.IntVar(&cliCfg.port, "port", 9851, "port for tile38 server")
 	flag.Int64Var(&cliCfg.seed, "seed", 0, "seed for PRNG")
 	flag.StringVar(&cliCfg.serverBin, "server", "", "path to tile38 server binary")
@@ -219,9 +223,10 @@ func launchServer(cfg *config, out io.Writer) (*exec.Cmd, error) {
 	// Set up arguments.
 	srvArgs := []string{
 		"-d", cfg.dataPath,
-		"-h", "127.0.0.1",
-		"-p", "9851",
+		"-h", cfg.host,
+		"-p", strconv.Itoa(cfg.port),
 		"-threads", strconv.Itoa(cfg.serverProcs),
+		"-pprofport", strconv.Itoa(pprofPort),
 	}
 	for _, typ := range []driver.ProfileType{driver.ProfileCPU, driver.ProfileMem} {
 		if driver.ProfilingEnabled(typ) {
@@ -271,6 +276,26 @@ func launchServer(cfg *config, out io.Writer) (*exec.Cmd, error) {
 	return nil, fmt.Errorf("timeout trying to connect to server: %v", err)
 }
 
+const pprofPort = 12345
+
+func (cfg *config) readTrace(benchName string) (int64, error) {
+	f, err := os.Create(cfg.profilePath(driver.ProfileTrace))
+	if err != nil {
+		return 0, err
+	}
+	defer f.Close()
+	resp, err := http.Get(fmt.Sprintf("http://%s:%d/debug/pprof/trace", cfg.host, pprofPort))
+	if err != nil {
+		return 0, err
+	}
+	defer resp.Body.Close()
+	n, err := io.Copy(f, resp.Body)
+	if err != nil {
+		return 0, err
+	}
+	return n, driver.CopyProfile(cfg.profilePath(driver.ProfileTrace), driver.ProfileTrace, benchName)
+}
+
 func runOne(bench benchmark, cfg *config) (err error) {
 	var buf bytes.Buffer
 
@@ -308,12 +333,12 @@ func runOne(bench benchmark, cfg *config) (err error) {
 	// Copy it over.
 	for _, typ := range []driver.ProfileType{driver.ProfileCPU, driver.ProfileMem} {
 		if driver.ProfilingEnabled(typ) {
-			p, r := profile.Read(cfg.profilePath(typ))
+			p, r := profile.ReadPprof(cfg.profilePath(typ))
 			if r != nil {
 				err = r
 				return
 			}
-			if r := driver.WriteProfile(p, typ, bench.name()); r != nil {
+			if r := driver.WritePprofProfile(p, typ, bench.name()); r != nil {
 				err = r
 				return
 			}
@@ -329,12 +354,46 @@ func runOne(bench benchmark, cfg *config) (err error) {
 		driver.DoCoreDump(true),
 		driver.BenchmarkPID(srvCmd.Process.Pid),
 		driver.DoPerf(true),
+		driver.DoTrace(true),
 	}
 	iters := 20 * 50000
 	if cfg.short {
 		iters = 1000
 	}
 	return driver.RunBenchmark(bench.name(), func(d *driver.B) error {
+		if driver.ProfilingEnabled(driver.ProfileTrace) {
+			// Handle execution tracing.
+			//
+			// TODO(mknyszek): This is kind of a hack. We really should find a way to just
+			// enable tracing at a lower level for the entire server run.
+			var traceStop chan struct{}
+			var traceWg sync.WaitGroup
+			var traceBytes uint64
+			traceWg.Add(1)
+			traceStop = make(chan struct{})
+			go func() {
+				defer traceWg.Done()
+				for {
+					select {
+					case <-traceStop:
+						return
+					default:
+					}
+					n, err := cfg.readTrace(bench.name())
+					if err != nil {
+						fmt.Fprintf(os.Stderr, "failed to read trace: %v", err)
+						return
+					}
+					traceBytes += uint64(n)
+				}
+			}()
+			defer func() {
+				// Stop the trace loop.
+				close(traceStop)
+				traceWg.Wait()
+				d.Report("trace-bytes", traceBytes)
+			}()
+		}
 		return bench.run(d, cfg.host, cfg.port, cfg.serverProcs, iters)
 	}, opts...)
 }
diff --git a/sweet/cmd/sweet/benchmark.go b/sweet/cmd/sweet/benchmark.go
index 8ad98a0..a8818c3 100644
--- a/sweet/cmd/sweet/benchmark.go
+++ b/sweet/cmd/sweet/benchmark.go
@@ -218,7 +218,12 @@ func (b *benchmark) execute(cfgs []*common.Config, r *runCfg) error {
 	}
 
 	// Perform a setup step for each config for the benchmark.
-	setups := make([]common.RunConfig, 0, len(cfgs))
+	type setup struct {
+		typ  string
+		cfg  *common.Config
+		rcfg common.RunConfig
+	}
+	setups := make([]setup, 0, len(cfgs))
 	for _, pcfg := range cfgs {
 		// Local copy for per-benchmark environment adjustments.
 		cfg := pcfg.Copy()
@@ -279,9 +284,10 @@ func (b *benchmark) execute(cfgs []*common.Config, r *runCfg) error {
 			mkdirAll(resultsBinDir)
 			copyDirContents(resultsBinDir, binDir)
 		}
-		if r.cpuProfile || r.memProfile || r.perf {
+		var resultsProfilesDir string
+		if r.cpuProfile || r.memProfile || r.perf || r.trace != traceOff {
 			// Create a directory for any profile files to live in.
-			resultsProfilesDir := r.runProfilesDir(b, cfg)
+			resultsProfilesDir = r.runProfilesDir(b, cfg)
 			mkdirAll(resultsProfilesDir)
 
 			// We need to pass arguments to the benchmark binary to generate
@@ -298,6 +304,9 @@ func (b *benchmark) execute(cfgs []*common.Config, r *runCfg) error {
 					args = append(args, "-perf-flags", r.perfFlags)
 				}
 			}
+			if r.trace == traceOn {
+				args = append(args, "-trace", resultsProfilesDir)
+			}
 		}
 
 		results, err := os.Create(filepath.Join(resultsDir, fmt.Sprintf("%s.results", cfg.Name)))
@@ -305,48 +314,71 @@ func (b *benchmark) execute(cfgs []*common.Config, r *runCfg) error {
 			return fmt.Errorf("create %s results file for %s: %v", b.name, cfg.Name, err)
 		}
 		defer results.Close()
-		setups = append(setups, common.RunConfig{
-			BinDir:    binDir,
-			TmpDir:    tmpDir,
-			AssetsDir: assetsDir,
-			Args:      args,
-			Results:   results,
-			Short:     r.short,
+		setups = append(setups, setup{
+			cfg: cfg,
+			rcfg: common.RunConfig{
+				BinDir:    binDir,
+				TmpDir:    tmpDir,
+				AssetsDir: assetsDir,
+				Args:      args,
+				Results:   results,
+				Short:     r.short,
+			},
 		})
+		if r.trace == traceCompare {
+			// Create a second results file and run configuration with tracing enabled.
+			traceResults, err := os.Create(filepath.Join(resultsDir, fmt.Sprintf("%s-trace.results", cfg.Name)))
+			if err != nil {
+				return fmt.Errorf("create %s trace results file for %s: %v", b.name, cfg.Name, err)
+			}
+			defer traceResults.Close()
+
+			// Copy the last config and update it to enable tracing.
+			traceCfg := setups[len(setups)-1]
+			traceCfg.typ = "trace"
+			traceCfg.rcfg.Args = append(args, "-trace", resultsProfilesDir)
+			traceCfg.rcfg.Results = traceResults
+			setups = append(setups, traceCfg)
+		}
 	}
 
 	for j := 0; j < r.count; j++ {
 		// Execute the benchmark for each configuration.
-		for i, setup := range setups {
+		for _, setup := range setups {
 			if hasAssets {
 				// Set up assets directory for test run.
-				r.logCopyDirCommand(b.name, setup.AssetsDir)
-				if err := fileutil.CopyDir(setup.AssetsDir, assetsFSDir, r.assetsFS); err != nil {
+				r.logCopyDirCommand(b.name, setup.rcfg.AssetsDir)
+				if err := fileutil.CopyDir(setup.rcfg.AssetsDir, assetsFSDir, r.assetsFS); err != nil {
 					return err
 				}
 			}
-			log.Printf("Running benchmark %s for %s: run %d", b.name, cfgs[i].Name, j+1)
+			// If the setup has a special type, print it.
+			specialType := ""
+			if setup.typ != "" {
+				specialType = "(" + setup.typ + ")"
+			}
+			log.Printf("Running benchmark %s for %s: run %d %s", b.name, setup.cfg.Name, j+1, specialType)
 
 			// Force a GC now because we're about to turn it off.
 			runtime.GC()
 			// Hold your breath: we're turning off GC for the duration of the
 			// run so that the suite's GC doesn't start blasting on all Ps,
 			// introducing undue noise into the experiments.
 			gogc := debug.SetGCPercent(-1)
-			if err := b.harness.Run(cfgs[i], &setup); err != nil {
+			if err := b.harness.Run(setup.cfg, &setup.rcfg); err != nil {
 				debug.SetGCPercent(gogc)
-				setup.Results.Close()
-				return fmt.Errorf("run benchmark %s for config %s: %v", b.name, cfgs[i].Name, err)
+				setup.rcfg.Results.Close()
+				return fmt.Errorf("run benchmark %s for config %s: %v", b.name, setup.cfg.Name, err)
 			}
 			debug.SetGCPercent(gogc)
 
 			// Clean up tmp directory so benchmarks may assume it's empty.
-			if err := rmDirContents(setup.TmpDir); err != nil {
+			if err := rmDirContents(setup.rcfg.TmpDir); err != nil {
 				return err
 			}
 			if hasAssets {
 				// Clean up assets directory just in case any of the files were written to.
-				if err := rmDirContents(setup.AssetsDir); err != nil {
+				if err := rmDirContents(setup.rcfg.AssetsDir); err != nil {
 					return err
 				}
 			}
diff --git a/sweet/cmd/sweet/run.go b/sweet/cmd/sweet/run.go
index 0e69f4a..b7319db 100644
--- a/sweet/cmd/sweet/run.go
+++ b/sweet/cmd/sweet/run.go
@@ -38,6 +38,33 @@ func (c *csvFlag) Set(input string) error {
 	return nil
 }
 
+type traceArg string
+
+const (
+	traceOff     traceArg = "false"
+	traceOn      traceArg = "true"
+	traceCompare traceArg = "compare"
+)
+
+var traceArgNames = []string{string(traceOff), string(traceOn), string(traceCompare)}
+
+func (a *traceArg) String() string {
+	return string(*a)
+}
+
+func (a *traceArg) Set(input string) error {
+	switch t := traceArg(input); t {
+	case traceOff, traceOn, traceCompare:
+		*a = t
+		return nil
+	}
+	return fmt.Errorf("invalid -trace argument %q, want one of: %s", input, strings.Join(traceArgNames, ", "))
+}
+
+func (a *traceArg) IsBoolFlag() bool {
+	return true
+}
+
 const (
 	runLongDesc = `Execute benchmarks in the suite against GOROOTs provided in TOML configuration files.
 Note: by default, this command expects to run from /path/to/x/benchmarks/sweet.`
@@ -62,6 +89,7 @@ type runCfg struct {
 	memProfile bool
 	perf       bool
 	perfFlags  string
+	trace      traceArg
 	pgo        bool
 	pgoCount   int
 	short      bool
@@ -144,6 +172,8 @@ func (*runCmd) PrintUsage(w io.Writer, base string) {
 }
 
 func (c *runCmd) SetFlags(f *flag.FlagSet) {
+	c.runCfg.trace = traceOff
+
 	f.StringVar(&c.runCfg.resultsDir, "results", "./results", "location to write benchmark results to")
 	f.StringVar(&c.runCfg.benchDir, "bench-dir", "./benchmarks", "the benchmarks directory in the sweet source")
 	f.StringVar(&c.runCfg.assetsDir, "assets-dir", "", "a directory containing uncompressed assets for sweet benchmarks, usually for debugging Sweet (overrides -cache)")
@@ -156,6 +186,7 @@ func (c *runCmd) SetFlags(f *flag.FlagSet) {
 	f.StringVar(&c.runCfg.perfFlags, "perf-flags", "", "the flags to pass to Linux perf if -perf is set")
 	f.BoolVar(&c.pgo, "pgo", false, "perform PGO testing; for each config, collect profiles from a baseline run which are used to feed into a generated PGO config")
 	f.IntVar(&c.runCfg.pgoCount, "pgo-count", 0, "the number of times to run profiling runs for -pgo; defaults to the value of -count if <=5, or 5 if higher")
+	f.Var(&c.runCfg.trace, "trace", "whether to generate an execution trace (accepts 'true', 'false', and 'compare', the last of which is for measuring overheads)")
 	f.IntVar(&c.runCfg.count, "count", 0, fmt.Sprintf("the number of times to run each benchmark (default %d)", countDefault))
 	f.BoolVar(&c.quiet, "quiet", false, "whether to suppress activity output on stderr (no effect on -shell)")
 
@@ -362,9 +393,13 @@ func (c *runCmd) Run(args []string) error {
 	if len(unknown) != 0 {
 		return fmt.Errorf("unknown benchmarks: %s", strings.Join(unknown, ", "))
 	}
-	countString := fmt.Sprintf("%d runs", c.runCfg.count)
+	multiplier := 1
+	if c.trace == traceCompare {
+		multiplier = 2
+	}
+	countString := fmt.Sprintf("%d runs", c.runCfg.count*multiplier)
 	if c.pgo {
-		countString += fmt.Sprintf(", %d pgo runs", c.runCfg.pgoCount)
+		countString += fmt.Sprintf(", %d pgo runs", c.runCfg.pgoCount*multiplier)
 	}
 	countString += fmt.Sprintf(" per config (%d)", len(configs))
 	log.Printf("Benchmarks: %s (%s)", strings.Join(benchmarkNames(benchmarks), " "), countString)
@@ -456,7 +491,7 @@ func (c *runCmd) preparePGO(configs []*common.Config, benchmarks []*benchmark) (
 var cpuProfileRe = regexp.MustCompile(`^.*\.cpu[0-9]+$`)
 
 func mergeCPUProfiles(dir string) (string, error) {
-	profiles, err := sprofile.ReadDir(dir, func(name string) bool {
+	profiles, err := sprofile.ReadDirPprof(dir, func(name string) bool {
 		return cpuProfileRe.FindString(name) != ""
 	})
 	if err != nil {
diff --git a/sweet/common/profile/profile.go b/sweet/common/profile/profile.go
index d448997..d23bb98 100644
--- a/sweet/common/profile/profile.go
+++ b/sweet/common/profile/profile.go
@@ -12,7 +12,7 @@ import (
 	"github.com/google/pprof/profile"
 )
 
-func Read(filename string) (*profile.Profile, error) {
+func ReadPprof(filename string) (*profile.Profile, error) {
 	f, err := os.Open(filename)
 	if err != nil {
 		return nil, err
@@ -21,8 +21,8 @@ func Read(filename string) (*profile.Profile, error) {
 	return profile.Parse(f)
 }
 
-// ReadDir reads all profiles in dir whose name matches match(name).
-func ReadDir(dir string, match func(string) bool) ([]*profile.Profile, error) {
+// ReadDirPprof reads all pprof profiles in dir whose name matches match(name).
+func ReadDirPprof(dir string, match func(string) bool) ([]*profile.Profile, error) {
 	entries, err := os.ReadDir(dir)
 	if err != nil {
 		return nil, err
@@ -39,7 +39,7 @@ func ReadDir(dir string, match func(string) bool) ([]*profile.Profile, error) {
 			continue
 		}
 		if match(name) {
-			p, err := Read(path)
+			p, err := ReadPprof(path)
 			if err != nil {
 				return nil, err
 			}
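
Usage note (not part of the patch): the sketch below shows how an in-process Sweet benchmark could opt in to the execution-trace support added above. It is a minimal example under assumptions: the import path is inferred from the file paths in this diff, and the benchmark body is a placeholder. At the harness level, `sweet run -trace=true` (or `-trace=compare`, which adds a second traced run configuration per config and doubles the reported run count) passes `-trace <dir>` down to benchmark binaries built on this driver.

// Sketch only: a minimal in-process benchmark wired up to the driver package.
package main

import (
	"flag"
	"log"

	// Assumed import path, inferred from the file layout in this diff.
	"golang.org/x/benchmarks/sweet/benchmarks/internal/driver"
)

func main() {
	// SetFlags registers -trace alongside -cpuprofile, -memprofile, -perf,
	// and -perf-flags.
	driver.SetFlags(flag.CommandLine)
	flag.Parse()

	// DoTrace(true) asks RunBenchmark to call trace.Start/trace.Stop around
	// the timed section and write the result into the -trace directory,
	// when -trace is set.
	err := driver.RunBenchmark("Example", func(b *driver.B) error {
		// Placeholder benchmark body.
		return nil
	}, driver.DoTrace(true))
	if err != nil {
		log.Fatal(err)
	}
}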