Skip to content

Commit

Permalink
tools/performance: Start gathering sharable infrastructure
Browse files Browse the repository at this point in the history
Relevant to: RobotLocomotion#14464

Gather the sharable bits of experiment execution scripting and port the cassie
benchmark to use it.

This is a WIP branch, exploring the idea of factoring some experiment-execution
machinery out of the cassie_benchmark example.  I'm not greatly sold on how it
looks so far, though I think it is potentially useful enough and solves enough
distracting subproblems to be worth some sort of better expression.
  • Loading branch information
rpoyner-tri committed Jan 26, 2021
1 parent 2f41e7f commit c7b74d9
Show file tree
Hide file tree
Showing 7 changed files with 283 additions and 145 deletions.
8 changes: 1 addition & 7 deletions examples/multibody/cassie_benchmark/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -29,15 +29,9 @@ sh_test(
name = "record_results",
size = "small",
srcs = ["record_results.sh"],
# Debug-configured test runs are nice for coverage, but not very useful
# otherwise. Don't waste too much time on them.
args = select({
"//tools/cc_toolchain:debug": ["--benchmark_repetitions=1"],
"//conditions:default": [],
}),
data = [
":cassie_bench",
"//tools/workspace/cc:identify_compiler",
"//tools/performance:record_results",
],
tags = ["no_valgrind_tools"],
)
Expand Down
91 changes: 6 additions & 85 deletions examples/multibody/cassie_benchmark/conduct_experiment
Original file line number Diff line number Diff line change
Expand Up @@ -3,93 +3,14 @@

set -e -u -o pipefail

ME=$(readlink -f $0)
HERE=$(dirname $ME)
TARGET=//examples/multibody/cassie_benchmark:record_results
ME=$(python3 -c 'import os; print(os.path.realpath("'"$0"'"))')
HERE=$(dirname $ME)

NO_TURBO_CONTROL_FILE=/sys/devices/system/cpu/intel_pstate/no_turbo

CPU_GOVERNOR=
NO_TURBO=

die () {
echo $"$@"
exit 1
}

is_default_ubuntu () {
[[ $(uname) = "Linux" && $(echo $(lsb_release -irs)) = "Ubuntu 18.04" ]]
}

is_default_compiler () {
# Use deep bash magic to assert variables are unset.
[[ -z ${CC+x} && -z ${CXX+x} ]]
}

is_supported_cpu () {
[[ -e "$NO_TURBO_CONTROL_FILE" ]]
}

say () {
echo
echo === "$@" ===
echo
}

get_cpu_governor () {
cpupower frequency-info -p |sed -n 's%.*governor "\([^"]*\)".*%\1%pg'
}

set_cpu_governor () {
sudo cpupower frequency-set --governor "$1"
}

get_no_turbo () {
cat "$NO_TURBO_CONTROL_FILE"
}

set_no_turbo () {
sudo sh -c "echo $1 > $NO_TURBO_CONTROL_FILE"
}

clean () {
say Restore CPU speed settings.
[[ -n "$CPU_GOVERNOR" ]] && set_cpu_governor "$CPU_GOVERNOR"
[[ -n "$NO_TURBO" ]] && set_no_turbo "$NO_TURBO"
}

say Validate input.
[[ "$#" -ge 1 ]] || die "missing argument: destination directory"
DESTINATION="$1"
OUTPUT_DIR="$1"
shift

say Validate environment.
is_default_ubuntu || die "experiments only supported on default platform"
is_default_compiler || die "experiments only supported with default compiler"
is_supported_cpu || die "experiments only supported with Intel CPUs"

say Validate sudo access, to avoid later interruptions.
sudo -v

say Install tools for CPU speed control.
sudo apt install linux-tools-$(uname -r)

say Build code.
bazel build "$TARGET"

say Wait for lingering activity to subside.
sync
sleep 10

say Control CPU speed variation.
trap clean EXIT
CPU_GOVERNOR=$(get_cpu_governor)
NO_TURBO=$(get_no_turbo)
set_cpu_governor performance
set_no_turbo 1

say Run the experiment.
bazel run "$TARGET" -- "$@"
cd "$HERE"/../../..

say Save data.
"$HERE"/copy_results_to "$DESTINATION"
./tools/performance/benchmark_tool conduct_experiment \
$TARGET "$OUTPUT_DIR" -- "$@"
19 changes: 5 additions & 14 deletions examples/multibody/cassie_benchmark/copy_results_to
Original file line number Diff line number Diff line change
Expand Up @@ -3,19 +3,10 @@

set -e -u -o pipefail

die () {
echo $"$@"
exit 1
}
TARGET=//examples/multibody/cassie_benchmark:record_results
ME=$(python3 -c 'import os; print(os.path.realpath("'"$0"'"))')
HERE=$(dirname $ME)

cd "$HERE"/../../..

[[ "$#" -ge 1 ]] || die "missing argument: destination directory"

DST="$1"

TESTLOGS=$(bazel info bazel-testlogs)
TARGET="examples/multibody/cassie_benchmark/record_results"
SRC="${TESTLOGS}/${TARGET}/test.outputs"

mkdir -p "$DST"
cp -av "$SRC"/* "$DST"
./tools/performance/benchmark_tool copy_results $TARGET "$1"
42 changes: 3 additions & 39 deletions examples/multibody/cassie_benchmark/record_results.sh
Original file line number Diff line number Diff line change
@@ -1,42 +1,6 @@
#!/bin/bash
# Collect context information for a benchmark experiment.
# TODO(rpoyner-tri) find a robust way of recording source code version
# information.

set -e -u -o pipefail

uname -a > ${TEST_UNDECLARED_OUTPUTS_DIR}/kernel.txt || true

# Fill this in with a platform-specific command to control processor affinity,
# if any.
AFFINITY_COMMAND=""

case $(uname) in
Linux)
lsb_release -idrc
# Choosing processor #0 is arbitrary. It is up to experimenters
# to ensure it is reliably idle during experiments.
AFFINITY_COMMAND="taskset 0x1"
;;
Darwin)
sw_vers
;;
*)
echo unknown
;;
esac > ${TEST_UNDECLARED_OUTPUTS_DIR}/os.txt

${TEST_SRCDIR}/drake/tools/workspace/cc/identify_compiler \
> ${TEST_UNDECLARED_OUTPUTS_DIR}/compiler.txt

${AFFINITY_COMMAND} \
${TEST_SRCDIR}/drake/examples/multibody/cassie_benchmark/cassie_bench \
--benchmark_display_aggregates_only=true \
--benchmark_repetitions=9 \
--benchmark_out_format=json \
--benchmark_out=${TEST_UNDECLARED_OUTPUTS_DIR}/results.json \
"$@" \
>& ${TEST_UNDECLARED_OUTPUTS_DIR}/summary.txt

echo Full results are in:
echo ${TEST_UNDECLARED_OUTPUTS_DIR}/
${TEST_SRCDIR}/drake/tools/performance/record_results \
${TEST_SRCDIR}/drake/examples/multibody/cassie_benchmark/cassie_bench \
"$@"
16 changes: 16 additions & 0 deletions tools/performance/BUILD.bazel
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# -*- python -*-

load("//tools/lint:lint.bzl", "add_lint_tests")
load("//tools/skylark:py.bzl", "py_binary")

package(default_visibility = ["//visibility:public"])

py_binary(
name = "record_results",
srcs = ["record_results.py"],
data = [
"//tools/workspace/cc:identify_compiler",
],
)

add_lint_tests()
166 changes: 166 additions & 0 deletions tools/performance/benchmark_tool
Original file line number Diff line number Diff line change
@@ -0,0 +1,166 @@
#!/usr/bin/env python3

"""Tool to help with controlled benchmark experiments.
"""

import argparse
import contextlib
import os
import re
import shutil
import subprocess
import sys
import time

# Absolute path of the workspace root, computed as three directory levels
# up from this file (tools/performance/benchmark_tool -> workspace root).
# This bit may need adjusting if this file moves to a new directory.
WORKSPACE = os.path.dirname(os.path.dirname(os.path.dirname(
    os.path.abspath(__file__))))


def is_default_ubuntu():
    """Return True iff this host is Linux running Ubuntu 18.04, the only
    platform on which experiments are supported.
    """
    # Check the kernel name first: `lsb_release` typically does not exist
    # on non-Linux hosts, and invoking it there would raise instead of
    # returning False.
    if os.uname().sysname != "Linux":
        return False
    try:
        release_info = subprocess.check_output(
            ["lsb_release", "-irs"], encoding='utf-8')
    except (OSError, subprocess.CalledProcessError):
        # No usable lsb_release: not a default Ubuntu host.
        return False
    # `lsb_release -irs` emits the distributor id and release on separate
    # lines, e.g. "Ubuntu\n18.04".
    return "Ubuntu\n18.04" in release_info


def is_default_compiler():
    """Return True iff neither CC nor CXX is set in the environment,
    meaning builds will use the default compiler.
    """
    for variable in ("CC", "CXX"):
        if os.environ.get(variable) is not None:
            return False
    return True


def say(*args):
    """Print the given words as a banner line padded by blank lines,
    e.g. `say("hi", "there")` prints "\n=== hi there ===\n\n".
    """
    banner = ' '.join(args)
    print('')
    print(f'=== {banner} ===')
    print('')


def run(*args):
    """Execute `args` as a command line; raise CalledProcessError if the
    command exits with a nonzero status.
    """
    command = list(args)
    subprocess.run(command, check=True)


def sudo(*args):
    """Run the given command line under `sudo`."""
    command = ('sudo',) + args
    run(*command)


class CpuSpeedSettings:
    """Reads and adjusts Linux CPU frequency settings (scaling governor
    and Intel turbo mode) to reduce benchmark timing noise.

    The setter methods invoke `sudo`, so they require sudo privileges.
    """

    # Sysfs control file for Intel turbo mode; only present on hosts
    # using the intel_pstate driver.
    NO_TURBO_CONTROL_FILE = "/sys/devices/system/cpu/intel_pstate/no_turbo"

    def is_supported_cpu(self):
        """Return True iff the turbo control file exists, i.e. this host
        exposes the intel_pstate controls this class manipulates."""
        return os.path.exists(self.NO_TURBO_CONTROL_FILE)

    def get_cpu_governor(self):
        """Return the current scaling governor name, parsed out of
        `cpupower frequency-info -p` output."""
        text = subprocess.check_output(
            ["cpupower", "frequency-info", "-p"], encoding='utf-8')
        m = re.search(r'\bgovernor "([^"]*)" ', text)
        # NOTE(review): if the output format changes, `m` is None and
        # this raises AttributeError rather than a friendly error.
        return m.group(1)

    def set_cpu_governor(self, governor):
        """Set the scaling governor (e.g. "performance"); uses sudo."""
        sudo('cpupower', 'frequency-set', '--governor', governor)

    def get_no_turbo(self):
        """Return the no_turbo flag file contents, stripped ("0"/"1")."""
        with open(self.NO_TURBO_CONTROL_FILE, 'r', encoding='utf-8') as fo:
            return fo.read().strip()

    def set_no_turbo(self, no_turbo):
        """Write `no_turbo` to the control file; uses a sudo shell since
        the sysfs file is root-writable."""
        sudo('sh', '-c', f"echo {no_turbo} > {self.NO_TURBO_CONTROL_FILE}")

    @contextlib.contextmanager
    def scope(self, governor, no_turbo):
        """Context manager that applies the given governor and no_turbo
        settings and restores the prior settings on exit, even when the
        body raises.
        """
        say("Control CPU speed variation.")
        old_gov = self.get_cpu_governor()
        old_nt = self.get_no_turbo()
        try:
            self.set_cpu_governor(governor)
            self.set_no_turbo(no_turbo)
            yield
        finally:
            say("Restore CPU speed settings.")
            # Restore in reverse order of application.
            self.set_no_turbo(old_nt)
            self.set_cpu_governor(old_gov)


def target_to_path_fragment(target):
    """Convert a bazel target label to a relative path fragment, e.g.
    '//examples/foo:bar' -> 'examples/foo/bar'.

    NOTE: it is unclear whether relative labels are fully supported here.
    """
    trimmed = target[2:] if target.startswith('//') else target
    return os.path.join(*trimmed.split(':'))


def conduct_experiment(args):
    """Build and run the benchmark `args.target` under controlled CPU
    speed conditions, then copy its output data to `args.output_dir`.

    Requires sudo access (to install tools and adjust CPU settings);
    only supported on the default platform and compiler configuration.

    Raises:
        RuntimeError: if the host environment is unsupported.
    """
    say("Validate environment.")
    # Raise explicitly rather than `assert`: assertions are stripped when
    # Python runs with -O, and these are user-facing environment checks.
    if not is_default_ubuntu():
        raise RuntimeError(
            "experiments only supported on default platform")
    if not is_default_compiler():
        raise RuntimeError(
            "experiments only supported with default compiler")
    if not CpuSpeedSettings().is_supported_cpu():
        raise RuntimeError(
            "experiments only supported with Intel CPUs")

    say("Validate sudo access, to avoid later interruptions.")
    sudo("-v")

    say("Install tools for CPU speed control.")
    # `cpupower` ships in the kernel-version-specific linux-tools package.
    kernel_name = subprocess.check_output(
        ['uname', '-r'], encoding='utf-8').strip()
    sudo('apt', 'install', f'linux-tools-{kernel_name}')

    say("Build code.")
    run('bazel', 'build', args.target)

    say("Wait for lingering activity to subside.")
    time.sleep(10)

    with CpuSpeedSettings().scope(governor="performance", no_turbo="1"):
        say("Run the experiment.")
        run('bazel', 'run', args.target, '--', *args.extra_args)

    say(f"Save data to {args.output_dir}/.")
    copy_results(args)


def copy_results(args):
    """Replace `args.output_dir` with a copy of the bazel test outputs
    tree for `args.target`.
    """
    target_path = target_to_path_fragment(args.target)
    source = os.path.join(
        WORKSPACE, 'bazel-testlogs', target_path, 'test.outputs')
    # Erase any prior contents so the destination exactly mirrors the
    # latest results.
    shutil.rmtree(args.output_dir, ignore_errors=True)
    shutil.copytree(source, args.output_dir)


def main(argv=None):
    """Parse the command line (from `argv`, or `sys.argv` when None) and
    dispatch to the chosen subcommand.
    """
    # Don't run under bazel; this program issues bazel commands.
    assert "runfiles" not in ':'.join(sys.path), "Don't run under bazel!"

    parser = argparse.ArgumentParser(description=__doc__)
    subparsers = parser.add_subparsers(
        help='subcommand to run', dest='subcommand')
    subparsers.required = True

    def add_common_args(parser):
        # TARGET and OUTPUT-DIR are shared by every subcommand.
        parser.add_argument(
            'target', metavar='TARGET',
            help='bazel target of benchmark program')
        parser.add_argument(
            'output_dir', metavar='OUTPUT-DIR',
            help='output directory for benchmark data;'
            ' any existing contents will be erased.')

    parser_conduct_experiment = subparsers.add_parser(
        'conduct_experiment',
        # Fixed: the original help here was a copy-paste of the
        # copy_results help text.
        help='run the benchmark TARGET under controlled conditions'
        ' and copy its data to OUTPUT-DIR')
    add_common_args(parser_conduct_experiment)
    parser_conduct_experiment.add_argument(
        'extra_args', nargs='*',
        help='extra arguments passed to the underlying executable')
    parser_conduct_experiment.set_defaults(func=conduct_experiment)

    parser_copy_results = subparsers.add_parser(
        'copy_results',
        help='copy benchmark data for TARGET'
        ' from the bazel-testlogs tree to OUTPUT-DIR')
    add_common_args(parser_copy_results)
    parser_copy_results.set_defaults(func=copy_results)

    args = parser.parse_args(argv)
    args.func(args)


if __name__ == '__main__':
    main()
Loading

0 comments on commit c7b74d9

Please sign in to comment.