Skip to content

Commit

Permalink
Extend Bencher testing, add Memory metric (#6210)
Browse files Browse the repository at this point in the history
  • Loading branch information
eddyashton authored May 28, 2024
1 parent 5bba55c commit 0ad88b2
Show file tree
Hide file tree
Showing 4 changed files with 40 additions and 28 deletions.
5 changes: 3 additions & 2 deletions .github/workflows/bencher.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,16 @@ jobs:
with:
fetch-depth: 0

- run: |
- name: Build and run virtual perf tests
run: |
git config --global --add safe.directory /__w/CCF/CCF
mkdir build
cd build
cmake -GNinja -DCOMPILE_TARGET=virtual ..
ninja
# Limited list of benchmarks for now, but should be extended to
# everything under a single label eventually
./tests.sh -VV -R pi_basic_virtual
./tests.sh -VV -R pi_basic_
./tests.sh -VV -R historical_query
./tests.sh -VV -R commit_latency
Expand Down
6 changes: 1 addition & 5 deletions cmake/common.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -286,18 +286,14 @@ function(add_piccolo_test)
set(ENCLAVE_TYPE "virtual")
endif()

set(TESTS_SUFFIX "${TESTS_SUFFIX}_cft")

set(TEST_NAME "${PARSED_ARGS_NAME}${TESTS_SUFFIX}")

set(LABEL_ARG "${TEST_NAME}^")

add_test(
NAME "${PARSED_ARGS_NAME}${TESTS_SUFFIX}"
COMMAND
${PYTHON} ${PARSED_ARGS_PYTHON_SCRIPT} -b . -c ${PARSED_ARGS_CLIENT_BIN}
${CCF_NETWORK_TEST_ARGS} ${PARSED_ARGS_CONSTITUTION} ${VERIFICATION_ARG}
--label ${LABEL_ARG} --snapshot-tx-interval 10000
--label ${TEST_NAME} --snapshot-tx-interval 10000
${PARSED_ARGS_ADDITIONAL_ARGS} -e ${ENCLAVE_TYPE} -t ${ENCLAVE_PLATFORM}
${NODES}
)
Expand Down
36 changes: 17 additions & 19 deletions tests/infra/basicperf.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
import datetime
import ccf.ledger
import plotext as plt
import infra.bencher


def configure_remote_client(args, client_id, client_host, common_dir):
Expand Down Expand Up @@ -377,24 +378,25 @@ def run(args):
for remote_client in clients:
remote_client.stop()

additional_metrics = {}
metrics = []
if not args.stop_primary_after_s:
primary, _ = network.find_primary()
with primary.client() as nc:
r = nc.get("/node/memory")
assert r.status_code == http.HTTPStatus.OK.value

results = r.body.json()
current_value = results["current_allocated_heap_size"]
peak_value = results["peak_allocated_heap_size"]

# Do not upload empty metrics (virtual doesn't report memory use)
if peak_value != 0:
# Construct name for heap metric, removing ^ suffix if present
heap_peak_metric = args.label
if heap_peak_metric.endswith("^"):
heap_peak_metric = heap_peak_metric[:-1]
heap_peak_metric += "_mem"
additional_metrics[heap_peak_metric] = peak_value
if current_value != 0 and peak_value != 0:
metrics.append(
infra.bencher.Memory(
value=current_value,
high_value=peak_value,
)
)

network.stop_all_nodes()

Expand Down Expand Up @@ -587,17 +589,13 @@ def table():
f"Errors: {number_of_errors} ({number_of_errors / total_number_of_requests * 100:.2f}%)"
)

# https://github.com/microsoft/CCF/issues/6126
# with cimetrics.upload.metrics(complete=False) as metrics:
# LOG.success("Uploading results")
# metrics.put(args.label, round(throughput, 1))

# for key, value in additional_metrics.items():
# metrics.put(key, value)

metrics = {args.label: {"throughput": {"value": round(throughput, 1)}}}
with open("bencher.json", "w") as fd:
json.dump(metrics, fd)
bf = infra.bencher.Bencher()
metrics.append(infra.bencher.Throughput(round(throughput, 1)))
for metric in metrics:
bf.set(
args.label,
metric,
)

except Exception as e:
LOG.error(f"Stopping clients due to exception: {e}")
Expand Down
21 changes: 19 additions & 2 deletions tests/infra/bencher.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,15 +44,32 @@ def __init__(
self.throughput = Value(value, high_value, low_value)


@dataclasses.dataclass
class Memory:
    """Bencher "memory" metric.

    Wraps a single Value holding the current allocated heap size, with an
    optional high watermark (peak allocation) and low bound, matching the
    shape of the sibling Latency/Throughput metric classes in this file.
    """

    # NOTE(review): Value is declared earlier in this file (outside this
    # view); presumably a (value, high_value, low_value) dataclass — confirm.
    memory: Value

    def __init__(
        self,
        value: float,
        high_value: Optional[float] = None,
        low_value: Optional[float] = None,
    ):
        self.memory = Value(value, high_value, low_value)


class Bencher:
    """Accumulates benchmark metrics into a Bencher Metric Format JSON file.

    Each call to set() read-modify-writes BENCHER_FILE (a module-level
    constant naming the output file, declared earlier in this file), so
    metrics from separate test runs accumulate in one document.
    """

    def __init__(self):
        # Seed the file with an empty JSON object on first use so that
        # set() can always json.load() it.
        if not os.path.isfile(BENCHER_FILE):
            with open(BENCHER_FILE, "w+") as bf:
                json.dump({}, bf)

    def set(self, key: str, metric: Union[Latency, Throughput, Memory]):
        """Merge one metric into the entry for `key`.

        Metrics are dataclasses with a single distinctively-named field
        (e.g. "throughput", "memory"), so update() lets multiple metric
        kinds coexist under the same benchmark key instead of the last
        write clobbering the others.
        """
        with open(BENCHER_FILE, "r") as bf:
            data = json.load(bf)
        metric_val = dataclasses.asdict(metric)
        if key in data:
            data[key].update(metric_val)
        else:
            data[key] = metric_val
        with open(BENCHER_FILE, "w") as bf:
            json.dump(data, bf, indent=4)

0 comments on commit 0ad88b2

Please sign in to comment.