Setup benchmark suite
Signed-off-by: Tim Paine <[email protected]>
timkpaine committed Jul 13, 2024
1 parent 323122e commit a09002a
Showing 10 changed files with 162 additions and 0 deletions.
5 changes: 5 additions & 0 deletions .gitignore
@@ -104,6 +104,11 @@ csp/lib/
*.so
*.tsbuildinfo

# Benchmarks
.asv
ci/benchmarks/*
!ci/benchmarks/benchmarks.json

# Jupyter / Editors
.ipynb_checkpoints
.autoversion
23 changes: 23 additions & 0 deletions Makefile
@@ -118,6 +118,29 @@ dockerps: ## spin up docker compose services for adapter testing
dockerdown: ## tear down docker compose services for adapter testing
	$(DOCKER) compose -f ci/$(ADAPTER)/docker-compose.yml down

##############
# BENCHMARKS #
##############
.PHONY: benchmark benchmarks benchmark-regen benchmark-view benchmarks-regen benchmarks-view
benchmark: ## run benchmarks
	python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose `git rev-parse HEAD`

# https://github.com/airspeed-velocity/asv/issues/1027
# https://github.com/airspeed-velocity/asv/issues/488
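# <tag>^! is git revision syntax for "just this commit", so asv benchmarks each tagged release individually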
benchmark-regen:
	python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose v0.0.3^!
	python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose v0.0.4^!
	python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose v0.0.5^!

benchmark-view: ## generate viewable website of benchmark results
	python -m asv publish --config csp/benchmarks/asv.conf.jsonc
	python -m asv preview --config csp/benchmarks/asv.conf.jsonc

# Alias
benchmarks: benchmark
benchmarks-regen: benchmark-regen
benchmarks-view: benchmark-view

###########
# VERSION #
###########
33 changes: 33 additions & 0 deletions ci/benchmarks/benchmarks.json
@@ -0,0 +1,33 @@
{
"stats.basic.StatsBenchmarkSuite.time_stats_qtl": {
"code": "class StatsBenchmarkSuite:\n def time_stats_qtl(self):\n def g_qtl():\n data = csp.curve(typ=np.ndarray, data=self.DATA)\n median = csp.stats.median(data, interval=self.INTERVAL)\n csp.add_graph_output(\"final_median\", median, tick_count=1)\n \n qtl_times = []\n \n for _ in range(self.NUM_SAMPLES):\n start = time.time()\n csp.run(g_qtl, realtime=False, starttime=self.st, endtime=timedelta(seconds=self.N))\n post_qtl = time.time()\n qtl_times.append(post_qtl - start)\n \n avg_med = sum(qtl_times) / self.NUM_SAMPLES\n print(\n f\"Average time in {self.NUM_SAMPLES} tests for median with {self.N=}, {self.ARRAY_SIZE=}, {self.INTERVAL=}: {round(avg_med, 2)} s\"\n )\n return avg_med\n\n def setup(self):\n self.st = datetime(2020, 1, 1)\n self.N = 1_000\n self.ARRAY_SIZE = 100\n self.TEST_TIMES = [self.st + timedelta(seconds=i) for i in range(self.N)]\n self.RANDOM_VALUES = [np.random.normal(size=(self.ARRAY_SIZE,)) for i in range(self.N)] # 100 element np array\n self.DATA = list(zip(self.TEST_TIMES, self.RANDOM_VALUES))\n self.INTERVAL = 500\n self.NUM_SAMPLES = 100",
"min_run_count": 2,
"name": "stats.basic.StatsBenchmarkSuite.time_stats_qtl",
"number": 0,
"param_names": [],
"params": [],
"repeat": 0,
"rounds": 2,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "21f280e4eeceac0ca2172bed432939c57f2b2618bd26bd27d15d4ca177e2ab26",
"warmup_time": -1
},
"stats.basic.StatsBenchmarkSuite.time_stats_rank": {
"code": "class StatsBenchmarkSuite:\n def time_stats_rank(self):\n def g_rank():\n data = csp.curve(typ=np.ndarray, data=self.DATA)\n rank = csp.stats.rank(data, interval=self.INTERVAL)\n csp.add_graph_output(\"final_rank\", rank, tick_count=1)\n \n rank_times = []\n \n for _ in range(self.NUM_SAMPLES):\n start = time.time()\n csp.run(g_rank, realtime=False, starttime=self.st, endtime=timedelta(seconds=self.N))\n post_rank = time.time()\n rank_times.append(post_rank - start)\n \n avg_rank = sum(rank_times) / self.NUM_SAMPLES\n print(\n f\"Average time in {self.NUM_SAMPLES} tests for rank with {self.N=}, {self.ARRAY_SIZE=}, {self.INTERVAL=}: {round(avg_rank, 2)} s\"\n )\n return avg_rank\n\n def setup(self):\n self.st = datetime(2020, 1, 1)\n self.N = 1_000\n self.ARRAY_SIZE = 100\n self.TEST_TIMES = [self.st + timedelta(seconds=i) for i in range(self.N)]\n self.RANDOM_VALUES = [np.random.normal(size=(self.ARRAY_SIZE,)) for i in range(self.N)] # 100 element np array\n self.DATA = list(zip(self.TEST_TIMES, self.RANDOM_VALUES))\n self.INTERVAL = 500\n self.NUM_SAMPLES = 100",
"min_run_count": 2,
"name": "stats.basic.StatsBenchmarkSuite.time_stats_rank",
"number": 0,
"param_names": [],
"params": [],
"repeat": 0,
"rounds": 2,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "4c302ccf942084ac2367999fc84b2ba882c2ff74cddd80a3c27c8f8a1aee333d",
"warmup_time": -1
},
"version": 2
}
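
A quick way to sanity-check this committed metadata is to load it with the standard json module. A minimal sketch, assuming it runs from the repository root (the path and the "type"/"unit" fields come straight from the file above):

import json

# Load the committed asv benchmark metadata.
with open("ci/benchmarks/benchmarks.json") as f:
    meta = json.load(f)

# Every top-level key except the schema "version" entry names one benchmark.
for name, spec in meta.items():
    if name == "version":
        continue
    print(name, "->", spec["type"], spec["unit"])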
1 change: 1 addition & 0 deletions conda/dev-environment-unix.yml
@@ -3,6 +3,7 @@ channels:
- conda-forge
- nodefaults
dependencies:
- asv
- bison
- brotli
- build
1 change: 1 addition & 0 deletions conda/dev-environment-win.yml
@@ -3,6 +3,7 @@ channels:
- conda-forge
- nodefaults
dependencies:
- asv
- brotli
- build
- bump2version>=1
Empty file added csp/benchmarks/__init__.py
Empty file.
33 changes: 33 additions & 0 deletions csp/benchmarks/asv.conf.jsonc
@@ -0,0 +1,33 @@
// https://asv.readthedocs.io/en/v0.6.3/asv.conf.json.html
{
"version": 1,
"project": "csp",
"project_url": "https://github.com/Point72/csp",
"repo": "../..",
"branches": ["main"],
"dvcs": "git",

"install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"],
"uninstall_command": ["return-code=any python -mpip uninstall -y {project}"],
"build_command": [
"python -m pip install build",
"python -m build --wheel -o {build_cache_dir} {build_dir}"
],
"environment_type": "virtualenv",
"install_timeout": 600,
"show_commit_url": "http://github.com/point72/csp/commit/",

"pythons": ["3.11"],

// "environment_type": "mamba",
// "conda_channels": ["conda-forge"],
// "conda_environment_file": "conda/dev-environment-unix.yml",

"benchmark_dir": "../../csp/benchmarks",
"env_dir": "../../.asv/env",
"results_dir": "../../ci/benchmarks",
"html_dir": "../../.asv/html",

"hash_length": 8,
"build_cache_size": 2
}
Empty file added csp/benchmarks/stats/__init__.py
Empty file.
64 changes: 64 additions & 0 deletions csp/benchmarks/stats/basic.py
@@ -0,0 +1,64 @@
import time
from datetime import datetime, timedelta

import numpy as np

import csp


class StatsBenchmarkSuite:
def setup(self):
self.st = datetime(2020, 1, 1)
self.N = 1_000
self.ARRAY_SIZE = 100
self.TEST_TIMES = [self.st + timedelta(seconds=i) for i in range(self.N)]
        self.RANDOM_VALUES = [np.random.normal(size=(self.ARRAY_SIZE,)) for _ in range(self.N)]  # one 100-element np array per tick
self.DATA = list(zip(self.TEST_TIMES, self.RANDOM_VALUES))
self.INTERVAL = 500
self.NUM_SAMPLES = 100

def time_stats_qtl(self):
def g_qtl():
data = csp.curve(typ=np.ndarray, data=self.DATA)
median = csp.stats.median(data, interval=self.INTERVAL)
csp.add_graph_output("final_median", median, tick_count=1)

qtl_times = []

for _ in range(self.NUM_SAMPLES):
start = time.time()
csp.run(g_qtl, realtime=False, starttime=self.st, endtime=timedelta(seconds=self.N))
post_qtl = time.time()
qtl_times.append(post_qtl - start)

avg_med = sum(qtl_times) / self.NUM_SAMPLES
print(
f"Average time in {self.NUM_SAMPLES} tests for median with {self.N=}, {self.ARRAY_SIZE=}, {self.INTERVAL=}: {round(avg_med, 2)} s"
)
return avg_med

def time_stats_rank(self):
def g_rank():
data = csp.curve(typ=np.ndarray, data=self.DATA)
rank = csp.stats.rank(data, interval=self.INTERVAL)
csp.add_graph_output("final_rank", rank, tick_count=1)

rank_times = []

for _ in range(self.NUM_SAMPLES):
start = time.time()
csp.run(g_rank, realtime=False, starttime=self.st, endtime=timedelta(seconds=self.N))
post_rank = time.time()
rank_times.append(post_rank - start)

avg_rank = sum(rank_times) / self.NUM_SAMPLES
print(
f"Average time in {self.NUM_SAMPLES} tests for rank with {self.N=}, {self.ARRAY_SIZE=}, {self.INTERVAL=}: {round(avg_rank, 2)} s"
)
return avg_rank


if __name__ == "__main__":
sbs = StatsBenchmarkSuite()
sbs.setup()
sbs.time_stats_qtl()
sbs.time_stats_rank()
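
Note that asv itself times any method whose name starts with time_ in the classes it discovers under benchmark_dir, so the manual timing loops above mainly serve the standalone __main__ path. For reference, a minimal sketch of an additional suite following the same conventions; the MeanBenchmarkSuite name and the csp.stats.mean call are illustrative assumptions, not part of this commit:

from datetime import datetime, timedelta

import csp


class MeanBenchmarkSuite:
    def setup(self):
        # Scalar float curve, same tick cadence as StatsBenchmarkSuite.
        self.st = datetime(2020, 1, 1)
        self.N = 1_000
        self.DATA = [(self.st + timedelta(seconds=i), float(i)) for i in range(self.N)]

    def time_stats_mean(self):
        # asv times this method end-to-end; no manual timing loop is needed.
        def g_mean():
            data = csp.curve(typ=float, data=self.DATA)
            mean = csp.stats.mean(data, interval=500)
            csp.add_graph_output("final_mean", mean, tick_count=1)

        csp.run(g_mean, realtime=False, starttime=self.st, endtime=timedelta(seconds=self.N))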
2 changes: 2 additions & 0 deletions pyproject.toml
@@ -83,6 +83,8 @@ develop = [
"sqlalchemy", # db
"threadpoolctl", # test_random
"tornado", # profiler, perspective, websocket
# benchmarks
"asv",
]
showgraph = [
"graphviz",
