More benchmarks (#1790)
* Add 2 benchmarks for evaluating the memory use of callStorm and storm

* Add a /16 benchmark

* Make the tag mechanism clearer
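
For context on that last bullet: benchmarks in this script are grouped into suites by tagging them through the @benchmark decorator, and the CLI picks a suite with --tag (or individual benchmarks by name prefix with --bench). A minimal sketch of how that selection can work is below; the decorator shape matches the diff, but selectbenchmarks and the attribute name are illustrative assumptions, not the script's actual code.

# Sketch of tag-based benchmark selection (illustrative; only the @benchmark
# decorator shape is taken from the diff below, the rest is assumed).
def benchmark(tags=None):
    '''Mark a coroutine as a benchmark and record which suites (tags) it belongs to.'''
    def decorator(func):
        func._tags = set(tags or ())
        return func
    return decorator

def selectbenchmarks(methods, bench=None, tag=None):
    '''Filter (name, method) pairs by --bench name prefixes and/or a --tag suite.'''
    picked = []
    for name, meth in methods:
        if bench is not None and not any(name.startswith(p) for p in bench):
            continue
        if tag is not None and tag not in getattr(meth, '_tags', set()):
            continue
        picked.append((name, meth))
    return picked

Under this sketch, --tag addnodes would select only the benchmarks decorated with @benchmark({'addnodes'}) in the diff below, while the 'official' tag selects the curated default suite.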
jnwatson authored Jul 3, 2020
1 parent a665cfd commit 3b733b2
Showing 1 changed file with 55 additions and 3 deletions: scripts/benchmark_cortex.py
@@ -59,7 +59,7 @@
Benchmark cortex operations
TODO: separate client process, multiple clients
-TODO: tagprops, regex, control flow, node data, multiple layers, spawn option, remote layer
+TODO: tagprops, regex, control flow, node data, multiple layers, spawn option
'''

logger = logging.getLogger(__name__)
@@ -179,6 +179,10 @@ def printreport(self, configname: str):

     def reportdata(self):
         retn = []
+        if self.num_iters < 3:
+            print('--niters must be > 2 for effective statistics')
+            return retn
+
         for name, measurements in self.measurements.items():
             # ms = ', '.join(f'{m[0]:0.3}' for m in measurements)
             tottimes = [m[0] for m in measurements[1:]]
@@ -229,6 +233,14 @@ async def do00EmptyQuery(self, core: s_cortex.Cortex, prox: s_telepath.Proxy) -> int:
         assert count == 0
         return self.workfactor // 10
 
+    @benchmark()
+    async def do00NewQuery(self, core: s_cortex.Cortex, prox: s_telepath.Proxy) -> int:
+        for i in range(self.workfactor):
+            count = await acount(prox.eval(f'$x={i}'))
+
+        assert count == 0
+        return self.workfactor
+
     @benchmark({'official'})
     async def do01SimpleCount(self, core: s_cortex.Cortex, prox: s_telepath.Proxy) -> int:
         count = await acount(prox.eval('inet:ipv4 | count | spin'))
@@ -295,6 +307,36 @@ async def do07AAddNodes(self, core: s_cortex.Cortex, prox: s_telepath.Proxy) -> int:
         assert count == self.workfactor
         return count
 
+    @benchmark({'addnodes', })
+    async def do07AAddNodesCallStorm(self, core: s_cortex.Cortex, prox: s_telepath.Proxy) -> int:
+        tags_to_add = '+#test'
+        count = 0
+
+        for node in self.testdata.asns2prop:
+            props_to_add = f":name = {node[1]['props']['name']}"
+            form, valu = node[0]
+
+            opts = {'vars': {'valu': valu}}
+            await prox.callStorm(f'[ {form}=$valu {props_to_add} {tags_to_add}] return($node.pack(dorepr=1))',
+                                 opts=opts)
+            count += 1
+        assert count == self.workfactor
+        return count
+
+    @benchmark({'addnodes', })
+    async def do07AAddNodesStorm(self, core: s_cortex.Cortex, prox: s_telepath.Proxy) -> int:
+        tags_to_add = '+#test'
+        count = 0
+
+        for node in self.testdata.asns2prop:
+            props_to_add = f":name = {node[1]['props']['name']}"
+            form, valu = node[0]
+
+            opts = {'vars': {'valu': valu}}
+            count += await acount(prox.storm(f'[ {form}=$valu {props_to_add} {tags_to_add}]', opts=opts))
+        assert count == self.workfactor
+        return count
+
     @benchmark({'addnodes'})
     async def do07AAddNodesSync(self, core: s_cortex.Cortex, prox: s_telepath.Proxy) -> int:
         count = await acount(prox.addNodes(self.testdata.asns2))
@@ -354,6 +396,13 @@ async def do10AutoAdds(self, core: s_cortex.Cortex, prox: s_telepath.Proxy) -> int:
         assert count == self.workfactor
         return self.workfactor
 
+    @benchmark()
+    async def do10SlashAdds(self, core: s_cortex.Cortex, prox: s_telepath.Proxy) -> int:
+        q = '[ inet:ipv4=1.2.0.0/16 ] | spin'
+        count = await acount(prox.eval(q))
+        assert count == 0
+        return 2 ** 16
+
     @benchmark()
     async def do10Formatting(self, core: s_cortex.Cortex, prox: s_telepath.Proxy) -> int:
         '''
@@ -505,8 +554,8 @@ def getParser():
                         help='Prefix to append to the autogenerated filename for json output.')
     parser.add_argument('--bench', '-b', nargs='*', default=None,
                         help='Prefixes of which benchmarks to run (defaults to run all)')
-    parser.add_argument('--tag', '-t', default='official',
-                        help='Tag of which suite to run (defaults to "official")')
+    parser.add_argument('--tag', '-t', default=None,
+                        help='Tag of which suite to run (defaults to "official" if bench not set)')
     parser.add_argument('--do-profiling', action='store_true')
     return parser

@@ -519,6 +568,9 @@ def getParser():
         print('Error: module "yappi" must be installed to use --do-profiling')
         sys.exit(1)
 
+    if opts.bench is None and opts.tag is None:
+        opts.tag = 'official'
+
     asyncio.run(benchmarkAll(opts.config, 1, opts.workfactor, opts.tmpdir,
                              jsondir=opts.jsondir, jsonprefix=opts.jsonprefix,
                              niters=opts.niters, bench=opts.bench, do_profiling=opts.do_profiling, tag=opts.tag))
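
The net effect of the two changes above (the --tag default moving to None plus the fallback in the main block) is that --tag only falls back to 'official' when --bench is not given. Below is a tiny standalone repro of just that defaulting logic; the parser here is a stand-in carrying only the two options this commit touches, not the real getParser() from the script.

import argparse

# Minimal repro of the new --tag defaulting (illustration only; the real parser
# in scripts/benchmark_cortex.py has many more arguments).
parser = argparse.ArgumentParser()
parser.add_argument('--bench', '-b', nargs='*', default=None)
parser.add_argument('--tag', '-t', default=None)

for argv in ([], ['--bench', 'do07'], ['--tag', 'addnodes']):
    opts = parser.parse_args(argv)
    if opts.bench is None and opts.tag is None:
        opts.tag = 'official'   # same fallback added in the hunk above
    print(argv, '-> tag =', opts.tag, ', bench =', opts.bench)

Running it shows that no arguments yields tag='official', a bare --bench prefix leaves the tag filter off, and an explicit --tag is used as given.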
