Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Tablib and tabulate #257

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
148 changes: 100 additions & 48 deletions locust/stats.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,15 @@
import hashlib
import six
from six.moves import xrange
import tablib
from tabulate import tabulate

from . import events
from .exception import StopLocust
from .log import console_logger

STATS_NAME_WIDTH = 60
PERCENTILES = (0.5, 0.66, 0.75, 0.80, 0.9, 0.95, 0.98, 0.99)

class RequestStatsAdditionError(Exception):
pass
Expand Down Expand Up @@ -65,7 +68,80 @@ def clear_all(self):
self.max_requests = None
self.last_request_timestamp = None
self.start_time = None


def percentile_column_name(self, percentile):
    """
    Build the dataset column header for a percentile given as a
    fraction, e.g. ``0.95`` -> ``"95%"``.
    """
    # The "%" presentation type multiplies by 100 and appends the sign.
    return format(percentile, ".0%")

def get_percentile_dataset(self, include_empty=False):
    """
    Build a tablib Dataset of response-time percentiles: one row per
    tracked entry, plus an aggregated total row when any response
    times were recorded.

    When ``include_empty`` is True, entries with no successful
    requests are still emitted (their percentile cells read "N/A").
    """
    headers = ["Method", "Name", "# reqs"]
    headers.extend(self.percentile_column_name(p) for p in PERCENTILES)
    headers.append("100%")

    dataset = tablib.Dataset()
    dataset.headers = headers

    # Rows come out in key order; only the entry value is needed per row.
    for _, entry in sorted(six.iteritems(self.entries)):
        dataset.append(entry.percentile(include_empty))

    totals = self.aggregated_stats(full_request_history=True)
    if totals.response_times:
        dataset.append(totals.percentile(include_empty))

    return dataset

def get_request_stats_dataset(self):
    """
    Build a tablib Dataset with one summary row per tracked entry,
    followed by an aggregated total row over the full request history.
    """
    def as_row(stats):
        # One dataset row for a single stats entry.
        return (
            stats.method,
            stats.name,
            stats.num_requests,
            stats.num_failures,
            stats.median_response_time,
            stats.avg_response_time,
            stats.min_response_time or 0,  # None -> 0 when nothing recorded
            stats.max_response_time,
            stats.avg_content_length,
            stats.total_rps,
        )

    dataset = tablib.Dataset()
    dataset.headers = [
        "Method",
        "Name",
        "# requests",
        "# failures",
        "Median response time",
        "Average response time",
        "Min response time",
        "Max response time",
        "Average Content Size",
        "Requests/s",
    ]

    # Rows come out in key order; only the entry value is needed per row.
    for _, entry in sorted(six.iteritems(self.entries)):
        dataset.append(as_row(entry))

    dataset.append(as_row(self.aggregated_stats(full_request_history=True)))

    return dataset


class StatsEntry(object):
"""
Expand Down Expand Up @@ -339,23 +415,24 @@ def get_response_time_percentile(self, percent):
if((self.num_requests - processed_count) <= num_of_request):
return response_time

def percentile(self, tpl=" %-" + str(STATS_NAME_WIDTH) + "s %8d %6d %6d %6d %6d %6d %6d %6d %6d %6d"):
if not self.num_requests:
def percentile(self, include_empty=False):
    """
    Return this entry's percentile stats as a row tuple:
    ``(method, name, num_requests, <one value per PERCENTILES>, max)``.

    :param include_empty: when False (default), an entry with no
        successful requests raises ValueError; when True, such an
        entry is returned with "N/A" in every percentile/max cell.
    :raises ValueError: if there are no requests and ``include_empty``
        is False.
    """
    if not self.num_requests and not include_empty:
        raise ValueError("Can't calculate percentile on url with no successful requests")

    results = [self.method, self.name, self.num_requests]

    if self.num_requests > 0:
        for percentile in PERCENTILES:
            results.append(self.get_response_time_percentile(percentile))
        results.append(self.max_response_time)
    else:
        # One "N/A" per percentile column, plus one for the "100%" column.
        # BUG FIX: the original referenced the undefined name `result`
        # here, so include_empty=True raised NameError instead of
        # producing the N/A row.
        results.extend(["N/A"] * (len(PERCENTILES) + 1))

    return tuple(results)


class StatsError(object):
def __init__(self, method, name, error, occurences=0):
Expand Down Expand Up @@ -451,40 +528,15 @@ def on_slave_report(client_id, data):


def print_stats(stats):
console_logger.info((" %-" + str(STATS_NAME_WIDTH) + "s %7s %12s %7s %7s %7s | %7s %7s") % ('Name', '# reqs', '# fails', 'Avg', 'Min', 'Max', 'Median', 'req/s'))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
total_rps = 0
total_reqs = 0
total_failures = 0
for key in sorted(six.iterkeys(stats)):
r = stats[key]
total_rps += r.current_rps
total_reqs += r.num_requests
total_failures += r.num_failures
console_logger.info(r)
console_logger.info("-" * (80 + STATS_NAME_WIDTH))

try:
fail_percent = (total_failures/float(total_reqs))*100
except ZeroDivisionError:
fail_percent = 0

console_logger.info((" %-" + str(STATS_NAME_WIDTH) + "s %7d %12s %42.2f") % ('Total', total_reqs, "%d(%.2f%%)" % (total_failures, fail_percent), total_rps))
data = stats.get_request_stats_dataset()
console_logger.info(tabulate(data.dict, headers="keys"))
console_logger.info("")

def print_percentile_stats(stats):
data = stats.get_percentile_dataset()

console_logger.info("Percentage of the requests completed within given times")
console_logger.info((" %-" + str(STATS_NAME_WIDTH) + "s %8s %6s %6s %6s %6s %6s %6s %6s %6s %6s") % ('Name', '# reqs', '50%', '66%', '75%', '80%', '90%', '95%', '98%', '99%', '100%'))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
for key in sorted(six.iterkeys(stats)):
r = stats[key]
if r.response_times:
console_logger.info(r.percentile())
console_logger.info("-" * (80 + STATS_NAME_WIDTH))

total_stats = global_stats.aggregated_stats()
if total_stats.response_times:
console_logger.info(total_stats.percentile())
console_logger.info(tabulate(data.dict, headers="keys"))
console_logger.info("")

def print_error_report():
Expand All @@ -501,5 +553,5 @@ def print_error_report():
def stats_printer():
from runners import locust_runner
while True:
print_stats(locust_runner.request_stats)
print_stats(locust_runner.stats)
gevent.sleep(2)
54 changes: 4 additions & 50 deletions locust/web.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,36 +69,8 @@ def reset_stats():

@app.route("/stats/requests/csv")
def request_stats_csv():
rows = [
",".join([
'"Method"',
'"Name"',
'"# requests"',
'"# failures"',
'"Median response time"',
'"Average response time"',
'"Min response time"',
'"Max response time"',
'"Average Content Size"',
'"Requests/s"',
])
]

for s in chain(_sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.aggregated_stats("Total", full_request_history=True)]):
rows.append('"%s","%s",%i,%i,%i,%i,%i,%i,%i,%.2f' % (
s.method,
s.name,
s.num_requests,
s.num_failures,
s.median_response_time,
s.avg_response_time,
s.min_response_time or 0,
s.max_response_time,
s.avg_content_length,
s.total_rps,
))

response = make_response("\n".join(rows))
data = runners.locust_runner.stats.get_request_stats_dataset()
response = make_response(data.csv)
file_name = "requests_{0}.csv".format(time())
disposition = "attachment;filename={0}".format(file_name)
response.headers["Content-type"] = "text/csv"
Expand All @@ -107,26 +79,8 @@ def request_stats_csv():

@app.route("/stats/distribution/csv")
def distribution_stats_csv():
rows = [",".join((
'"Name"',
'"# requests"',
'"50%"',
'"66%"',
'"75%"',
'"80%"',
'"90%"',
'"95%"',
'"98%"',
'"99%"',
'"100%"',
))]
for s in chain(_sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.aggregated_stats("Total", full_request_history=True)]):
if s.num_requests:
rows.append(s.percentile(tpl='"%s",%i,%i,%i,%i,%i,%i,%i,%i,%i,%i'))
else:
rows.append('"%s",0,"N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A"' % s.name)

response = make_response("\n".join(rows))
data = runners.locust_runner.stats.get_percentile_dataset(include_empty=True)
response = make_response(data.csv)
file_name = "distribution_{0}.csv".format(time())
disposition = "attachment;filename={0}".format(file_name)
response.headers["Content-type"] = "text/csv"
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=["gevent==1.1.1", "flask>=0.10.1", "requests>=2.9.1", "msgpack-python>=0.4.2", "six>=1.10.0", "pyzmq==15.2.0"],
install_requires=["gevent==1.1.1", "flask>=0.10.1", "requests>=2.9.1", "msgpack-python>=0.4.2", "six>=1.10.0", "pyzmq==15.2.0", "tablib", "tabulate"],
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

These dependencies should be pinned to explicit versions (e.g. `tablib==x.y.z`, `tabulate==x.y.z`) so that a future upstream release of either package cannot silently break locust.

tests_require=['unittest2', 'mock'],
entry_points={
'console_scripts': [
Expand Down