diff --git a/locust/core.py b/locust/core.py index f8e97f788f..a3029904cc 100644 --- a/locust/core.py +++ b/locust/core.py @@ -342,7 +342,7 @@ def user_count(self): def weight_locusts(self, amount, stop_timeout = None): """ Distributes the amount of locusts for each WebLocust-class according to it's weight - and a list: bucket with the weighted locusts is returned + returns a list "bucket" with the weighted locusts """ bucket = [] weight_sum = sum((locust.weight for locust in self.locust_classes)) @@ -362,7 +362,7 @@ def weight_locusts(self, amount, stop_timeout = None): bucket.extend([locust for x in xrange(0, num_locusts)]) return bucket - def hatch(self, spawn_count=None, stop_timeout=None, wait=False): + def spawn_locusts(self, spawn_count=None, stop_timeout=None, wait=False): if spawn_count is None: spawn_count = self.num_clients @@ -380,7 +380,7 @@ def hatch(self, spawn_count=None, stop_timeout=None, wait=False): print "\nHatching and swarming %i clients at the rate %g clients/s...\n" % (spawn_count, self.hatch_rate) occurence_count = dict([(l.__name__, 0) for l in self.locust_classes]) - def spawn_locusts(): + def hatch(): sleep_time = 1.0 / self.hatch_rate while True: if not bucket: @@ -402,7 +402,7 @@ def start_locust(_): print "%i locusts hatched" % len(self.locusts) gevent.sleep(sleep_time) - spawn_locusts() + hatch() if wait: self.locusts.join() print "All locusts dead\n" @@ -419,7 +419,7 @@ def kill_locusts(self, kill_count): print "killing locusts:", kill_count dying = [] for g in self.locusts: - for l in bucket: + for l in bucket: if l == g.args[0]: dying.append(g) bucket.remove(l) @@ -431,8 +431,8 @@ def kill_locusts(self, kill_count): def start_hatching(self, locust_count=None, hatch_rate=None, wait=False): print "start hatching", locust_count, hatch_rate, self.state if self.state != STATE_RUNNING and self.state != STATE_HATCHING: - RequestStats.clear_all() - RequestStats.global_start_time = time() + RequestStats.clear_all() + 
RequestStats.global_start_time = time() # Dynamically changing the locust count if self.state != STATE_INIT and self.state != STATE_STOPPED: self.state = STATE_HATCHING @@ -445,14 +445,14 @@ def start_hatching(self, locust_count=None, hatch_rate=None, wait=False): if hatch_rate: self.hatch_rate = hatch_rate spawn_count = locust_count - self.num_clients - self.hatch(spawn_count=spawn_count) + self.spawn_locusts(spawn_count=spawn_count) else: if hatch_rate: self.hatch_rate = hatch_rate if locust_count: - self.hatch(locust_count, wait=wait) + self.spawn_locusts(locust_count, wait=wait) else: - self.hatch(wait=wait) + self.spawn_locusts(wait=wait) def stop(self): # if we are currently hatching locusts we need to kill the hatching greenlet first @@ -461,6 +461,82 @@ def stop(self): self.locusts.kill(block=True) self.state = STATE_STOPPED + + def start_ramping(self, hatch_rate=None, max_locusts=1000, hatch_stride=100, + percent=0.95, response_time_limit=2000, acceptable_fail=0.05, + precision=200, start_count=0, calibration_time=15): + + from rampstats import current_percentile + if hatch_rate: + self.hatch_rate = hatch_rate + + def ramp_down_help(clients, hatch_stride): + print "ramping down..." 
+ hatch_stride = max(hatch_stride/2, precision) + clients -= hatch_stride + self.start_hatching(clients, self.hatch_rate) + return clients, hatch_stride + + def ramp_up(clients, hatch_stride, boundery_found=False): + while True: + if self.state != STATE_HATCHING: + if self.num_clients >= max_locusts: + print "ramp up stopped due to max locusts limit reached:", max_locusts + clients, hatch_stride = ramp_down_help(clients, hatch_stride) + return ramp_down(clients, hatch_stride) + gevent.sleep(calibration_time) + fail_ratio = RequestStats.sum_stats().fail_ratio + if fail_ratio > acceptable_fail: + print "ramp up stopped due to acceptable fail ratio %d%% exceeded with fail ratio %d%%" % (acceptable_fail*100, fail_ratio*100) + clients, hatch_stride = ramp_down_help(clients, hatch_stride) + return ramp_down(clients, hatch_stride) + p = current_percentile(percent) + if p >= response_time_limit: + print "ramp up stopped due to percentile response times getting high:", p + clients, hatch_stride = ramp_down_help(clients, hatch_stride) + return ramp_down(clients, hatch_stride) + if boundery_found and hatch_stride <= precision: + print "sweet spot found, ramping stopped!" + return + print "ramping up..." + if boundery_found: + hatch_stride = max((hatch_stride/2),precision) + clients += hatch_stride + self.start_hatching(clients, self.hatch_rate) + gevent.sleep(1) + + def ramp_down(clients, hatch_stride): + while True: + if self.state != STATE_HATCHING: + if self.num_clients < max_locusts: + gevent.sleep(calibration_time) + fail_ratio = RequestStats.sum_stats().fail_ratio + if fail_ratio <= acceptable_fail: + p = current_percentile(percent) + if p <= response_time_limit: + if hatch_stride <= precision: + print "sweet spot found, ramping stopped!" + return + print "ramping up..." + hatch_stride = max((hatch_stride/2),precision) + clients += hatch_stride + self.start_hatching(clients, self.hatch_rate) + return ramp_up(clients, hatch_stride, True) + print "ramping down..." 
+ hatch_stride = max((hatch_stride/2),precision) + clients -= hatch_stride + if clients > 0: + self.start_hatching(clients, self.hatch_rate) + else: + print "WARNING: no responses met the ramping thresholds, check your ramp configuration, locustfile and \"--host\" address" + print "ramping stopped!" + return + gevent.sleep(1) + + if start_count > self.num_clients: + self.start_hatching(start_count, hatch_rate) + ramp_up(start_count, hatch_stride) + class LocalLocustRunner(LocustRunner): def start_hatching(self, locust_count=None, hatch_rate=None, wait=False): self.hatching_greenlet = gevent.spawn(lambda: super(LocalLocustRunner, self).start_hatching(locust_count, hatch_rate, wait=wait)) @@ -536,68 +612,6 @@ def start_hatching(self, locust_count, hatch_rate): RequestStats.global_start_time = time() self.state = STATE_HATCHING - def start_ramping(self, hatch_rate=None, max_locusts=1000, hatch_stride=None, percent=0.95, response_time=2000, acceptable_fail=0.05): - if hatch_rate: - self.hatch_rate = hatch_rate - - if not hatch_stride: - hatch_stride = 100 - - clients = hatch_stride - - # Record low load percentile - def calibrate(): - self.start_hatching(clients, self.hatch_rate) - while True: - if self.state != STATE_HATCHING: - print "recording low_percentile..." 
- gevent.sleep(30) - percentile = RequestStats.sum_stats().one_percentile(percent) - print "low_percentile:", percentile - self.start_hatching(1, self.hatch_rate) - return percentile - gevent.sleep(1) - - low_percentile = calibrate() - - while True: - if self.state != STATE_HATCHING: - if self.num_clients >= max_locusts: - print "ramping stopped due to max_locusts limit reached:", max_locusts - return - gevent.sleep(10) - if RequestStats.sum_stats().fail_ratio >= acceptable_fail: - print "ramping stopped due to acceptable_fail ratio (%d1.2%%) exceeded with fail ratio %1.2d%%", (acceptable_fail*100, RequestStats.sum_stats().fail_ratio*100) - return - p = RequestStats.sum_stats().one_percentile(percent) - if p >= low_percentile * 2.0: - print "ramping stopped due to response times getting high:", p - return - self.start_hatching(clients, self.hatch_rate) - clients += hatch_stride - gevent.sleep(1) - -# while True: -# if self.state != STATE_HATCHING: -# print "self.num_clients: %i max_locusts: %i" % (self.num_clients, max_locusts) -# if self.num_clients >= max_locusts: -# print "ramping stopped due to max_locusts limit reached:", max_locusts -# return -# gevent.sleep(5) -# if self.state != STATE_INIT: -# print "num_reqs: %i fail_ratio: %1.2d" % (RequestStats.sum_stats().num_reqs, RequestStats.sum_stats().fail_ratio) -# while RequestStats.sum_stats().num_reqs < 100: -# if RequestStats.sum_stats().fail_ratio >= acceptable_fail: -# print "ramping stopped due to acceptable_fail ratio (%d1.2%%) exceeded with fail ratio %1.2d%%", (acceptable_fail*100, RequestStats.sum_stats().fail_ratio*100) -# return -# gevent.sleep(1) -# if RequestStats.sum_stats().one_percentile(percent) >= response_time: -# print "ramping stopped due to response times over %ims for %1.2f%%" % (response_time, percent*100) -# return -# self.start_hatching(clients, self.hatch_rate) -# clients += 10 * hatchrate -# gevent.sleep(1) - def stop(self): for client in self.clients.hatching + self.clients.running: 
self.server.send({"type":"stop", "data":{}}) diff --git a/locust/main.py b/locust/main.py index 2ac9814a7b..abd53765ac 100644 --- a/locust/main.py +++ b/locust/main.py @@ -160,6 +160,15 @@ def parse_options(): help="Host or IP adress of locust master for distributed load testing. Only used when running with --slave. Defaults to 127.0.0.1." ) + # ramp feature enabled option + parser.add_option( + '--ramp', + action='store_true', + dest='ramp', + default=False, + help="Enables the auto tuning ramping feature for finding highest stable client count. NOTE having ramp enabled will add some more overhead for additional stats gathering" + ) + # Finalize # Return three-tuple of parser + the output from parse_args (opt obj, args) opts, args = parser.parse_args() @@ -322,11 +331,11 @@ def main(): # if --master is set, implicitly set --web if options.master: options.web = True - + if options.web and not options.slave: # spawn web greenlet print "Starting web monitor on port 8089" - main_greenlet = gevent.spawn(web.start, locust_classes, options.hatch_rate, options.num_clients, options.num_requests) + main_greenlet = gevent.spawn(web.start, locust_classes, options.hatch_rate, options.num_clients, options.num_requests, options.ramp) # enable/disable gzip in WebLocust's HTTP client WebLocust.gzip = options.gzip @@ -343,6 +352,17 @@ def main(): core.locust_runner = SlaveLocustRunner(locust_classes, options.hatch_rate, options.num_clients, num_requests=options.num_requests, host=options.host, master_host=options.master_host) main_greenlet = core.locust_runner.greenlet + if options.ramp: + import rampstats + from rampstats import on_request_success, on_report_to_master, on_slave_report + import events + if options.slave: + events.report_to_master += on_report_to_master + if options.master: + events.slave_report += on_slave_report + else: + events.request_success += on_request_success + if options.print_stats or (not options.web and not options.slave): # spawn stats printing 
greenlet gevent.spawn(stats_printer) diff --git a/locust/rampstats.py b/locust/rampstats.py new file mode 100644 index 0000000000..e69733c30a --- /dev/null +++ b/locust/rampstats.py @@ -0,0 +1,49 @@ +from stats import percentile, RequestStats +from core import locust_runner, DistributedLocustRunner +from collections import deque +import events +import math + +master_response_times = deque([]) +slave_response_times = [] + +# Are we running in distributed mode or not? +is_distributed = isinstance(locust_runner, DistributedLocustRunner) + +# The time window in seconds that current_percentile use data from +PERCENTILE_TIME_WINDOW = 15.0 + +def current_percentile(percent): + if is_distributed: + # Flatten out the deque of lists and calculate the percentile to be returned + return percentile(sorted([item for sublist in master_response_times for item in sublist]), percent) + else: + return percentile(sorted(master_response_times), percent) + +def on_request_success(_, response_time, _2): + if is_distributed: + slave_response_times.append(response_time) + else: + master_response_times.append(response_time) + + # remove from the queue + rps = RequestStats.sum_stats().current_rps + if len(master_response_times) > rps*PERCENTILE_TIME_WINDOW: + for i in xrange(len(master_response_times) - int(math.ceil(rps*PERCENTILE_TIME_WINDOW))): + master_response_times.popleft() + +def on_report_to_master(_, data): + global slave_response_times + data["current_responses"] = slave_response_times + slave_response_times = [] + +def on_slave_report(_, data): + from core import locust_runner, SLAVE_REPORT_INTERVAL + if "current_responses" in data: + master_response_times.append(data["current_responses"]) + + # remove from the queue + slaves = locust_runner.slave_count + response_times_per_slave_count = PERCENTILE_TIME_WINDOW/SLAVE_REPORT_INTERVAL + if len(master_response_times) > slaves * response_times_per_slave_count: + master_response_times.popleft() diff --git a/locust/static/style.css 
b/locust/static/style.css index 2df06cbb17..8affab52f4 100644 --- a/locust/static/style.css +++ b/locust/static/style.css @@ -93,6 +93,12 @@ a { top: 100px; margin-left: -169px; } + +.ramp { + width:800px; + margin-left: -370px; +} + .start .padder, .edit .padder { padding: 30px; padding-top: 0px; @@ -167,6 +173,15 @@ a { .stopped .edit a.close_link, .ready .edit a.close_link {display: none;} .running .edit a.close_link, .hatching .edit a.close_link {display: inline;} +.ready .ramp {display: none;} + +.ready .ramp_text {display: inline;} +.hatching .ramp_text, .running .ramp_text, .stopped .ramp_text {display: none;} + +.stats_label { + cursor: pointer; +} + .status table { border-collapse: collapse; width: 100%; diff --git a/locust/stats.py b/locust/stats.py index 7dc3b32003..880d50e284 100644 --- a/locust/stats.py +++ b/locust/stats.py @@ -99,7 +99,7 @@ def log_error(self, error): @property def fail_ratio(self): try: - return float(self.num_failures) / self.num_reqs + return float(self.num_failures) / (self.num_reqs + self.num_failures) except ZeroDivisionError: if self.num_failures > 0: return 1.0 @@ -162,7 +162,7 @@ def iadd_stats(self, other, full_request_history=False): self.max_response_time = max(self.max_response_time, other.max_response_time) self._min_response_time = min(self._min_response_time, other._min_response_time) or other._min_response_time self.total_content_length = self.total_content_length + other.total_content_length - + if full_request_history: for key in other.response_times: self.response_times[key] = self.response_times.get(key, 0) + other.response_times[key] diff --git a/locust/templates/index.html b/locust/templates/index.html index c02a94fc4d..303d275763 100644 --- a/locust/templates/index.html +++ b/locust/templates/index.html @@ -17,8 +17,11 @@
{{user_count}} users
- New test + New test Edit + {% if ramp %} + Ramp + {% endif %} {% if is_distributed %}
@@ -57,10 +60,17 @@

Start new Locust swarm



- +
+
+
+ {% if ramp %} + + ...or let locust ramp find the highest stable locust count for you + {% endif %} +
@@ -74,11 +84,50 @@

Change the locust count



- +
+ + {% if ramp %} +
+
+ Close +
+
+

Ramping

+
+
+ +
+ +
+ +
+ +
+ +
+
+
+ +
+ +
+ +
+ +
+

+ +
+ +
+
+
+
+ {% endif %}