Fix lint issue C0303 (trailing whitespace)
Relates elastic#838
hub-cap committed Jan 8, 2020
1 parent e5da703 commit b208b93
Showing 8 changed files with 25 additions and 24 deletions.
14 changes: 7 additions & 7 deletions esrally/driver/scheduler.py
@@ -27,10 +27,10 @@

def scheduler_for(name, params):
"""
- Creates a scheduler instance
+ Creates a scheduler instance
:param name: The name under which the scheduler is registered.
- :param params: A dict containing the parameters for this scheduler instance.
+ :param params: A dict containing the parameters for this scheduler instance.
:return: An initialized scheduler instance.
"""
try:
@@ -43,10 +43,10 @@ def scheduler_for(name, params):
def register_scheduler(name, scheduler):
"""
Registers a new scheduler. Attempting to register a scheduler with a name that is already taken will raise a ``SystemSetupError``.
:param name: The name under which to register the scheduler.
:param scheduler: Either a unary function ``float`` -> ``float`` or a class with the same interface as ``Scheduler``.
"""
logger = logging.getLogger(__name__)
if name in __SCHEDULERS:
@@ -117,10 +117,10 @@ def __str__(self):

class PoissonScheduler(Scheduler):
"""
- Schedules the next execution according to a `Poisson distribution <https://en.wikipedia.org/wiki/Poisson_distribution>`_. A Poisson
+ Schedules the next execution according to a `Poisson distribution <https://en.wikipedia.org/wiki/Poisson_distribution>`_. A Poisson
distribution models random independent arrivals of clients which on average match the expected arrival rate which makes it suitable
for modelling access in open systems.
See also http://preshing.com/20111007/how-to-generate-random-timings-for-a-poisson-process/
"""

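The docstrings touched in this file describe a small registration API: register_scheduler() stores a scheduler under a name, and scheduler_for() later creates an instance by that name. A minimal usage sketch under those docstrings' contract; the scheduler name "my-fixed-gap" and the gap value are illustrative, and the exact meaning of the float argument and return value of a function-based scheduler is not shown in this excerpt:

from esrally.driver import scheduler

# A unary function float -> float, one of the two forms register_scheduler() accepts.
def fixed_gap(current):
    return current + 0.5

# Registering under an already-taken name raises SystemSetupError, per the docstring.
scheduler.register_scheduler("my-fixed-gap", fixed_gap)

# params is a dict of scheduler-specific options; an initialized scheduler instance is returned.
s = scheduler.scheduler_for("my-fixed-gap", params={})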
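The PoissonScheduler docstring (and the linked preshing.com article) point to the standard way of generating such timings: inter-arrival times of a Poisson process are exponentially distributed. A self-contained illustration of that idea, not the class's actual implementation; the rate value is arbitrary:

import random

rate = 10.0  # expected arrivals per second (illustrative)

# Wait time until the next arrival: exponentially distributed with mean 1/rate.
wait_seconds = random.expovariate(rate)
print(f"next request in {wait_seconds:.3f}s")
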
2 changes: 1 addition & 1 deletion esrally/mechanic/launcher.py
@@ -169,7 +169,7 @@ def _prepare_env(self, car_env, node_name, java_home, t):
# Don't merge here!
env["JAVA_HOME"] = java_home
env["ES_JAVA_OPTS"] = "-XX:+ExitOnOutOfMemoryError"

# we just blindly trust telemetry here...
for v in t.instrument_candidate_java_opts():
self._set_env(env, "ES_JAVA_OPTS", v)
4 changes: 2 additions & 2 deletions esrally/mechanic/provisioner.py
@@ -34,7 +34,7 @@ def local(cfg, car, plugins, cluster_settings, ip, http_port, all_node_ips, all_
node_root_dir = os.path.join(target_root, node_name)

_, java_home = java_resolver.java_home(car.mandatory_var("runtime.jdk"), cfg)

es_installer = ElasticsearchInstaller(car, java_home, node_name, node_root_dir, all_node_ips, all_node_names, ip, http_port)
plugin_installers = [PluginInstaller(plugin, java_home) for plugin in plugins]

@@ -227,7 +227,7 @@ def _provisioner_variables(self):
provisioner_vars.update(self.es_installer.variables)
provisioner_vars.update(plugin_variables)
provisioner_vars["cluster_settings"] = cluster_settings

return provisioner_vars


2 changes: 1 addition & 1 deletion esrally/racecontrol.py
@@ -197,7 +197,7 @@ def setup(self, msg, sender):
distribution_version = mechanic.cluster_distribution_version(self.cfg)
self.logger.info("Automatically derived distribution version [%s]", distribution_version)
self.cfg.add(config.Scope.benchmark, "mechanic", "distribution.version", distribution_version)

t = track.load_track(self.cfg)
self.track_revision = self.cfg.opts("track", "repository.revision", mandatory=False)
challenge_name = self.cfg.opts("track", "challenge.name")
2 changes: 1 addition & 1 deletion esrally/rally.py
@@ -454,7 +454,7 @@ def positive_number(v):
"--distribution-repository",
help="Define the repository from where the Elasticsearch distribution should be downloaded (default: release).",
default="release")

task_filter_group = p.add_mutually_exclusive_group()
task_filter_group.add_argument(
"--include-tasks",
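For context on the add_mutually_exclusive_group() call shown above: argparse rejects any command line that sets more than one option from such a group. A generic, standalone sketch with illustrative flag names, not Rally's actual arguments:

import argparse

p = argparse.ArgumentParser()
group = p.add_mutually_exclusive_group()
group.add_argument("--only", help="run only the listed items")
group.add_argument("--skip", help="run everything except the listed items")

p.parse_args(["--only", "index"])              # accepted
# p.parse_args(["--only", "a", "--skip", "b"]) # argparse exits with a "not allowed with" error
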
2 changes: 1 addition & 1 deletion esrally/utils/jvm.py
@@ -29,7 +29,7 @@ def _java(java_home):
def supports_option(java_home, option):
"""
Detects support for a specific option (or combination of options) for the JVM version available in java_home.
:param java_home: The JAVA_HOME to use for probing.
:param option: The JVM option or combination of JVM options (separated by spaces) to check.
:return: True iff the provided ``option`` is supported on this JVM.
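A hedged usage sketch for the supports_option() helper whose docstring appears above; the JAVA_HOME path and the JVM flag below are placeholders, not values from this commit:

from esrally.utils import jvm

java_home = "/usr/lib/jvm/java-11-openjdk"   # placeholder path
flag = "-XX:+ExitOnOutOfMemoryError"         # placeholder option

# Probes the JVM found under java_home; True iff the option (or space-separated
# combination of options) is supported, per the docstring.
if jvm.supports_option(java_home, flag):
    print(f"{flag} is supported")
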
14 changes: 7 additions & 7 deletions tests/telemetry_test.py
@@ -1974,7 +1974,7 @@ def test_resilient_if_error_response(self, metrics_store_add_meta_info):


class DiskIoTests(TestCase):

@mock.patch("esrally.utils.sysstats.process_io_counters")
@mock.patch("esrally.metrics.EsMetricsStore.put_count_node_level")
def test_diskio_process_io_counters(self, metrics_store_node_count, process_io_counters):
@@ -1985,7 +1985,7 @@ def test_diskio_process_io_counters(self, metrics_store_node_count, process_io_c

cfg = create_config()
metrics_store = metrics.EsMetricsStore(cfg)

device = telemetry.DiskIo(node_count_on_host=1)
t = telemetry.Telemetry(enabled_devices=[], devices=[device])
node = cluster.Node(pid=None, binary_path="/bin", host_name="localhost", node_name="rally0", telemetry=t)
@@ -2000,9 +2000,9 @@ def test_diskio_process_io_counters(self, metrics_store_node_count, process_io_c
metrics_store_node_count.assert_has_calls([
mock.call("rally0", "disk_io_write_bytes", 1, "byte"),
mock.call("rally0", "disk_io_read_bytes", 1, "byte")

])

@mock.patch("esrally.utils.sysstats.disk_io_counters")
@mock.patch("esrally.utils.sysstats.process_io_counters")
@mock.patch("esrally.metrics.EsMetricsStore.put_count_node_level")
@@ -2012,10 +2012,10 @@ def test_diskio_disk_io_counters(self, metrics_store_node_count, process_io_coun
process_stop = Diskio(13, 13)
disk_io_counters.side_effect = [process_start, process_stop]
process_io_counters.side_effect = [None, None]

cfg = create_config()
metrics_store = metrics.EsMetricsStore(cfg)

device = telemetry.DiskIo(node_count_on_host=2)
t = telemetry.Telemetry(enabled_devices=[], devices=[device])
node = cluster.Node(pid=None, binary_path="/bin", host_name="localhost", node_name="rally0", telemetry=t)
@@ -2027,7 +2027,7 @@ def test_diskio_disk_io_counters(self, metrics_store_node_count, process_io_coun
t.detach_from_node(node, running=False)
t.store_system_metrics(node, metrics_store)

- # expected result is 1 byte because there are two nodes on the machine. Result is calculated
+ # expected result is 1 byte because there are two nodes on the machine. Result is calculated
# with total_bytes / node_count
metrics_store_node_count.assert_has_calls([
mock.call("rally0", "disk_io_write_bytes", 1, "byte"),
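The comment in the last hunk states the expectation: with machine-wide disk counters and two nodes on the host, the byte delta is attributed evenly as total_bytes / node_count. A self-contained sketch of that arithmetic; the start sample is hypothetical (chosen so the delta is 2 bytes), and this is not the telemetry device's actual code:

from collections import namedtuple

Diskio = namedtuple("Diskio", ["read_bytes", "write_bytes"])

start = Diskio(read_bytes=11, write_bytes=11)   # hypothetical start sample
stop = Diskio(read_bytes=13, write_bytes=13)    # the stop sample used in the test
node_count_on_host = 2

# total_bytes / node_count, as the test comment describes
write_bytes_per_node = (stop.write_bytes - start.write_bytes) // node_count_on_host
print(write_bytes_per_node)  # 1 byte per node, the value asserted in the test
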
9 changes: 5 additions & 4 deletions tests/track/loader_test.py
@@ -732,6 +732,7 @@ def dummy_read_glob(c):
base_path = "~/.rally/benchmarks/tracks/default/geonames"
template_file_name = "track.json"
tmpl_src = loader.TemplateSource(base_path, template_file_name)
+ # pylint: disable=trailing-whitespace
expected_response = textwrap.dedent('''
{% import "rally.helpers" as rally with context %}
{
@@ -1896,9 +1897,9 @@ def test_parse_valid_track_specification(self):
{
"settings": {
"number_of_shards": {{ number_of_shards }}
- },
+ },
"mappings": {
"main": "empty-for-test",
"main": "empty-for-test",
"secondary": "empty-for-test"
}
}
@@ -2068,8 +2069,8 @@ def test_parse_valid_track_specification_with_index_template(self):
{
"index_patterns": [ "{{index_pattern}}"],
"settings": {
"number_of_shards": {{ number_of_shards | default(1) }}
}
"number_of_shards": {{ number_of_shards | default(1) }}
}
}
"""],
}))
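The template fixtures above rely on Jinja2's default filter: when the variable is undefined, the fallback value is substituted; otherwise the supplied value wins. A standalone illustration, separate from the loader's actual rendering path:

import jinja2

template = jinja2.Template('{"number_of_shards": {{ number_of_shards | default(1) }}}')

print(template.render())                     # {"number_of_shards": 1}
print(template.render(number_of_shards=5))   # {"number_of_shards": 5}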
