Merge pull request #2040 from mgor/bug/typing
fixing mypy errors with loosest rules
cyberw authored Mar 8, 2022
2 parents ce15abe + 1694837 commit 6bddf09
Showing 22 changed files with 124 additions and 72 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -3,11 +3,14 @@ locust.wpr
locust.egg-info/**
locustio.egg-info/**
locust/_version.py
locust/test/mock_*.py
docs/_build/**
docs/cli-help-output.txt
docs/config-options.rst
mock.*.egg
web_test_*.csv
err.txt
out.txt
.eggs/
dist/**
.idea/**
6 changes: 4 additions & 2 deletions locust/clients.py
@@ -11,6 +11,8 @@

from .exception import CatchResponseError, LocustError, ResponseError

from typing import Union, Optional, Generator

absolute_http_url_regexp = re.compile(r"^https?://", re.I)


@@ -78,7 +80,7 @@ def _build_url(self, path):
return f"{self.base_url}{path}"

@contextmanager
def rename_request(self, name: str):
def rename_request(self, name: str) -> Generator[None, None, None]:
"""Group requests using the "with" keyword"""

self.request_name = name
@@ -215,7 +217,7 @@ class ResponseContextManager(LocustResponse):
:py:meth:`failure <locust.clients.ResponseContextManager.failure>`.
"""

_manual_result = None
_manual_result: Optional[Union[bool, Exception]] = None
_entered = False

def __init__(self, response, request_event, request_meta):
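
The clients.py hunks above add annotations rather than change behaviour. A minimal, self-contained sketch of the two patterns, using a hypothetical Client class rather than Locust's real HttpSession and ResponseContextManager:

from contextlib import contextmanager
from typing import Generator, Optional, Union


class Client:
    # An attribute that may later hold a bool or an Exception needs an explicit
    # annotation; otherwise mypy infers its type from the None default alone.
    _manual_result: Optional[Union[bool, Exception]] = None

    def __init__(self) -> None:
        self.request_name: Optional[str] = None

    @contextmanager
    def rename_request(self, name: str) -> Generator[None, None, None]:
        # A @contextmanager function that yields nothing and returns nothing is
        # annotated Generator[None, None, None] (yield type, send type, return type).
        self.request_name = name
        try:
            yield
        finally:
            self.request_name = None


client = Client()
with client.rename_request("grouped"):
    client._manual_result = True  # bool is allowed by the Union
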
9 changes: 5 additions & 4 deletions locust/contrib/fasthttp.py
@@ -6,6 +6,7 @@
from urllib.parse import urlparse, urlunparse
from ssl import SSLError
import time
from typing import Optional

from http.cookiejar import CookieJar

@@ -83,7 +84,7 @@ def __init__(self, environment: Environment, base_url: str, user: "FastHttpUser"
# Check for basic authentication
parsed_url = urlparse(self.base_url)
if parsed_url.username and parsed_url.password:
netloc = parsed_url.hostname
netloc = parsed_url.hostname or ""
if parsed_url.port:
netloc += ":%d" % parsed_url.port

@@ -328,11 +329,11 @@ class FastResponse(CompatResponse):

_response = None

encoding: str = None
encoding: Optional[str] = None
"""In some cases setting the encoding explicitly is needed. If so, do it before calling .text"""

@property
def text(self) -> str:
def text(self) -> Optional[str]:
"""
Returns the text content of the response as a decoded string
"""
@@ -349,7 +350,7 @@ def json(self) -> dict:
"""
Parses the response as json and returns a dict
"""
return json.loads(self.text)
return json.loads(self.text) # type: ignore

def raise_for_status(self):
"""Raise any connection errors that occurred during the request"""
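
The fasthttp.py changes follow from attributes that can legitimately be None: encoding and text become Optional, and json() keeps its behaviour but silences the resulting mypy complaint. A rough sketch with a made-up Response class, not FastResponse itself:

import json
from typing import Any, Optional
from urllib.parse import urlparse


class Response:
    encoding: Optional[str] = None  # may never be set explicitly
    _content: Optional[bytes] = None

    @property
    def text(self) -> Optional[str]:
        # No body means no text, so the property is Optional rather than str.
        if self._content is None:
            return None
        return self._content.decode(self.encoding or "utf-8")

    def json(self) -> Any:
        # self.text may be None; the diff keeps the call as-is and adds
        # "# type: ignore" instead of changing runtime behaviour.
        return json.loads(self.text)  # type: ignore


resp = Response()
assert resp.text is None

# The "or ''" idiom used for netloc narrows Optional[str] down to str:
parsed = urlparse("https://user:secret@example.com:8443/")
netloc: str = parsed.hostname or ""
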
10 changes: 4 additions & 6 deletions locust/debug.py
@@ -3,7 +3,7 @@
import inspect
import locust
from locust import User, argument_parser
from typing import Type
from typing import Type, Optional
from locust.env import Environment
from locust.exception import CatchResponseError, RescheduleTask

@@ -64,8 +64,6 @@ def on_request(
errortext = e[:500].replace("\n", " ")
else:
errortext = ""
if not context:
context = ""

if response_time is None:
response_time = -1
@@ -87,15 +85,15 @@ def on_request(
_print_t(errortext.ljust(9))

if self.include_context:
_print_t(context)
_print_t(context or "")

if self.include_payload:
_print_t(response._request.payload)

print()


_env: Environment = None # minimal Environment for debugging
_env: Optional[Environment] = None # minimal Environment for debugging


def run_single_user(
@@ -144,5 +142,5 @@ def run_single_user(

# game on!
user = user_class(_env)
_env.single_user_instance = user # if you happen to need access to this from the Environment instance
setattr(_env, "single_user_instance", user) # if you happen to need access to this from the Environment instance
user.run()
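
The last debug.py change swaps a plain attribute assignment for setattr(). A small sketch of why, with stand-in classes rather than Locust's real Environment and User:

from typing import Optional


class Environment:
    """Stand-in for locust.env.Environment; it declares no single_user_instance attribute."""


# With mypy's default strict-optional behaviour, "Environment = None" without
# Optional is rejected, hence the annotation change in the diff.
_env: Optional[Environment] = None


def run_single_user() -> None:
    global _env
    _env = Environment()
    user = object()
    # A direct "_env.single_user_instance = user" makes mypy report a missing
    # attribute on Environment; setattr() performs the same assignment without
    # triggering that check.
    setattr(_env, "single_user_instance", user)


run_single_user()
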
40 changes: 23 additions & 17 deletions locust/dispatch.py
@@ -5,10 +5,9 @@
import time
from collections.abc import Iterator
from operator import attrgetter
from typing import Dict, Generator, List, TYPE_CHECKING, Optional, Tuple, Type
from typing import Dict, Generator, List, TYPE_CHECKING, Optional, Tuple, Type, Set

import gevent
import typing

from roundrobin import smooth

@@ -62,13 +61,13 @@ def __init__(self, worker_nodes: "List[WorkerNode]", user_classes: List[Type[Use
assert len(user_classes) > 0
assert len(set(self._user_classes)) == len(self._user_classes)

self._target_user_count = None
self._target_user_count: int = None

self._spawn_rate = None
self._spawn_rate: float = None

self._user_count_per_dispatch_iteration = None
self._user_count_per_dispatch_iteration: int = None

self._wait_between_dispatch = None
self._wait_between_dispatch: float = None

self._initial_users_on_workers = {
worker_node.id: {user_class.__name__: 0 for user_class in self._user_classes}
@@ -77,18 +76,18 @@ def __init__(self, worker_nodes: "List[WorkerNode]", user_classes: List[Type[Use

self._users_on_workers = self._fast_users_on_workers_copy(self._initial_users_on_workers)

self._current_user_count = sum(map(sum, map(dict.values, self._users_on_workers.values())))
self._current_user_count = self.get_current_user_count()

self._dispatcher_generator = None
self._dispatcher_generator: Generator[Dict[str, Dict[str, int]], None, None] = None

self._user_generator = self._user_gen()

self._worker_node_generator = itertools.cycle(self._worker_nodes)

# To keep track of how long it takes for each dispatch iteration to compute
self._dispatch_iteration_durations = []
self._dispatch_iteration_durations: List[float] = []

self._active_users = []
self._active_users: List[Tuple[WorkerNode, str]] = []

# TODO: Test that attribute is set when dispatching and unset when done dispatching
self._dispatch_in_progress = False
@@ -99,6 +98,10 @@ def __init__(self, worker_nodes: "List[WorkerNode]", user_classes: List[Type[Use

self._no_user_to_spawn = False

def get_current_user_count(self) -> int:
# need to ignore type due to https://github.com/python/mypy/issues/1507
return sum(map(sum, map(dict.values, self._users_on_workers.values()))) # type: ignore

@property
def dispatch_in_progress(self):
return self._dispatch_in_progress
@@ -179,7 +182,7 @@ def new_dispatch(self, target_user_count: int, spawn_rate: float) -> None:

self._users_on_workers = self._fast_users_on_workers_copy(self._initial_users_on_workers)

self._current_user_count = sum(map(sum, map(dict.values, self._users_on_workers.values())))
self._current_user_count = self.get_current_user_count()

self._dispatcher_generator = self._dispatcher()

@@ -231,7 +234,7 @@ def _prepare_rebalance(self) -> None:
self._rebalance = True

@contextlib.contextmanager
def _wait_between_dispatch_iteration_context(self) -> None:
def _wait_between_dispatch_iteration_context(self) -> Generator[None, None, None]:
t0_rel = time.perf_counter()

# We don't use `try: ... finally: ...` because we don't want to sleep
@@ -301,7 +304,7 @@ def _get_user_current_count(self, user: str) -> int:

def _distribute_users(
self, target_user_count: int
) -> Tuple[dict, Generator[str, None, None], typing.Iterator["WorkerNode"], List[Tuple["WorkerNode", str]]]:
) -> Tuple[
Dict[str, Dict[str, int]], Generator[Optional[str], None, None], itertools.cycle, List[Tuple["WorkerNode", str]]
]:
"""
This function might take some time to complete if the `target_user_count` is a big number. A big number
is typically > 50 000. However, this function is only called if a worker is added or removed while a test
@@ -330,7 +335,7 @@ def _distribute_users(

return users_on_workers, user_gen, worker_gen, active_users

def _user_gen(self) -> Generator[str, None, None]:
def _user_gen(self) -> Generator[Optional[str], None, None]:
"""
This method generates users according to their weights using
a smooth weighted round-robin algorithm implemented by https://github.com/linnik/roundrobin.
@@ -342,7 +347,7 @@ def _user_gen(self) -> Generator[str, None, None]:
less accurate during ramp-up/down.
"""

def infinite_cycle_gen(users: List[Tuple[User, int]]) -> Generator[Optional[str], None, None]:
def infinite_cycle_gen(users: List[Tuple[Type[User], int]]) -> itertools.cycle:
if not users:
return itertools.cycle([None])

@@ -382,7 +387,7 @@ def infinite_cycle_gen(users: List[Tuple[User, int]]) -> Generator[Optional[str]
if self._try_dispatch_fixed:
self._try_dispatch_fixed = False
current_fixed_users_count = {u: self._get_user_current_count(u) for u in fixed_users}
spawned_classes = set()
spawned_classes: Set[str] = set()
while len(spawned_classes) != len(fixed_users):
user_name = next(cycle_fixed_gen)
if not user_name:
@@ -409,4 +414,5 @@ def _fast_users_on_workers_copy(users_on_workers: Dict[str, Dict[str, int]]) ->
The implementation was profiled and compared to other implementations such as dict-comprehensions
and the one below is the most efficient.
"""
return dict(zip(users_on_workers.keys(), map(dict.copy, users_on_workers.values())))
# type is ignored due to: https://github.com/python/mypy/issues/1507
return dict(zip(users_on_workers.keys(), map(dict.copy, users_on_workers.values()))) # type: ignore
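
dispatch.py gets the bulk of the annotations. The recurring shape is a nested mapping of worker id to per-user-class counts, and two spots keep their map()/sum() and dict.copy() forms with "# type: ignore" pointing at the mypy issue linked in the comments. A freestanding sketch of that counting pattern; the names are illustrative, not Locust's API:

import itertools
from typing import Dict

UsersOnWorkers = Dict[str, Dict[str, int]]  # worker id -> {user class name: count}


def get_current_user_count(users_on_workers: UsersOnWorkers) -> int:
    # Sum every per-class count across every worker. mypy cannot type the nested
    # map()/sum() composition cleanly, which is why the original code carries a
    # "# type: ignore" and a link to the relevant mypy issue.
    return sum(map(sum, map(dict.values, users_on_workers.values())))  # type: ignore


def copy_users_on_workers(users_on_workers: UsersOnWorkers) -> UsersOnWorkers:
    # Shallow-copies each inner dict; the original code notes this form was
    # profiled as faster than a dict comprehension.
    return dict(zip(users_on_workers.keys(), map(dict.copy, users_on_workers.values())))  # type: ignore


users: UsersOnWorkers = {
    "worker-1": {"WebUser": 3, "ApiUser": 2},
    "worker-2": {"WebUser": 1},
}
assert get_current_user_count(users) == 6
assert copy_users_on_workers(users) == users

# The worker round-robin is an itertools.cycle, which is also the annotation the
# diff uses for that element of the tuple returned by _distribute_users.
worker_gen: itertools.cycle = itertools.cycle(["worker-1", "worker-2"])
assert next(worker_gen) == "worker-1"
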
9 changes: 5 additions & 4 deletions locust/env.py
@@ -6,6 +6,7 @@
Type,
TypeVar,
Union,
Optional,
)

from configargparse import Namespace
@@ -40,13 +41,13 @@ def __init__(
parsed_options: Namespace = None,
):

self.runner: Runner = None
self.runner: Optional[Runner] = None
"""Reference to the :class:`Runner <locust.runners.Runner>` instance"""

self.web_ui: WebUI = None
self.web_ui: Optional[WebUI] = None
"""Reference to the WebUI instance"""

self.process_exit_code: int = None
self.process_exit_code: Optional[int] = None
"""
If set it'll be the exit code of the Locust process
"""
@@ -112,7 +113,7 @@ def _create_runner(
) -> RunnerType:
if self.runner is not None:
raise RunnerAlreadyExistsError(f"Environment.runner already exists ({self.runner})")
self.runner: RunnerType = runner_class(self, *args, **kwargs)
self.runner = runner_class(self, *args, **kwargs)

# Attach the runner to the shape class so that the shape class can access user count state
if self.shape_class:
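
The env.py pattern is the same Optional story: attributes that only receive a real value later are declared Optional up front, and the later assignment in _create_runner drops its redundant annotation. A compact sketch with placeholder classes:

from typing import Optional


class Runner:
    """Stand-in for locust.runners.Runner."""


class Environment:
    def __init__(self) -> None:
        # Declared Optional because they stay None until something creates them.
        self.runner: Optional[Runner] = None
        self.process_exit_code: Optional[int] = None

    def create_runner(self) -> Runner:
        if self.runner is not None:
            raise RuntimeError(f"runner already exists ({self.runner})")
        # Plain assignment: annotating self.runner again here, with a different
        # type than in __init__, is what mypy objected to.
        self.runner = Runner()
        return self.runner


env = Environment()
env.create_runner()
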
4 changes: 2 additions & 2 deletions locust/input_events.py
@@ -1,4 +1,4 @@
from typing import Dict
from typing import Dict, Callable

import gevent
import logging
@@ -88,7 +88,7 @@ def get_poller():
return UnixKeyPoller()


def input_listener(key_to_func_map: Dict[str, callable]):
def input_listener(key_to_func_map: Dict[str, Callable]):
def input_listener_func():
try:
poller = get_poller()
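
The input_events.py fix is purely about the annotation: the builtin callable() is a function, not a type, so mypy rejects Dict[str, callable], while typing.Callable is the valid spelling. A tiny sketch:

from typing import Callable, Dict


def input_listener(key_to_func_map: Dict[str, Callable]) -> Callable[[], None]:
    # Callable is the typing construct for "any callable"; the lowercase builtin
    # callable is not usable as a type annotation.
    def input_listener_func() -> None:
        for key, func in key_to_func_map.items():
            func()

    return input_listener_func


listener = input_listener({"w": lambda: print("ramp up"), "s": lambda: print("ramp down")})
listener()
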
4 changes: 2 additions & 2 deletions locust/runners.py
@@ -239,7 +239,7 @@ def stop_users(self, user_classes_stop_count: Dict[str, int]):
if self.user_classes_count[user_class] == 0:
continue

to_stop = []
to_stop: List[greenlet.greenlet] = []
for user_greenlet in self.user_greenlets:
if len(to_stop) == stop_count:
break
@@ -1002,7 +1002,7 @@ def worker_count(self):

@property
def reported_user_classes_count(self) -> Dict[str, int]:
reported_user_classes_count = defaultdict(lambda: 0)
reported_user_classes_count: Dict[str, int] = defaultdict(lambda: 0)
for client in self.clients.ready + self.clients.spawning + self.clients.running:
for name, count in client.user_classes_count.items():
reported_user_classes_count[name] += count
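
Both runners.py changes annotate containers that start out empty, since mypy cannot infer an element type from "[]" or from defaultdict(lambda: 0). A short sketch; it assumes the greenlet package, which gevent pulls in, is installed:

from collections import defaultdict
from typing import Dict, List

import greenlet


def pick_greenlets_to_stop(user_greenlets: List[greenlet.greenlet], stop_count: int) -> List[greenlet.greenlet]:
    # An empty literal gives mypy no element type, hence the explicit annotation.
    to_stop: List[greenlet.greenlet] = []
    for g in user_greenlets:
        if len(to_stop) == stop_count:
            break
        to_stop.append(g)
    return to_stop


# Likewise, the annotation supplies the key/value types that the "lambda: 0"
# default factory does not convey on its own.
reported_user_classes_count: Dict[str, int] = defaultdict(lambda: 0)
reported_user_classes_count["WebUser"] += 3

assert pick_greenlets_to_stop([], 1) == []
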
2 changes: 1 addition & 1 deletion locust/shape.py
@@ -9,7 +9,7 @@ class LoadTestShape:
during a load test.
"""

runner: Runner = None
runner: Optional[Runner] = None
"""Reference to the :class:`Runner <locust.runners.Runner>` instance"""

def __init__(self):
13 changes: 7 additions & 6 deletions locust/stats.py
@@ -8,8 +8,9 @@
import csv
import signal
import gevent
from typing import Dict, Tuple

from .exception import StopUser, CatchResponseError
from .exception import CatchResponseError

import logging

@@ -129,8 +130,8 @@ def __init__(self, use_response_times_cache=True):
is not needed.
"""
self.use_response_times_cache = use_response_times_cache
self.entries: dict[str, StatsEntry] = {}
self.errors: dict[str, StatsError] = {}
self.entries: Dict[Tuple[str, str], StatsEntry] = {}
self.errors: Dict[str, StatsError] = {}
self.total = StatsEntry(self, "Aggregated", None, use_response_times_cache=self.use_response_times_cache)
self.history = []

@@ -240,11 +241,11 @@ def __init__(self, stats: RequestStats, name: str, method: str, use_response_tim
""" Minimum response time """
self.max_response_time = 0
""" Maximum response time """
self.num_reqs_per_sec = {}
self.num_reqs_per_sec: Dict[int, int] = {}
""" A {second => request_count} dict that holds the number of requests made per second """
self.num_fail_per_sec = {}
self.num_fail_per_sec: Dict[int, int] = {}
""" A (second => failure_count) dict that hold the number of failures per second """
self.response_times = {}
self.response_times: Dict[int, int] = {}
"""
A {response_time => count} dict that holds the response time distribution of all
the requests.
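
In stats.py the interesting part is the key type: entries are looked up by a (name, method) pair, so the old dict[str, StatsEntry] annotation understated the key, and the empty per-second counters get explicit Dict[int, int] types. A stripped-down sketch, not the real StatsEntry:

from typing import Dict, Tuple


class StatsEntry:
    def __init__(self, name: str, method: str) -> None:
        self.name = name
        self.method = method
        # Empty dicts need explicit annotations for mypy to know their shapes.
        self.num_reqs_per_sec: Dict[int, int] = {}  # second -> request count
        self.num_fail_per_sec: Dict[int, int] = {}  # second -> failure count
        self.response_times: Dict[int, int] = {}    # response time -> count


# Keys are (name, method) pairs, hence Dict[Tuple[str, str], StatsEntry].
entries: Dict[Tuple[str, str], StatsEntry] = {}
entries[("/login", "POST")] = StatsEntry("/login", "POST")
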
11 changes: 9 additions & 2 deletions locust/test/__init__.py
@@ -3,6 +3,13 @@

# work around occasional "zmq.error.ZMQError: Too many open files"
# this is done in main.py when running locust proper so we need to do it here as well
resource.setrlimit(resource.RLIMIT_NOFILE, [10000, resource.RLIM_INFINITY])
resource.setrlimit(
resource.RLIMIT_NOFILE,
(
10000,
resource.RLIM_INFINITY,
),
)
changed_rlimit = True
except Exception:
pass # Some OS:es will not allow changing NOFILE, but let's ignore that
changed_rlimit = False
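
The last visible change only reshapes the setrlimit() call: the limits argument is typed as a (soft, hard) pair, so the list literal that worked at runtime is swapped for a tuple to satisfy mypy. Roughly, keeping in mind that resource is a Unix-only module and raising the hard limit is typically refused for non-root users:

import resource

try:
    # setrlimit's limits parameter is declared as a pair of integers; a tuple
    # matches that declaration, while the previous list only worked at runtime.
    resource.setrlimit(resource.RLIMIT_NOFILE, (10000, resource.RLIM_INFINITY))
    changed_rlimit = True
except Exception:
    changed_rlimit = False  # some systems refuse to change NOFILE; ignore, as the original does
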