diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index a857ea9..d909f88 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -33,3 +33,8 @@ jobs: env_vars: OS,PYTHON fail_ci_if_error: true verbose: true + - name: Run benchmarks + uses: CodSpeedHQ/action@v2 + with: + token: ${{ secrets.CODSPEED_TOKEN }} + run: pixi run test --codspeed diff --git a/pixi.toml b/pixi.toml index 02bb08f..2a61f6c 100644 --- a/pixi.toml +++ b/pixi.toml @@ -8,7 +8,8 @@ channels = ["conda-forge"] platforms = ["win-64", "linux-64"] [tasks] -postinstall = "pip install --upgrade --no-build-isolation --no-deps --disable-pip-version-check -e ." +# install packages not on conda-forge +postinstall = "pip install --upgrade --no-build-isolation --no-deps --disable-pip-version-check -e . && pip install pytest-codspeed mkdocs-bibtex" doc = "mkdocs serve" test = "pytest -v tests/ --cov=src --cov-report=xml" lint = "pre-commit run --all" diff --git a/tests/test_classical_classifier.py b/tests/test_classical_classifier.py index df5c6f1..8e0c33c 100644 --- a/tests/test_classical_classifier.py +++ b/tests/test_classical_classifier.py @@ -304,6 +304,7 @@ def _apply_rule( ) assert (y_pred == y_test).all() + @pytest.mark.benchmark() @pytest.mark.parametrize("subset", ["all", "ex"]) def test_tick_rule(self, x_train: pd.DataFrame, subset: str) -> None: """Test, if tick rule is correctly applied. @@ -323,6 +324,7 @@ def test_tick_rule(self, x_train: pd.DataFrame, subset: str) -> None: y_test = pd.Series([-1, 1, 1, -1]) self._apply_rule(x_train, x_test, y_test, [("tick", subset)], 7) + @pytest.mark.benchmark() @pytest.mark.parametrize("subset", ["all", "ex"]) def test_rev_tick_rule(self, x_train: pd.DataFrame, subset: str) -> None: """Test, if rev. tick rule is correctly applied. 
@@ -341,6 +343,7 @@ def test_rev_tick_rule(self, x_train: pd.DataFrame, subset: str) -> None: y_test = pd.Series([-1, 1, 1, -1]) self._apply_rule(x_train, x_test, y_test, [("rev_tick", subset)], 7) + @pytest.mark.benchmark() @pytest.mark.parametrize("subset", ["best", "ex"]) def test_quote_rule(self, x_train: pd.DataFrame, subset: str) -> None: """Test, if quote rule is correctly applied. @@ -366,6 +369,7 @@ def test_quote_rule(self, x_train: pd.DataFrame, subset: str) -> None: y_test = pd.Series([-1, 1, 1, -1, -1, 1]) self._apply_rule(x_train, x_test, y_test, [("quote", subset)], 45) + @pytest.mark.benchmark() @pytest.mark.parametrize("subset", ["best", "ex"]) def test_lr(self, x_train: pd.DataFrame, subset: str) -> None: """Test, if the lr algorithm is correctly applied. @@ -389,6 +393,7 @@ def test_lr(self, x_train: pd.DataFrame, subset: str) -> None: y_test = pd.Series([-1, 1, 1, -1]) self._apply_rule(x_train, x_test, y_test, [("lr", subset)], 7) + @pytest.mark.benchmark() @pytest.mark.parametrize("subset", ["best", "ex"]) def test_rev_lr(self, x_train: pd.DataFrame, subset: str) -> None: """Test, if the rev. lr algorithm is correctly applied. @@ -419,6 +424,7 @@ def test_rev_lr(self, x_train: pd.DataFrame, subset: str) -> None: y_test = pd.Series([-1, 1, 1, -1, -1, 1]) self._apply_rule(x_train, x_test, y_test, [("rev_lr", subset)], 42) + @pytest.mark.benchmark() @pytest.mark.parametrize("subset", ["best", "ex"]) def test_emo(self, x_train: pd.DataFrame, subset: str) -> None: """Test, if the emo algorithm is correctly applied. @@ -449,6 +455,7 @@ def test_emo(self, x_train: pd.DataFrame, subset: str) -> None: y_test = pd.Series([-1, 1, 1, -1, -1, 1]) self._apply_rule(x_train, x_test, y_test, [("emo", subset)], 42) + @pytest.mark.benchmark() @pytest.mark.parametrize("subset", ["best", "ex"]) def test_rev_emo(self, x_train: pd.DataFrame, subset: str) -> None: """Test, if the rev. emo algorithm is correctly applied. 
@@ -479,6 +486,7 @@ def test_rev_emo(self, x_train: pd.DataFrame, subset: str) -> None: y_test = pd.Series([-1, 1, 1, -1, -1, 1]) self._apply_rule(x_train, x_test, y_test, [("rev_emo", subset)], 42) + @pytest.mark.benchmark() @pytest.mark.parametrize("subset", ["best", "ex"]) def test_clnv(self, x_train: pd.DataFrame, subset: str) -> None: """Test, if the clnv algorithm is correctly applied. @@ -509,6 +517,7 @@ def test_clnv(self, x_train: pd.DataFrame, subset: str) -> None: y_test = pd.Series([1, -1, 1, -1, 1, -1]) self._apply_rule(x_train, x_test, y_test, [("clnv", subset)], 42) + @pytest.mark.benchmark() @pytest.mark.parametrize("subset", ["best", "ex"]) def test_rev_clnv(self, x_train: pd.DataFrame, subset: str) -> None: """Test, if the rev. clnv algorithm is correctly applied. @@ -538,6 +547,7 @@ def test_rev_clnv(self, x_train: pd.DataFrame, subset: str) -> None: y_test = pd.Series([1, -1, 1, -1, 1, -1]) self._apply_rule(x_train, x_test, y_test, [("rev_clnv", subset)], 5) + @pytest.mark.benchmark() def test_trade_size(self, x_train: pd.DataFrame) -> None: """Test, if the trade size algorithm is correctly applied. @@ -558,6 +568,7 @@ def test_trade_size(self, x_train: pd.DataFrame) -> None: y_test = pd.Series([-1, 1, -1, 1, -1, 1]) self._apply_rule(x_train, x_test, y_test, [("trade_size", "ex")], 42) + @pytest.mark.benchmark() def test_depth(self, x_train: pd.DataFrame) -> None: """Test, if the depth rule is correctly applied.