diff --git a/CHANGELOG.md b/CHANGELOG.md index 5304992..66e3fc4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,8 +4,13 @@ All notable changes to this project will be documented in this file. The format is loosely based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [Unreleased] +### Added +- Option to choose between three different strategies to translate short-term + ratings into scores and vice versa ([#24](https://github.com/hsbc/pyratings/pull/24)). + ### Changed -- BREAKING CHANGE: Automatic column naming +- BREAKING CHANGE: Automatic column naming + ([#9](https://github.com/hsbc/pyratings/issues/9)). - ``get_scores_from_ratings()`` When input a ``pd.Series``, the name of the output series will now become ``ratings.name`` prefixed with "rtg_score_". @@ -16,9 +21,14 @@ The format is loosely based on [Keep a Changelog](https://keepachangelog.com/en/ ``ratings.name`` prefixed with "warf_". When input a pd.DataFrame, the column names of the output frame will now become ``ratings.columns`` prefixed with "warf_". - +- BREAKING CHANGE: Translations of short-term ratings are now different + ([#16](https://github.com/hsbc/pyratings/issues/16)). + ### Improved -- Splitting the code base into multiple files in order to increase maintainability. +- Splitting the code base into multiple files in order to increase maintainability + ([#8](https://github.com/hsbc/pyratings/issues/8)). +- Internal checks have been improved + ([#20](https://github.com/hsbc/pyratings/issues/20)). - Documentation has been updated and will now be created via [mkdocs](https://www.mkdocs.org/) and [mkdocstrings](https://mkdocstrings.github.io/python/). diff --git a/README.md b/README.md index 95cacd2..724991c 100644 --- a/README.md +++ b/README.md @@ -19,50 +19,9 @@ _pyratings_ offers the following capabilities: * Compute Weighted Average Rating Factor (WARF) on a portfolio level. * Compute WARF buffer, i.e. distance from current WARF to next maxWARF. -Transformations from ratings to scores/WARF and vice versa will take place according -to the following translation table: +To get familiar with _pyratings'_ functionality, take a look at the +[Getting started](https://hsbc.github.io/pyratings/getting_started/) section of the +[documentation](https://hsbc.github.io/pyratings/). 
-| Moody’s | S&P | Fitch | ICE | DBRS | Bloomberg | Score | WARF | MinWARF* | MaxWARF* | -|:-------:|:----:|:-----:|:----:|:----:|:---------:|------:|------:|---------:|---------:| -| Aaa | AAA | AAA | AAA | AAA | AAA | 1 | 1 | 1 | 5 | -| Aa1 | AA+ | AA+ | AA+ | AAH | AA+ | 2 | 10 | 5 | 15 | -| Aa2 | AA | AA | AA | AA | AA | 3 | 20 | 15 | 30 | -| Aa3 | AA- | AA- | AA- | AAL | AA- | 4 | 40 | 30 | 55 | -| A1 | A+ | A+ | A+ | AH | A+ | 5 | 70 | 55 | 95 | -| A2 | A | A | A | A | A | 6 | 120 | 95 | 150 | -| A3 | A- | A- | A- | AL | A- | 7 | 180 | 150 | 220 | -| Baa1 | BBB+ | BBB+ | BBB+ | BBBH | BBB+ | 8 | 260 | 220 | 310 | -| Baa2 | BBB | BBB | BBB | BBB | BBB | 9 | 360 | 310 | 485 | -| Baa3 | BBB- | BBB- | BBB- | BBBL | BBB- | 10 | 610 | 485 | 775 | -| Ba1 | BB+ | BB+ | BB+ | BBH | BB+ | 11 | 940 | 775 | 1145 | -| Ba2 | BB | BB | BB | BB | BB | 12 | 1350 | 1145 | 1558 | -| Ba3 | BB- | BB- | BB- | BBL | BB- | 13 | 1766 | 1558 | 1993 | -| B1 | B+ | B+ | B+ | BH | B+ | 14 | 2220 | 1993 | 2470 | -| B2 | B | B | B | B | B | 15 | 2720 | 2470 | 3105 | -| B3 | B- | B- | B- | BL | B- | 16 | 3490 | 3105 | 4130 | -| Caa1 | CCC+ | CCC+ | CCC+ | CCCH | CCC+ | 17 | 4770 | 4130 | 5635 | -| Caa2 | CCC | CCC | CCC | CCC | CCC | 18 | 6500 | 5635 | 7285 | -| Caa3 | CCC- | CCC- | CCC- | CCCL | CCC- | 19 | 8070 | 7285 | 9034 | -| Ca | CC | CC | CC | CC | CC | 20 | 9998 | 9034 | 9998.5 | -| C | C | C | C | C | C | 21 | 9999 | 9998.5 | 9999.5 | -| D | D | D | D | D | DDD | 22 | 10000 | 9999.5 | 10000 | - -`MinWARF` is inclusive, while `MaxWARF` is exclusive. - -Short-term ratings - -| Moody’s | S&P | Fitch | DBRS | Score | -|:-------:|:----:|:-----:|:----------:| -----:| -| P-1 | A-1+ | F1+ | R-1 (high) | 1 | -| | | | R-1 (mid) | 2 | -| | | | R-1 (low) | 3 | -| | A-1 | F1 | R-2 (high) | 5 | -| | | | R-2 (mid) | 6 | -| P-2 | A-2 | F2 | R-2 (low) | 7 | -| | | | R-3 (high) | 8 | -| P-3 | A-3 | F3 | R-3 (mid) | 9 | -| | | | R-3 (low) | 10 | -| NP | B | | R-4 | 12 | -| | | | R-5 | 15 | -| | C | | | 18 | -| | D | | D | 22 | +Contributions are welcome. Please read the +[Contributing](https://hsbc.github.io/pyratings/contributing/) section. diff --git a/src/pyratings/get_ratings.py b/src/pyratings/get_ratings.py index 79c39ee..60eae70 100644 --- a/src/pyratings/get_ratings.py +++ b/src/pyratings/get_ratings.py @@ -81,6 +81,7 @@ def get_ratings_from_scores( rating_scores: Union[int, float, pd.Series, pd.DataFrame], rating_provider: Optional[Union[str, List[str]]] = None, tenor: str = "long-term", + short_term_strategy: Optional[str] = None, ) -> Union[str, pd.Series, pd.DataFrame]: """Convert numerical rating scores into regular ratings. @@ -96,6 +97,24 @@ def get_ratings_from_scores( column names. tenor Should contain any valid tenor out of {"long-term", "short-term"}. + short_term_strategy + Will only be used, if `tenor` is "short-term". Choose between three distinct + strategies in order to translate a long-term rating score into a short-term + rating. Must be in {"best", "base", "worst"}. + + Compare + https://hsbc.github.io/pyratings/short-term-rating/#there's-one-more-catch... + + - Strategy 1 (best): + Always choose the best possible short-term rating. That's the optimistic + approach. + - Strategy 2 (base-case): + Always choose the short-term rating that a rating agency would usually assign + if there aren't any special liquidity issues (positive or negative). That's + the base-case approach. + - Strategy 3 (worst): + Always choose the worst possible short-term rating. That's the conservative + approach. 
     Returns
     -------
@@ -109,15 +128,37 @@
 
     Examples
     --------
-    Converting a single rating score:
+    Converting a single long-term rating score:
 
     >>> get_ratings_from_scores(rating_scores=9, rating_provider="Fitch")
     'BBB'
 
+    Converting a single short-term rating score with different `short_term_strategy`
+    arguments:
+
+    >>> get_ratings_from_scores(
+    ...     rating_scores=10,
+    ...     rating_provider="DBRS",
+    ...     tenor="short-term",
+    ...     short_term_strategy="best",
+    ... )
+    'R-2 M'
+
+    >>> get_ratings_from_scores(
+    ...     rating_scores=10,
+    ...     rating_provider="DBRS",
+    ...     tenor="short-term",
+    ...     short_term_strategy="base",
+    ... )
+    'R-2 L / R-3'
+
     >>> get_ratings_from_scores(
-    ...     rating_scores=5, rating_provider="S&P", tenor="short-term"
+    ...     rating_scores=10,
+    ...     rating_provider="DBRS",
+    ...     tenor="short-term",
+    ...     short_term_strategy="worst",
     ... )
-    'A-1'
+    'R-3'
 
     Converting a ``pd.Series`` with scores:
 
@@ -183,6 +224,13 @@
     2    D       NaN      D
 
     """
+    if tenor == "short-term" and short_term_strategy is None:
+        short_term_strategy = "base"
+    if tenor == "short-term" and short_term_strategy not in ["best", "base", "worst"]:
+        raise ValueError(
+            "Invalid short_term_strategy. Must be in ['best', 'base', 'worst']."
+        )
+
     if isinstance(rating_scores, (int, float, np.number)):
         if rating_provider is None:
             raise ValueError(VALUE_ERROR_PROVIDER_MANDATORY)
@@ -193,13 +241,14 @@
         )
 
         rtg_dict = _get_translation_dict(
-            "scores_to_rtg", rating_provider=rating_provider, tenor=tenor
+            "scores_to_rtg",
+            rating_provider=rating_provider,
+            tenor=tenor,
+            st_rtg_strategy=short_term_strategy,
         )
 
         if not np.isnan(rating_scores):
             rating_scores = int(Decimal(f"{rating_scores}").quantize(0, ROUND_HALF_UP))
-            # find key (MinScore) in rtg_dict that is nearest to rating_scores
-            # https://bit.ly/3gdRuhX
             if tenor == "long-term":
                 return rtg_dict.get(rating_scores, pd.NA)
             else:
@@ -225,7 +274,12 @@
             valid_rtg_provider=valid_rtg_agncy[tenor],
         )
 
-        rtg_dict = _get_translation_dict("scores_to_rtg", rating_provider, tenor=tenor)
+        rtg_dict = _get_translation_dict(
+            "scores_to_rtg",
+            rating_provider,
+            tenor=tenor,
+            st_rtg_strategy=short_term_strategy,
+        )
 
         # round element to full integer, if element is number
         rating_scores = rating_scores.apply(
@@ -270,6 +324,7 @@
                     rating_scores=rating_scores[col],
                     rating_provider=provider,
                     tenor=tenor,
+                    short_term_strategy=short_term_strategy,
                 )
                 for col, provider in zip(rating_scores.columns, rating_provider)
             ],
diff --git a/src/pyratings/get_scores.py b/src/pyratings/get_scores.py
index 0260de2..9e94d33 100644
--- a/src/pyratings/get_scores.py
+++ b/src/pyratings/get_scores.py
@@ -83,6 +83,7 @@ def get_scores_from_ratings(
     ratings: Union[str, pd.Series, pd.DataFrame],
     rating_provider: Optional[Union[str, List[str]]] = None,
     tenor: str = "long-term",
+    short_term_strategy: Optional[str] = None,
 ) -> Union[int, pd.Series, pd.DataFrame]:
     """Convert regular ratings into numerical rating scores.
 
@@ -98,6 +99,24 @@ def get_scores_from_ratings(
         column names.
     tenor
         Should contain any valid tenor out of {"long-term", "short-term"}
+    short_term_strategy
+        Will only be used if `tenor` is "short-term". Choose between three distinct
+        strategies in order to translate a short-term rating into a rating score.
+        Must be in {"best", "base", "worst"}.
+
+        Compare
+        https://hsbc.github.io/pyratings/short-term-rating/#there's-one-more-catch...
+
+        - Strategy 1 (best):
+          Always choose the best possible short-term rating. That's the optimistic
+          approach.
+        - Strategy 2 (base-case):
+          Always choose the short-term rating that a rating agency would usually assign
+          if there aren't any special liquidity issues (positive or negative). That's
+          the base-case approach.
+        - Strategy 3 (worst):
+          Always choose the worst possible short-term rating. That's the conservative
+          approach.
 
     Returns
     -------
@@ -117,11 +136,39 @@
 
     Examples
     --------
-    Converting a single rating:
+    Converting a single long-term rating:
 
    >>> get_scores_from_ratings("BBB-", "S&P", tenor="long-term")
     10
 
+    Converting a single short-term rating with different `short_term_strategy`
+    arguments:
+
+    >>> get_scores_from_ratings(
+    ...     ratings="P-1",
+    ...     rating_provider="Moody",
+    ...     tenor="short-term",
+    ...     short_term_strategy="best"
+    ... )
+    4.0
+
+
+    >>> get_scores_from_ratings(
+    ...     ratings="P-1",
+    ...     rating_provider="Moody",
+    ...     tenor="short-term",
+    ...     short_term_strategy="base"
+    ... )
+    3.5
+
+    >>> get_scores_from_ratings(
+    ...     ratings="P-1",
+    ...     rating_provider="Moody",
+    ...     tenor="short-term",
+    ...     short_term_strategy="worst"
+    ... )
+    3.0
+
     Converting a ``pd.Series`` of ratings:
 
     >>> import pandas as pd
@@ -188,6 +235,13 @@
     2      22  NaN       22.0
 
     """
+    if tenor == "short-term" and short_term_strategy is None:
+        short_term_strategy = "base"
+    if tenor == "short-term" and short_term_strategy not in ["best", "base", "worst"]:
+        raise ValueError(
+            "Invalid short_term_strategy. Must be in ['best', 'base', 'worst']."
+        )
+
     if isinstance(ratings, str):
         if rating_provider is None:
             raise ValueError(VALUE_ERROR_PROVIDER_MANDATORY)
@@ -197,7 +251,12 @@
             valid_rtg_provider=valid_rtg_agncy[tenor],
         )
 
-        rtg_dict = _get_translation_dict("rtg_to_scores", rating_provider, tenor=tenor)
+        rtg_dict = _get_translation_dict(
+            "rtg_to_scores",
+            rating_provider,
+            tenor=tenor,
+            st_rtg_strategy=short_term_strategy,
+        )
         return rtg_dict.get(ratings, pd.NA)
 
     elif isinstance(ratings, pd.Series):
@@ -212,7 +271,12 @@
             valid_rtg_provider=valid_rtg_agncy[tenor],
         )
 
-        rtg_dict = _get_translation_dict("rtg_to_scores", rating_provider, tenor=tenor)
+        rtg_dict = _get_translation_dict(
+            "rtg_to_scores",
+            rating_provider,
+            tenor=tenor,
+            st_rtg_strategy=short_term_strategy,
+        )
         return pd.Series(data=ratings.map(rtg_dict), name=f"rtg_score_{ratings.name}")
 
     elif isinstance(ratings, pd.DataFrame):
@@ -231,7 +295,10 @@
         return pd.concat(
             [
                 get_scores_from_ratings(
-                    ratings=ratings[col], rating_provider=provider, tenor=tenor
+                    ratings=ratings[col],
+                    rating_provider=provider,
+                    tenor=tenor,
+                    short_term_strategy=short_term_strategy,
                 )
                 for col, provider in zip(ratings.columns, rating_provider)
             ],
diff --git a/src/pyratings/utils.py b/src/pyratings/utils.py
index a43e6a2..65d604a 100644
--- a/src/pyratings/utils.py
+++ b/src/pyratings/utils.py
@@ -145,7 +145,7 @@ def _scores_to_rtg(tenor: str, strat: str) -> Union[dict[int, str], pd.DataFrame
     if tenor == "long-term":
         sql_query = """
             SELECT RatingScore, Rating FROM v_ltRatings
-            WHERE Rating != "SD" and RatingProvider=?
+            WHERE Rating != 'SD' and RatingProvider=?
""" cursor.execute(sql_query, (rating_provider,)) translation_dict = dict(cursor.fetchall()) diff --git a/tests/conftest.py b/tests/conftest.py index 0bf9226..c5f29bd 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -163,86 +163,169 @@ # --- short term st_rtg_prov_list = ["Fitch", "Moody", "SP", "DBRS"] -st_rtg_dict = { +st_strategies = ["best", "base", "worst"] +st_rtgs = { "Fitch": ["F1+", "F1", "F2", "F3", "B", "C", "D"], "Moody": ["P-1", "P-2", "P-3", "NP"], "SP": ["A-1+", "A-1", "A-2", "A-3", "B", "C", "D"], - "DBRS": [ - "R-1 H", - "R-1 M", - "R-1 L", - "R-2 H", - "R-2 M", - "R-2 L / R-3", - "R-4", - "R-5", - "D", - ], +} +st_rtg_dict = { + "best": { + "Fitch": st_rtgs["Fitch"], + "Moody": st_rtgs["Moody"], + "SP": st_rtgs["SP"], + "DBRS": [ + "R-1 H", + "R-1 M", + "R-1 L", + "R-2 H", + "R-2 M", + "R-3", + "R-4", + "R-5", + "D", + ], + }, + "base": { + "Fitch": st_rtgs["Fitch"], + "Moody": st_rtgs["Moody"], + "SP": st_rtgs["SP"], + "DBRS": [ + "R-1 H", + "R-1 M", + "R-1 L", + "R-2 H", + "R-2 M", + "R-2 L / R-3", + "R-4", + "R-5", + "D", + ], + }, + "worst": { + "Fitch": st_rtgs["Fitch"], + "Moody": st_rtgs["Moody"], + "SP": st_rtgs["SP"], + "DBRS": [ + "R-1 H", + "R-1 M", + "R-1 L", + "R-2 H", + "R-2 M", + "R-3", + "R-4", + "R-5", + "D", + ], + }, } st_scrs_dict = { - "Fitch": [3.0, 6.5, 8.0, 9.5, 13.5, 18.5, 21.5], - "Moody": [3.5, 7.5, 9.5, 16.5], - "SP": [2.5, 5.5, 8.0, 10.0, 13.5, 19.0, 22.0], - "DBRS": [1.5, 3.5, 6.0, 8.0, 9.0, 10.0, 12.5, 18.0, 22.0], + "best": { + "Fitch": [3.5, 7.5, 9.0, 10.0, 13.5, 18.5, 21.5], + "Moody": [4.0, 8.5, 10.0, 16.5], + "SP": [3.0, 6.5, 8.5, 10.5, 14.0, 19.0, 22.0], + "DBRS": [2.0, 4.5, 7.0, 9.0, 10.0, 11.0, 13.5, 18.5, 22.0], + }, + "base": { + "Fitch": [3.0, 6.5, 8.0, 9.5, 13.5, 18.5, 21.5], + "Moody": [3.5, 7.5, 9.5, 16.5], + "SP": [2.5, 5.5, 8.0, 10.0, 13.5, 19.0, 22.0], + "DBRS": [1.5, 3.5, 6.0, 8.0, 9.0, 10.0, 12.5, 18.0, 22.0], + }, + "worst": { + "Fitch": [2.5, 5.5, 7.5, 9.5, 13.5, 18.5, 21.5], + "Moody": [3.0, 7.0, 9.5, 16.5], + "SP": [2.5, 5.5, 8.0, 10.0, 13.5, 19.0, 22.0], + "DBRS": [1.0, 2.5, 5.0, 7.5, 9.0, 10.0, 12.5, 18.0, 22.0], + }, } -# create tuples for parameterization: (RatingProvider, Rating, RatingScore) -st_prov_rtg_scrs_records = [] -for (k, v_rtg, v_scores) in zip( - st_rtg_prov_list, st_rtg_dict.values(), st_scrs_dict.values() -): - for (x, y) in zip(v_rtg, v_scores): - st_prov_rtg_scrs_records.append((k, x, y)) +# create list of tuples for parameterization: [(Strategy, RatingProvider, Rating, +# RatingScore), ] +st_strat_prov_rtg_scrs_records = [] +for strat in st_strategies: + for (k, v_rtg, v_scores) in zip( + st_rtg_prov_list, st_rtg_dict[strat].values(), st_scrs_dict[strat].values() + ): + for (x, y) in zip(v_rtg, v_scores): + st_strat_prov_rtg_scrs_records.append((strat, k, x, y)) # create long/tidy dataframe st_rtg_df_long = pd.DataFrame.from_records( - st_prov_rtg_scrs_records, - columns=["RatingProvider", "Rating", "RatingScore"], + st_strat_prov_rtg_scrs_records, + columns=["Strategy", "RatingProvider", "Rating", "RatingScore"], ) -# create wide dataframe with rating provider as columns + +def _convert_rtg_long_to_rtg_wide(strat: str) -> pd.DataFrame: + out = pd.concat( + [ + st_rtg_df_long.loc[ + (st_rtg_df_long["RatingProvider"] == rating_provider) + & (st_rtg_df_long["Strategy"] == strat), + "Rating", + ] + .reset_index(drop=True) + .rename(rating_provider) + for rating_provider in st_rtg_prov_list + ], + axis=1, + ) + out.insert(0, "Strategy", strat) + return out + + +# create wide dataframe with 
ratings in columns and strategies vertically stacked st_rtg_df_wide = pd.concat( - [ - st_rtg_df_long.loc[ - st_rtg_df_long["RatingProvider"] == rating_provider, "Rating" - ] - .reset_index(drop=True) - .rename(rating_provider) - for rating_provider in st_rtg_prov_list - ], - axis=1, -) + [_convert_rtg_long_to_rtg_wide(strat) for strat in st_strategies], axis=0 +).reset_index(drop=True) + + +def _convert_scrs_long_to_rtg_wide(strat: str) -> pd.DataFrame: + out = pd.concat( + [ + st_rtg_df_long.loc[ + (st_rtg_df_long["RatingProvider"] == rating_provider) + & (st_rtg_df_long["Strategy"] == strat), + "RatingScore", + ] + .reset_index(drop=True) + .rename(f"rtg_score_{rating_provider}") + for rating_provider in st_rtg_prov_list + ], + axis=1, + ) + out.insert(0, "Strategy", strat) + return out + +# create wide dataframe with scores in columns and strategies vertically stacked. st_scores_df_wide = pd.concat( - [ - st_rtg_df_long.loc[ - st_rtg_df_long["RatingProvider"] == rating_provider, "RatingScore" - ] - .reset_index(drop=True) - .rename(f"rtg_score_{rating_provider}") - for rating_provider in st_rtg_prov_list - ], - axis=1, -) + [_convert_scrs_long_to_rtg_wide(strat) for strat in st_strategies], axis=0 +).reset_index(drop=True) -st_prov_scores_rtg_series = [ +st_strat_prov_scores_rtg_series = [ ( + strat, rating_provider, st_rtg_df_long.loc[ - st_rtg_df_long["RatingProvider"] == rating_provider, + (st_rtg_df_long["RatingProvider"] == rating_provider) + & (st_rtg_df_long["Strategy"] == strat), "RatingScore", ] .reset_index(drop=True) .squeeze(), st_rtg_df_long.loc[ - st_rtg_df_long["RatingProvider"] == rating_provider, - ["Rating"], + (st_rtg_df_long["RatingProvider"] == rating_provider) + & (st_rtg_df_long["Strategy"] == strat), + "Rating", ] .reset_index(drop=True) .squeeze(), ) for rating_provider in st_rtg_prov_list + for strat in st_strategies ] # --- invalid dataframe ---------------------------------------------------------------- diff --git a/tests/test_get_ratings.py b/tests/test_get_ratings.py index 44ffbc8..fb7296f 100644 --- a/tests/test_get_ratings.py +++ b/tests/test_get_ratings.py @@ -66,20 +66,34 @@ def test_get_rating_from_single_score_float_longterm() -> None: @pytest.mark.parametrize( - ["rating_provider", "rating", "score"], - conftest.st_prov_rtg_scrs_records, + ["strategy", "rating_provider", "rating", "score"], + conftest.st_strat_prov_rtg_scrs_records, ) def test_get_rating_from_single_score_shortterm( - rating_provider: str, score: int, rating: str + strategy: str, rating_provider: str, score: int, rating: str ) -> None: """It returns a human-readable short-term rating.""" act = rtg.get_ratings_from_scores( - rating_scores=score, rating_provider=rating_provider, tenor="short-term" + rating_scores=score, + rating_provider=rating_provider, + tenor="short-term", + short_term_strategy=strategy, ) assert act == rating +def test_get_rating_from_single_score_shortterm_without_specifying_strategy() -> None: + """It returns a human-readable short-term rating.""" + act = rtg.get_ratings_from_scores( + rating_scores=5, + rating_provider="Moody", + tenor="short-term", + ) + + assert act == "P-1" + + def test_get_rating_from_single_score_float_shortterm() -> None: """It returns a human-readable short-term rating.""" assert ( @@ -192,15 +206,21 @@ def test_get_ratings_from_scores_series_longterm_float( @pytest.mark.parametrize( - ["rating_provider", "scores_series", "ratings_series"], - conftest.st_prov_scores_rtg_series, + ["strategy", "rating_provider", "scores_series", 
"ratings_series"], + conftest.st_strat_prov_scores_rtg_series, ) def test_get_ratings_from_scores_series_shortterm( - rating_provider: str, scores_series: pd.Series, ratings_series: pd.Series + strategy: str, + rating_provider: str, + scores_series: pd.Series, + ratings_series: pd.Series, ) -> None: """It returns a series with human-readable short-term ratings.""" act = rtg.get_ratings_from_scores( - rating_scores=scores_series, rating_provider=rating_provider, tenor="short-term" + rating_scores=scores_series, + rating_provider=rating_provider, + tenor="short-term", + short_term_strategy=strategy, ) ratings_series.name = f"rtg_{rating_provider}" assert_series_equal(act, ratings_series) @@ -331,10 +351,20 @@ def test_get_ratings_from_scores_df_with_explicit_rating_provider_longterm() -> assert_frame_equal(act, exp_lt) -def test_get_ratings_from_scores_df_with_explicit_rating_provider_shortterm() -> None: +@pytest.mark.parametrize("strategy", conftest.st_strategies) +def test_get_ratings_from_scores_df_with_explicit_rating_provider_shortterm( + strategy: str, +) -> None: """It returns a dataframe with human-readable short-term ratings and NaNs.""" + input_df = ( + conftest.st_scores_df_wide.loc[ + conftest.st_scores_df_wide["Strategy"] == strategy + ] + .iloc[:, 1:] + .reset_index(drop=True) + ) act = rtg.get_ratings_from_scores( - rating_scores=conftest.st_scores_df_wide, + rating_scores=input_df, rating_provider=[ "rtg_Fitch", "Moody's rating", @@ -342,8 +372,13 @@ def test_get_ratings_from_scores_df_with_explicit_rating_provider_shortterm() -> "DBRS", ], tenor="short-term", + short_term_strategy=strategy, + ) + exp = ( + conftest.st_rtg_df_wide.loc[conftest.st_rtg_df_wide["Strategy"] == strategy] + .iloc[:, 1:] + .reset_index(drop=True) ) - exp = conftest.st_rtg_df_wide exp = exp.set_axis(["rtg_Fitch", "rtg_Moody", "rtg_SP", "rtg_DBRS"], axis=1) # noinspection PyTypeChecker assert_frame_equal(act, exp) @@ -358,12 +393,28 @@ def test_get_ratings_from_scores_df_by_inferring_rating_provider_longterm() -> N assert_frame_equal(act, exp_lt) -def test_get_ratings_from_scores_df_by_inferring_rating_provider_shortterm() -> None: +@pytest.mark.parametrize("strategy", conftest.st_strategies) +def test_get_ratings_from_scores_df_by_inferring_rating_provider_shortterm( + strategy: str, +) -> None: """It returns a dataframe with human-readable short-term ratings and NaNs.""" + input_df = ( + conftest.st_scores_df_wide.loc[ + conftest.st_scores_df_wide["Strategy"] == strategy + ] + .iloc[:, 1:] + .reset_index(drop=True) + ) act = rtg.get_ratings_from_scores( - rating_scores=conftest.st_scores_df_wide, tenor="short-term" + rating_scores=input_df, + tenor="short-term", + short_term_strategy=strategy, + ) + exp = ( + conftest.st_rtg_df_wide.loc[conftest.st_rtg_df_wide["Strategy"] == strategy] + .iloc[:, 1:] + .reset_index(drop=True) ) - exp = conftest.st_rtg_df_wide exp = exp.set_axis(["rtg_Fitch", "rtg_Moody", "rtg_SP", "rtg_DBRS"], axis=1) # noinspection PyTypeChecker assert_frame_equal(act, exp) @@ -427,3 +478,18 @@ def test_get_ratings_from_invalid_warf_df() -> None: expectations.columns = ["rtg_Fitch", "rtg_DBRS"] # noinspection PyTypeChecker assert_frame_equal(act, expectations, check_dtype=False) + + +def test_invalid_short_term_strategy() -> None: + """It raises an error message.""" + with pytest.raises(ValueError) as err: + rtg.get_ratings_from_scores( + rating_scores=5, + rating_provider="Moody", + tenor="short-term", + short_term_strategy="foo", + ) + + assert str(err.value) == ( + "Invalid 
short_term_strategy. Must be in ['best', 'base', 'worst']."
+    )
diff --git a/tests/test_get_scores.py b/tests/test_get_scores.py
index 5fcbe73..29b7f6a 100644
--- a/tests/test_get_scores.py
+++ b/tests/test_get_scores.py
@@ -50,20 +50,34 @@ def test_get_scores_from_single_rating_longterm(
 
 
 @pytest.mark.parametrize(
-    ["rating_provider", "rating", "score"],
-    conftest.st_prov_rtg_scrs_records,
+    ["strategy", "rating_provider", "rating", "score"],
+    conftest.st_strat_prov_rtg_scrs_records,
 )
 def test_get_scores_from_single_rating_shortterm(
-    rating_provider: str, rating: str, score: int
+    strategy: str, rating_provider: str, rating: str, score: int
 ) -> None:
     """It returns a rating score."""
     act = rtg.get_scores_from_ratings(
-        ratings=rating, rating_provider=rating_provider, tenor="short-term"
+        ratings=rating,
+        rating_provider=rating_provider,
+        tenor="short-term",
+        short_term_strategy=strategy,
     )
 
     assert act == score
 
 
+def test_get_scores_from_single_rating_shortterm_without_specifying_strategy() -> None:
+    """It returns a rating score."""
+    act = rtg.get_scores_from_ratings(
+        ratings="P-1",
+        rating_provider="Moody",
+        tenor="short-term",
+    )
+
+    assert act == 3.5
+
+
 @pytest.mark.parametrize("tenor", ["long-term", "short-term"])
 def test_get_scores_from_single_rating_invalid_rating_provider(tenor: str) -> None:
     """It raises an error message."""
@@ -135,16 +149,22 @@ def test_get_scores_from_ratings_series_longterm(
 
 
 @pytest.mark.parametrize(
-    ["rating_provider", "scores_series", "ratings_series"],
-    conftest.st_prov_scores_rtg_series,
+    ["strategy", "rating_provider", "scores_series", "ratings_series"],
+    conftest.st_strat_prov_scores_rtg_series,
 )
 def test_get_scores_from_ratings_series_shortterm(
-    rating_provider: str, ratings_series: pd.Series, scores_series: pd.Series
+    strategy: str,
+    rating_provider: str,
+    ratings_series: pd.Series,
+    scores_series: pd.Series,
 ) -> None:
     """It returns a series with rating scores."""
     scores_series.name = f"rtg_score_{ratings_series.name}"
     act = rtg.get_scores_from_ratings(
-        ratings=ratings_series, rating_provider=rating_provider, tenor="short-term"
+        ratings=ratings_series,
+        rating_provider=rating_provider,
+        tenor="short-term",
+        short_term_strategy=strategy,
     )
 
     assert_series_equal(act, scores_series)
@@ -237,10 +257,18 @@ def test_get_scores_from_ratings_df_with_explicit_rating_provider_longterm() ->
     assert_frame_equal(act, exp_lt)
 
 
-def test_get_scores_from_ratings_df_with_explicit_rating_provider_shortterm() -> None:
+@pytest.mark.parametrize("strategy", conftest.st_strategies)
+def test_get_scores_from_ratings_df_with_explicit_rating_provider_shortterm(
+    strategy: str,
+) -> None:
     """It returns a dataframe with rating scores and NaNs."""
+    input_df = (
+        conftest.st_rtg_df_wide.loc[conftest.st_rtg_df_wide["Strategy"] == strategy]
+        .iloc[:, 1:]
+        .reset_index(drop=True)
+    )
     act = rtg.get_scores_from_ratings(
-        ratings=conftest.st_rtg_df_wide,
+        ratings=input_df,
         rating_provider=[
             "rtg_Fitch",
             "Moody's rating",
@@ -248,9 +276,17 @@ def test_get_scores_from_ratings_df_with_explicit_rating_provider_shortterm() ->
             "DBRS",
         ],
         tenor="short-term",
+        short_term_strategy=strategy,
+    )
+    exp = (
+        conftest.st_scores_df_wide.loc[
+            conftest.st_scores_df_wide["Strategy"] == strategy
+        ]
+        .iloc[:, 1:]
+        .reset_index(drop=True)
     )
     # noinspection PyTypeChecker
-    assert_frame_equal(act, conftest.st_scores_df_wide)
+    assert_frame_equal(act, exp)
 
 
 def test_get_scores_from_ratings_df_by_inferring_rating_provider_longterm() -> None:
@@ -262,13 +298,28 @@ def test_get_scores_from_ratings_df_by_inferring_rating_provider_longterm() -> N
     assert_frame_equal(act, exp_lt)
 
 
-def test_get_scores_from_ratings_df_by_inferring_rating_provider_shortterm() -> None:
+@pytest.mark.parametrize("strategy", conftest.st_strategies)
+def test_get_scores_from_ratings_df_by_inferring_rating_provider_shortterm(
+    strategy: str,
+) -> None:
     """It returns a dataframe with rating scores and NaNs."""
+    input_df = (
+        conftest.st_rtg_df_wide.loc[conftest.st_rtg_df_wide["Strategy"] == strategy]
+        .iloc[:, 1:]
+        .reset_index(drop=True)
+    )
     act = rtg.get_scores_from_ratings(
-        ratings=conftest.st_rtg_df_wide, tenor="short-term"
+        ratings=input_df, tenor="short-term", short_term_strategy=strategy
+    )
+    exp = (
+        conftest.st_scores_df_wide.loc[
+            conftest.st_scores_df_wide["Strategy"] == strategy
+        ]
+        .iloc[:, 1:]
+        .reset_index(drop=True)
     )
     # noinspection PyTypeChecker
-    assert_frame_equal(act, conftest.st_scores_df_wide)
+    assert_frame_equal(act, exp)
 
 
 @pytest.mark.parametrize("tenor", ["long-term", "short-term"])
@@ -309,3 +360,18 @@ def test_get_scores_from_invalid_warf_df() -> None:
     expectations.columns = ["rtg_score_Fitch", "rtg_score_DBRS"]
     # noinspection PyTypeChecker
     assert_frame_equal(act, expectations)
+
+
+def test_invalid_short_term_strategy() -> None:
+    """It raises an error message."""
+    with pytest.raises(ValueError) as err:
+        rtg.get_scores_from_ratings(
+            ratings="P-2",
+            rating_provider="Moody",
+            tenor="short-term",
+            short_term_strategy="foo",
+        )
+
+    assert str(err.value) == (
+        "Invalid short_term_strategy. Must be in ['best', 'base', 'worst']."
+    )
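
For reviewers who want to try the new parameter end to end, below is a minimal usage sketch (not part of the changeset). It assumes the two functions are exposed at package level as `pyratings.get_scores_from_ratings()` and `pyratings.get_ratings_from_scores()`; the expected values in the comments are taken from the doctests added above.

```python
# Sketch only (not part of this PR): exercise the new `short_term_strategy`
# parameter in both directions. Expected values are taken from the doctests
# added in get_scores.py and get_ratings.py; the package-level import is assumed.
import pyratings as rtg

# Short-term rating -> score: the same Moody's "P-1" maps to a different
# score depending on the chosen strategy.
for strategy in ["best", "base", "worst"]:
    score = rtg.get_scores_from_ratings(
        ratings="P-1",
        rating_provider="Moody",
        tenor="short-term",
        short_term_strategy=strategy,
    )
    print(strategy, score)  # best: 4.0, base: 3.5, worst: 3.0

# Score -> short-term rating: a rating score of 10 translates into a
# different DBRS short-term rating depending on the strategy.
for strategy in ["best", "base", "worst"]:
    rating = rtg.get_ratings_from_scores(
        rating_scores=10,
        rating_provider="DBRS",
        tenor="short-term",
        short_term_strategy=strategy,
    )
    print(strategy, rating)  # best: 'R-2 M', base: 'R-2 L / R-3', worst: 'R-3'

# Omitting short_term_strategy falls back to the "base" strategy; any other
# value raises ValueError (see the new validation code and tests above).
```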