diff --git a/examples/02_meta-analysis/plot_meta-analysis_walkthrough.py b/examples/02_meta-analysis/plot_meta-analysis_walkthrough.py index 2bce6de..aba811d 100644 --- a/examples/02_meta-analysis/plot_meta-analysis_walkthrough.py +++ b/examples/02_meta-analysis/plot_meta-analysis_walkthrough.py @@ -146,13 +146,13 @@ # # Notice that these models don't use :class:`~pymare.core.Dataset` objects. stouff = estimators.StoufferCombinationTest() -stouff.fit(z[:, None]) +stouff.fit_transform(z[:, None]) print("Stouffers") print("p: {}".format(stouff.params_["p"])) print() fisher = estimators.FisherCombinationTest() -fisher.fit(z[:, None]) +fisher.fit_transform(z[:, None]) print("Fishers") print("p: {}".format(fisher.params_["p"])) @@ -162,11 +162,10 @@ # This estimator does not attempt to estimate between-study variance. # Instead, it takes ``tau2`` (:math:`\tau^{2}`) as an argument. wls = estimators.WeightedLeastSquares() -wls.fit_dataset(dset) -wls_summary = wls.summary() -results["Weighted Least Squares"] = wls_summary.to_df() +wls_results = wls.fit_transform(dset) +results["Weighted Least Squares"] = wls_results.to_df() print("Weighted Least Squares") -print(wls_summary.to_df().T) +print(wls_results.to_df().T) ############################################################################### # Methods that estimate between-study variance @@ -180,54 +179,48 @@ # can use either maximum-likelihood (ML) or restricted maximum-likelihood (REML) # to iteratively estimate it. 
dsl = estimators.DerSimonianLaird() -dsl.fit_dataset(dset) -dsl_summary = dsl.summary() -results["DerSimonian-Laird"] = dsl_summary.to_df() +dsl_results = dsl.fit_transform(dset) +results["DerSimonian-Laird"] = dsl_results.to_df() print("DerSimonian-Laird") -print(dsl_summary.to_df().T) +print(dsl_results.to_df().T) print() hedge = estimators.Hedges() -hedge.fit_dataset(dset) -hedge_summary = hedge.summary() -results["Hedges"] = hedge_summary.to_df() +hedge_results = hedge.fit_transform(dset) +results["Hedges"] = hedge_results.to_df() print("Hedges") -print(hedge_summary.to_df().T) +print(hedge_results.to_df().T) print() vb_ml = estimators.VarianceBasedLikelihoodEstimator(method="ML") -vb_ml.fit_dataset(dset) -vb_ml_summary = vb_ml.summary() -results["Variance-Based with ML"] = vb_ml_summary.to_df() +vb_ml_results = vb_ml.fit_transform(dset) +results["Variance-Based with ML"] = vb_ml_results.to_df() print("Variance-Based with ML") -print(vb_ml_summary.to_df().T) +print(vb_ml_results.to_df().T) print() vb_reml = estimators.VarianceBasedLikelihoodEstimator(method="REML") -vb_reml.fit_dataset(dset) -vb_reml_summary = vb_reml.summary() -results["Variance-Based with REML"] = vb_reml_summary.to_df() +vb_reml_results = vb_reml.fit_transform(dset) +results["Variance-Based with REML"] = vb_reml_results.to_df() print("Variance-Based with REML") -print(vb_reml_summary.to_df().T) +print(vb_reml_results.to_df().T) print() # The ``SampleSizeBasedLikelihoodEstimator`` estimates between-study variance # using ``y`` and ``n``, but assumes within-study variance is homogenous # across studies. 
sb_ml = estimators.SampleSizeBasedLikelihoodEstimator(method="ML") -sb_ml.fit_dataset(dset) -sb_ml_summary = sb_ml.summary() -results["Sample Size-Based with ML"] = sb_ml_summary.to_df() +sb_ml_results = sb_ml.fit_transform(dset) +results["Sample Size-Based with ML"] = sb_ml_results.to_df() print("Sample Size-Based with ML") -print(sb_ml_summary.to_df().T) +print(sb_ml_results.to_df().T) print() sb_reml = estimators.SampleSizeBasedLikelihoodEstimator(method="REML") -sb_reml.fit_dataset(dset) -sb_reml_summary = sb_reml.summary() -results["Sample Size-Based with REML"] = sb_reml_summary.to_df() +sb_reml_results = sb_reml.fit_transform(dset) +results["Sample Size-Based with REML"] = sb_reml_results.to_df() print("Sample Size-Based with REML") -print(sb_reml_summary.to_df().T) +print(sb_reml_results.to_df().T) ############################################################################### # What about the Stan estimator? @@ -240,10 +233,10 @@ # ````````````````````````````````````````````````````````````````````````````` fig, ax = plt.subplots(figsize=(6, 6)) -for i, (estimator_name, summary_df) in enumerate(results.items()): - ax.scatter((summary_df.loc[0, "estimate"],), (i + 1,), label=estimator_name) +for i, (estimator_name, results_df) in enumerate(results.items()): + ax.scatter((results_df.loc[0, "estimate"],), (i + 1,), label=estimator_name) ax.plot( - (summary_df.loc[0, "ci_0.025"], summary_df.loc[0, "ci_0.975"]), + (results_df.loc[0, "ci_0.025"], results_df.loc[0, "ci_0.975"]), (i + 1, i + 1), linewidth=3, ) diff --git a/examples/02_meta-analysis/plot_run_meta-analysis.py b/examples/02_meta-analysis/plot_run_meta-analysis.py index aa3b71d..16986d8 100644 --- a/examples/02_meta-analysis/plot_run_meta-analysis.py +++ b/examples/02_meta-analysis/plot_run_meta-analysis.py @@ -52,11 +52,11 @@ # so it can be useful for large-scale meta-analyses, # such as neuroimaging image-based meta-analyses. 
# -# The :meth:`~pymare.estimators.estimators.BaseEstimator.summary` function +# The :meth:`~pymare.estimators.estimators.BaseEstimator.transform` function # will return a :class:`~pymare.results.MetaRegressionResults` object, # which contains the results of the analysis. -est = estimators.WeightedLeastSquares().fit_dataset(dset) -results = est.summary() +est = estimators.WeightedLeastSquares() +results = est.fit_transform(dset) results.to_df() ############################################################################### diff --git a/pymare/core.py b/pymare/core.py index 7fc7b60..7757545 100644 --- a/pymare/core.py +++ b/pymare/core.py @@ -260,5 +260,4 @@ def meta_regression( # Get estimates est = est_cls(**kwargs) - est.fit_dataset(data) - return est.summary() + return est.fit_transform(data) diff --git a/pymare/estimators/combination.py b/pymare/estimators/combination.py index f2ffabc..b83c500 100644 --- a/pymare/estimators/combination.py +++ b/pymare/estimators/combination.py @@ -34,9 +34,9 @@ def p_value(self, z, *args, **kwargs): def _z_to_p(self, z): return ss.norm.sf(z) - def fit(self, z, *args, **kwargs): + def _fit(self, z, *args, **kwargs): """Fit the estimator to z-values.""" - # This resets the Estimator's dataset_ attribute. fit_dataset will overwrite if called. + # This resets the Estimator's dataset_ attribute. fit() will overwrite if called. self.dataset_ = None if self.mode == "concordant": @@ -51,16 +51,20 @@ def fit(self, z, *args, **kwargs): self.params_ = {"p": p} return self - def summary(self): - """Generate a summary of the estimator results.""" + def transform(self): + """Generate a CombinationTestResults object for the fitted estimator.""" if not hasattr(self, "params_"): name = self.__class__.__name__ raise ValueError( "This {} instance hasn't been fitted yet. 
Please " - "call fit() before summary().".format(name) + "call fit() before transform().".format(name) ) return CombinationTestResults(self, self.dataset_, p=self.params_["p"]) + def fit_transform(self, z, *args, **kwargs): + """Fit the estimator to z-values, then transform it.""" + return self.fit(z, *args, **kwargs).transform() + class StoufferCombinationTest(CombinationTest): """Stouffer's Z-score meta-analysis method. @@ -112,9 +116,9 @@ class StoufferCombinationTest(CombinationTest): # Maps Dataset attributes onto fit() args; see BaseEstimator for details. _dataset_attr_map = {"z": "y", "w": "v"} - def fit(self, z, w=None): + def _fit(self, z, w=None): """Fit the estimator to z-values, optionally with weights.""" - return super().fit(z, w=w) + return super()._fit(z, w=w) def p_value(self, z, w=None): """Calculate p-values.""" diff --git a/pymare/estimators/estimators.py b/pymare/estimators/estimators.py index 7a2b1a9..94026e1 100644 --- a/pymare/estimators/estimators.py +++ b/pymare/estimators/estimators.py @@ -1,6 +1,6 @@ """Meta-regression estimator classes.""" -from abc import ABCMeta, abstractmethod +from abc import ABCMeta from inspect import getfullargspec from warnings import warn @@ -62,12 +62,7 @@ class BaseEstimator(metaclass=ABCMeta): # (e.g., 'z'). _dataset_attr_map = {} - @abstractmethod - def fit(self, *args, **kwargs): - """Fit the estimator to data.""" - pass - - def fit_dataset(self, dataset, *args, **kwargs): + def fit(self, dataset, *args, **kwargs): """Apply the current estimator to the passed Dataset container. A convenience interface that wraps fit() and automatically aligns the @@ -83,7 +78,7 @@ def fit_dataset(self, dataset, *args, **kwargs): Optional keyword arguments to pass onto the :meth:`~pymare.core.Dataset.fit` method. 
""" all_kwargs = {} - spec = getfullargspec(self.fit) + spec = getfullargspec(self._fit) n_kw = len(spec.defaults) if spec.defaults else 0 n_args = len(spec.args) - n_kw - 1 @@ -96,7 +91,7 @@ all_kwargs[name] = getattr(dataset, attr_name) all_kwargs.update(kwargs) - self.fit(*args, **all_kwargs) + self._fit(*args, **all_kwargs) self.dataset_ = dataset return self @@ -140,7 +135,7 @@ def get_v(self, dataset): return self.params_["sigma2"] / dataset.n - def summary(self): + def transform(self): """Generate a MetaRegressionResults object for the fitted estimator. Returns @@ -157,6 +152,27 @@ def summary(self): p = self.params_ return MetaRegressionResults(self, self.dataset_, p["fe_params"], p["inv_cov"], p["tau2"]) + def fit_transform(self, dataset, *args, **kwargs): + """Fit to data, then transform it. + + Fits the estimator to the dataset and returns a MetaRegressionResults object + for the fitted estimator. + + Parameters + ---------- + dataset : :obj:`~pymare.core.Dataset` + A PyMARE Dataset instance holding the data. + *args + Optional positional arguments to pass onto the :meth:`~pymare.core.Dataset.fit` method. + **kwargs + Optional keyword arguments to pass onto the :meth:`~pymare.core.Dataset.fit` method. + + Returns + ------- + :obj:`~pymare.results.MetaRegressionResults` + """ + return self.fit(dataset, *args, **kwargs).transform() + class WeightedLeastSquares(BaseEstimator): """Weighted least-squares meta-regression. @@ -190,7 +206,7 @@ class WeightedLeastSquares(BaseEstimator): def __init__(self, tau2=0.0): self.tau2 = tau2 - def fit(self, y, X, v=None): + def _fit(self, y, X, v=None): """Fit the estimator to data. Parameters @@ -206,7 +222,7 @@ def fit(self, y, X, v=None): ------- :obj:`~pymare.estimators.WeightedLeastSquares` """ - # This resets the Estimator's dataset_ attribute. fit_dataset will overwrite if called. + # This resets the Estimator's dataset_ attribute. fit will overwrite if called. 
self.dataset_ = None if v is None: @@ -235,7 +251,7 @@ class DerSimonianLaird(BaseEstimator): .. footbibliography:: """ - def fit(self, y, v, X): + def _fit(self, y, v, X): """Fit the estimator to data. Parameters @@ -299,7 +315,7 @@ class Hedges(BaseEstimator): .. footbibliography:: """ - def fit(self, y, v, X): + def _fit(self, y, v, X): """Fit the estimator to data. Parameters @@ -367,7 +383,7 @@ def __init__(self, method="ml", **kwargs): self.kwargs = kwargs @_loopable - def fit(self, y, v, X): + def _fit(self, y, v, X): """Fit the estimator to data. Parameters @@ -387,7 +403,7 @@ def fit(self, y, v, X): self.dataset_ = None # use D-L estimate for initial values - est_DL = DerSimonianLaird().fit(y, v, X).params_ + est_DL = DerSimonianLaird()._fit(y, v, X).params_ beta = est_DL["fe_params"] tau2 = est_DL["tau2"] @@ -460,7 +476,7 @@ def __init__(self, method="ml", **kwargs): self.kwargs = kwargs @_loopable - def fit(self, y, n, X): + def _fit(self, y, n, X): """Fit the estimator to data. Parameters diff --git a/pymare/results.py b/pymare/results.py index b84b1f6..aecbc93 100644 --- a/pymare/results.py +++ b/pymare/results.py @@ -37,7 +37,7 @@ class MetaRegressionResults: Warning ------- - When an Estimator is fitted to arrays directly using the ``fit`` method, the Results object's + When an Estimator is fitted to arrays directly using the ``_fit`` method, the Results object's utility is limited. Many methods will not work. 
""" @@ -336,7 +336,7 @@ def permutation_test(self, n_perm=1000): y_perm = np.repeat(y[:, None], n_perm, axis=1) # for v, we might actually be working with n, depending on estimator - has_v = "v" in getfullargspec(self.estimator.fit).args[1:] + has_v = "v" in getfullargspec(self.estimator._fit).args[1:] v = self.dataset.v[:, i] if has_v else self.dataset.n[:, i] v_perm = np.repeat(v[:, None], n_perm, axis=1) @@ -362,7 +362,7 @@ def permutation_test(self, n_perm=1000): # Pass parameters, remembering that v may actually be n kwargs = {"y": y_perm, "X": self.dataset.X} kwargs["v" if has_v else "n"] = v_perm - params = self.estimator.fit(**kwargs).params_ + params = self.estimator._fit(**kwargs).params_ fe_obs = fe_stats["est"][:, i] if fe_obs.ndim == 1: @@ -483,9 +483,9 @@ def permutation_test(self, n_perm=1000): # Some combination tests can handle weights (passed as v) kwargs = {"z": y_perm} - if "w" in getfullargspec(est.fit).args: + if "w" in getfullargspec(est._fit).args: kwargs["w"] = self.dataset.v - params = est.fit(**kwargs).params_ + params = est._fit(**kwargs).params_ p_obs = self.z[i] if p_obs.ndim == 1: diff --git a/pymare/stats.py b/pymare/stats.py index 83cdd69..91c42b6 100644 --- a/pymare/stats.py +++ b/pymare/stats.py @@ -106,7 +106,7 @@ def q_profile(y, v, X, alpha=0.05): # value, minimize() sometimes fails to stay in bounds. 
from .estimators import DerSimonianLaird - ub_start = 2 * DerSimonianLaird().fit(y, v, X).params_["tau2"] + ub_start = 2 * DerSimonianLaird()._fit(y, v, X).params_["tau2"] lb = minimize(lambda x: (q_gen(*args, x) - l_crit) ** 2, [0], bounds=bds).x[0] ub = minimize(lambda x: (q_gen(*args, x) - u_crit) ** 2, ub_start, bounds=bds).x[0] diff --git a/pymare/tests/test_combination_tests.py b/pymare/tests/test_combination_tests.py index feacbc9..17bcccf 100644 --- a/pymare/tests/test_combination_tests.py +++ b/pymare/tests/test_combination_tests.py @@ -28,7 +28,7 @@ @pytest.mark.parametrize("Cls,data,mode,expected", _params) def test_combination_test(Cls, data, mode, expected): """Test CombinationTest Estimators with numpy data.""" - results = Cls(mode).fit(data).params_ + results = Cls(mode)._fit(data).params_ z = ss.norm.isf(results["p"]) assert np.allclose(z, expected, atol=1e-5) @@ -37,7 +37,7 @@ def test_combination_test(Cls, data, mode, expected): def test_combination_test_from_dataset(Cls, data, mode, expected): """Test CombinationTest Estimators with PyMARE Datasets.""" dset = Dataset(y=data) - est = Cls(mode).fit_dataset(dset) - results = est.summary() + est = Cls(mode) + results = est.fit_transform(dset) z = ss.norm.isf(results.p) assert np.allclose(z, expected, atol=1e-5) diff --git a/pymare/tests/test_estimators.py b/pymare/tests/test_estimators.py index 339e163..939769b 100644 --- a/pymare/tests/test_estimators.py +++ b/pymare/tests/test_estimators.py @@ -15,8 +15,8 @@ def test_weighted_least_squares_estimator(dataset): """Test WeightedLeastSquares estimator.""" # ground truth values are from metafor package in R - est = WeightedLeastSquares().fit_dataset(dataset) - results = est.summary() + est = WeightedLeastSquares() + results = est.fit_transform(dataset) beta, tau2 = results.fe_params, results.tau2 fe_stats = results.get_fe_stats() @@ -35,8 +35,8 @@ def test_weighted_least_squares_estimator(dataset): assert tau2 == 0.0 # With non-zero tau^2 - est = 
WeightedLeastSquares(8.0).fit_dataset(dataset) - results = est.summary() + est = WeightedLeastSquares(8.0) + results = est.fit_transform(dataset) beta, tau2 = results.fe_params, results.tau2 assert np.allclose(beta.ravel(), [-0.1071, 0.7657], atol=1e-4) assert tau2 == 8.0 @@ -45,8 +45,8 @@ def test_weighted_least_squares_estimator(dataset): def test_dersimonian_laird_estimator(dataset): """Test DerSimonianLaird estimator.""" # ground truth values are from metafor package in R - est = DerSimonianLaird().fit_dataset(dataset) - results = est.summary() + est = DerSimonianLaird() + results = est.fit_transform(dataset) beta, tau2 = results.fe_params, results.tau2 fe_stats = results.get_fe_stats() @@ -67,7 +67,8 @@ def test_dersimonian_laird_estimator(dataset): def test_2d_DL_estimator(dataset_2d): """Test DerSimonianLaird estimator on 2D Dataset.""" - results = DerSimonianLaird().fit_dataset(dataset_2d).summary() + est = DerSimonianLaird() + results = est.fit_transform(dataset_2d) beta, tau2 = results.fe_params, results.tau2 fe_stats = results.get_fe_stats() @@ -97,8 +98,8 @@ def test_hedges_estimator(dataset): # ground truth values are from metafor package in R, except that metafor # always gives negligibly different values for tau2, likely due to # algorithmic differences in the computation. 
- est = Hedges().fit_dataset(dataset) - results = est.summary() + est = Hedges() + results = est.fit_transform(dataset) beta, tau2 = results.fe_params, results.tau2 fe_stats = results.get_fe_stats() @@ -119,7 +120,8 @@ def test_hedges_estimator(dataset): def test_2d_hedges(dataset_2d): """Test Hedges estimator on 2D Dataset.""" - results = Hedges().fit_dataset(dataset_2d).summary() + est = Hedges() + results = est.fit_transform(dataset_2d) beta, tau2 = results.fe_params, results.tau2 fe_stats = results.get_fe_stats() @@ -146,8 +148,8 @@ def test_2d_hedges(dataset_2d): def test_variance_based_maximum_likelihood_estimator(dataset): """Test VarianceBasedLikelihoodEstimator estimator.""" # ground truth values are from metafor package in R - est = VarianceBasedLikelihoodEstimator(method="ML").fit_dataset(dataset) - results = est.summary() + est = VarianceBasedLikelihoodEstimator(method="ML") + results = est.fit_transform(dataset) beta, tau2 = results.fe_params, results.tau2 fe_stats = results.get_fe_stats() @@ -169,8 +171,8 @@ def test_variance_based_maximum_likelihood_estimator(dataset): def test_variance_based_restricted_maximum_likelihood_estimator(dataset): """Test VarianceBasedLikelihoodEstimator estimator with REML.""" # ground truth values are from metafor package in R - est = VarianceBasedLikelihoodEstimator(method="REML").fit_dataset(dataset) - results = est.summary() + est = VarianceBasedLikelihoodEstimator(method="REML") + results = est.fit_transform(dataset) beta, tau2 = results.fe_params, results.tau2 fe_stats = results.get_fe_stats() @@ -192,8 +194,8 @@ def test_variance_based_restricted_maximum_likelihood_estimator(dataset): def test_sample_size_based_maximum_likelihood_estimator(dataset_n): """Test SampleSizeBasedLikelihoodEstimator estimator.""" # test values have not been verified for convergence with other packages - est = SampleSizeBasedLikelihoodEstimator(method="ML").fit_dataset(dataset_n) - results = est.summary() + est = 
SampleSizeBasedLikelihoodEstimator(method="ML") + results = est.fit_transform(dataset_n) beta = results.fe_params sigma2 = results.estimator.params_["sigma2"] tau2 = results.tau2 @@ -219,8 +221,8 @@ def test_sample_size_based_maximum_likelihood_estimator(dataset_n): def test_sample_size_based_restricted_maximum_likelihood_estimator(dataset_n): """Test SampleSizeBasedLikelihoodEstimator REML estimator.""" # test values have not been verified for convergence with other packages - est = SampleSizeBasedLikelihoodEstimator(method="REML").fit_dataset(dataset_n) - results = est.summary() + est = SampleSizeBasedLikelihoodEstimator(method="REML") + results = est.fit_transform(dataset_n) beta = results.fe_params sigma2 = results.estimator.params_["sigma2"] tau2 = results.tau2 @@ -245,8 +247,8 @@ def test_sample_size_based_restricted_maximum_likelihood_estimator(dataset_n): def test_2d_looping(dataset_2d): """Test 2D looping in estimators.""" - est = VarianceBasedLikelihoodEstimator().fit_dataset(dataset_2d) - results = est.summary() + est = VarianceBasedLikelihoodEstimator() + results = est.fit_transform(dataset_2d) beta, tau2 = results.fe_params, results.tau2 fe_stats = results.get_fe_stats() @@ -278,6 +280,6 @@ def test_2d_loop_warning(dataset_2d): dataset = Dataset(y, v) # Warning is raised when 2nd dim is > 10 with pytest.warns(UserWarning, match="Input contains"): - est.fit_dataset(dataset) + est.fit(dataset) # But not when it's smaller - est.fit_dataset(dataset_2d) + est.fit(dataset_2d) diff --git a/pymare/tests/test_results.py b/pymare/tests/test_results.py index 2baf3bd..e537f5a 100644 --- a/pymare/tests/test_results.py +++ b/pymare/tests/test_results.py @@ -17,20 +17,20 @@ def fitted_estimator(dataset): """Create a fitted Estimator as a fixture.""" est = DerSimonianLaird() - return est.fit_dataset(dataset) + return est.fit(dataset) @pytest.fixture def results(fitted_estimator): """Create a results object as a fixture.""" - return fitted_estimator.summary() + return 
fitted_estimator.transform() @pytest.fixture def results_2d(fitted_estimator, dataset_2d): """Create a 2D results object as a fixture.""" est = VarianceBasedLikelihoodEstimator() - return est.fit_dataset(dataset_2d).summary() + return est.fit_transform(dataset_2d) def test_meta_regression_results_from_arrays(dataset): @@ -41,8 +41,8 @@ def test_meta_regression_results_from_arrays(dataset): See https://github.com/neurostuff/PyMARE/issues/52 for more info. """ est = DerSimonianLaird() - fitted_estimator = est.fit(y=dataset.y, X=dataset.X, v=dataset.v) - results = fitted_estimator.summary() + fitted_estimator = est._fit(y=dataset.y, X=dataset.X, v=dataset.v) + results = fitted_estimator.transform() assert isinstance(results, MetaRegressionResults) assert results.fe_params.shape == (2, 1) assert results.fe_cov.shape == (2, 2, 1) @@ -50,15 +50,15 @@ def test_meta_regression_results_from_arrays(dataset): # fit overwrites dataset_ attribute with None assert fitted_estimator.dataset_ is None - # fit_dataset overwrites it with the Dataset - fitted_estimator.fit_dataset(dataset) + # fit overwrites it with the Dataset + fitted_estimator.fit(dataset) assert isinstance(fitted_estimator.dataset_, Dataset) # fit sets it back to None - fitted_estimator.fit(y=dataset.y, X=dataset.X, v=dataset.v) + fitted_estimator._fit(y=dataset.y, X=dataset.X, v=dataset.v) assert fitted_estimator.dataset_ is None # Some methods are not available if fit was used - results = fitted_estimator.summary() + results = fitted_estimator.transform() with pytest.raises(ValueError): results.get_re_stats() @@ -79,23 +79,23 @@ def test_combination_test_results_from_arrays(dataset): to fail when Estimators were fitted to arrays instead of Datasets. See https://github.com/neurostuff/PyMARE/issues/52 for more info. 
""" - fitted_estimator = StoufferCombinationTest().fit(z=dataset.y) - results = fitted_estimator.summary() + fitted_estimator = StoufferCombinationTest()._fit(z=dataset.y) + results = fitted_estimator.transform() assert isinstance(results, CombinationTestResults) assert results.p.shape == (1,) # fit overwrites dataset_ attribute with None assert fitted_estimator.dataset_ is None # fit_dataset overwrites it with the Dataset - fitted_estimator.fit_dataset(dataset) + fitted_estimator.fit(dataset) assert isinstance(fitted_estimator.dataset_, Dataset) # fit sets it back to None - fitted_estimator.fit(z=dataset.y) + fitted_estimator._fit(z=dataset.y) assert fitted_estimator.dataset_ is None # Some methods are not available if fit was used with pytest.raises(ValueError): - fitted_estimator.summary().permutation_test(1000) + fitted_estimator.transform().permutation_test(1000) def test_meta_regression_results_init_1d(fitted_estimator): @@ -104,7 +104,7 @@ def test_meta_regression_results_init_1d(fitted_estimator): results = MetaRegressionResults( est, est.dataset_, est.params_["fe_params"], est.params_["inv_cov"], est.params_["tau2"] ) - assert isinstance(est.summary(), MetaRegressionResults) + assert isinstance(est.transform(), MetaRegressionResults) assert results.fe_params.shape == (2, 1) assert results.fe_cov.shape == (2, 2, 1) assert results.tau2.shape == (1,) @@ -167,21 +167,29 @@ def test_mrr_to_df(results): assert np.allclose(df["p-value"].values, [0.9678, 0.4369], atol=1e-4) -def test_estimator_summary(dataset): - """Test Estimator's summary method.""" +def test_estimator_transform(dataset): + """Test Estimator's transform method.""" est = WeightedLeastSquares() # Fails if we haven't fitted yet with pytest.raises(ValueError): - est.summary() + est.transform() + + est.fit(dataset) + transform = est.transform() + assert isinstance(transform, MetaRegressionResults) + + +def test_estimator_fit_transform(dataset): + """Test Estimator's fit_transform method.""" + est = 
WeightedLeastSquares() - est.fit_dataset(dataset) - summary = est.summary() - assert isinstance(summary, MetaRegressionResults) + transform = est.fit_transform(dataset) + assert isinstance(transform, MetaRegressionResults) def test_exact_perm_test_2d_no_mods(small_dataset_2d): """Test the exact permutation test on 2D data.""" - results = DerSimonianLaird().fit_dataset(small_dataset_2d).summary() + results = DerSimonianLaird().fit(small_dataset_2d).transform() pmr = results.permutation_test(1000) assert pmr.n_perm == 8 assert pmr.exact @@ -203,7 +211,7 @@ def test_approx_perm_test_1d_with_mods(results): def test_exact_perm_test_1d_no_mods(): """Test the exact permutation test on 1D data.""" dataset = Dataset([1, 1, 2, 1.3], [1.5, 1, 2, 4]) - results = DerSimonianLaird().fit_dataset(dataset).summary() + results = DerSimonianLaird().fit_transform(dataset) pmr = results.permutation_test(867) assert pmr.n_perm == 16 assert pmr.exact @@ -214,7 +222,7 @@ def test_exact_perm_test_1d_no_mods(): def test_approx_perm_test_with_n_based_estimator(dataset_n): """Test the approximate permutation test on an sample size-based Estimator.""" - results = SampleSizeBasedLikelihoodEstimator().fit_dataset(dataset_n).summary() + results = SampleSizeBasedLikelihoodEstimator().fit_transform(dataset_n) pmr = results.permutation_test(100) assert pmr.n_perm == 100 assert not pmr.exact @@ -226,7 +234,7 @@ def test_approx_perm_test_with_n_based_estimator(dataset_n): def test_stouffers_perm_test_exact(): """Test the exact permutation test on Stouffers Estimator.""" dataset = Dataset([1, 1, 2, 1.3], [1.5, 1, 2, 4]) - results = StoufferCombinationTest().fit_dataset(dataset).summary() + results = StoufferCombinationTest().fit(dataset).transform() pmr = results.permutation_test(2000) assert pmr.n_perm == 16 assert pmr.exact @@ -239,7 +247,7 @@ def test_stouffers_perm_test_approx(): """Test the approximate permutation test on Stouffers Estimator.""" y = [2.8, -0.2, -1, 4.5, 1.9, 2.38, 0.6, 1.88, -0.4, 
1.5, 3.163, 0.7] dataset = Dataset(y) - results = StoufferCombinationTest().fit_dataset(dataset).summary() + results = StoufferCombinationTest().fit(dataset).transform() pmr = results.permutation_test(2000) assert not pmr.exact assert pmr.n_perm == 2000