
Commit 75b3382
remove other information.
ypriverol committed Jun 2, 2024
1 parent 8163d24 commit 75b3382
Showing 2 changed files with 14 additions and 14 deletions.
14 changes: 7 additions & 7 deletions ibaqpy/ibaq/combiner.py

Note: every removed/added pair in this file reads identically as plain text; the changes appear to be whitespace-only re-indentation.
@@ -29,7 +29,7 @@

 class Combiner:
     def __init__(
-        self, data_folder: os.PathLike, covariate: str = None, organism: str = "HUMAN"
+        self, data_folder: os.PathLike, covariate: str = None, organism: str = "HUMAN"
     ):
         """Generate concated IbaqNorm and metadata."""
         self.df_pca = compute_pca(self.df_corrected.T, n_components=5)
@@ -114,11 +114,11 @@ def imputer(self, covariate_to_keep: list = None):
         print(self.df.head)

     def outlier_removal(
-        self,
-        n_components: int = None,
-        min_cluster_size: int = None,
-        min_samples_num: int = None,
-        n_iter: int = None,
+        self,
+        n_components: int = None,
+        min_cluster_size: int = None,
+        min_samples_num: int = None,
+        n_iter: int = None,
     ):
         logger.info("Removing outliers from imputed data ...")
         # Apply iterative outlier removal on imputed data
@@ -164,7 +164,7 @@ def outlier_removal(
         )

     def batch_correction(
-        self, n_components: int = None, tissue_parts_to_keep: int = None
+        self, n_components: int = None, tissue_parts_to_keep: int = None
     ):
         logger.info("Applying batch effect correction ...")
         # Plot PCA of uncorrected imputed data
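For orientation, the signatures above outline the Combiner pipeline (concatenate, impute, remove outliers, batch-correct). A minimal usage sketch, assuming only the constructor and method signatures visible in this diff; all argument values are illustrative, not taken from the repository:

    from ibaqpy.ibaq.combiner import Combiner

    # Hypothetical folder of IbaqNorm outputs and a hypothetical covariate
    combiner = Combiner(data_folder="ibaq-results/", covariate="tissue")
    combiner.imputer(covariate_to_keep=["lung", "liver"])  # hypothetical covariate values
    combiner.outlier_removal(
        n_components=5, min_cluster_size=10, min_samples_num=5, n_iter=3  # hypothetical tuning values
    )
    combiner.batch_correction(n_components=5)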
14 changes: 7 additions & 7 deletions ibaqpy/ibaq/peptide_normalization.py
@@ -540,18 +540,18 @@ def peptide_normalization(
         raise FileNotFoundError("The file does not exist.")

     print("Loading data..")
-    F = Feature(parquet)
+    feature = Feature(parquet)
     if sdrf:
         technical_repetitions, label, sample_names, choice = analyse_sdrf(sdrf)
     else:
-        technical_repetitions, label, sample_names, choice = F.experimental_inference
-    low_frequency_peptides = F.low_frequency_peptides
+        technical_repetitions, label, sample_names, choice = feature.experimental_inference
+    low_frequency_peptides = feature.low_frequency_peptides
     header = False
     if not skip_normalization and pnmethod == "globalMedian":
-        med_map = F.get_median_map()
+        med_map = feature.get_median_map()
     elif not skip_normalization and pnmethod == "conditionMedian":
-        med_map = F.get_median_map_to_condition()
-    for samples, df in F.iter_samples():
+        med_map = feature.get_median_map_to_condition()
+    for samples, df in feature.iter_samples():
         for sample in samples:
             # Perform data preprocessing on every sample
             print(f"{str(sample).upper()}: Data preprocessing...")
@@ -638,4 +638,4 @@ def peptide_normalization(
             header = True

     if save_parquet:
-        F.csv2parquet(output)
+        feature.csv2parquet(output)
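The change in this file is a pure rename of the local variable F to the more descriptive feature; every Feature call site keeps the same API. A minimal sketch of that API, using only the attributes and methods visible in these hunks (the input path is hypothetical, and Feature is assumed to be defined in ibaqpy/ibaq/peptide_normalization.py):

    from ibaqpy.ibaq.peptide_normalization import Feature

    feature = Feature("PXD000000-feature.parquet")  # hypothetical parquet produced upstream
    # Infer the experimental design when no SDRF file is supplied
    technical_repetitions, label, sample_names, choice = feature.experimental_inference
    low_frequency_peptides = feature.low_frequency_peptides
    med_map = feature.get_median_map()  # "globalMedian" normalization branch
    for samples, df in feature.iter_samples():
        pass  # per-sample preprocessing, as in peptide_normalization()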
