From 7c62610bd4674188bb3f3e4d58ae18248405cf6d Mon Sep 17 00:00:00 2001 From: Michele Peresano Date: Thu, 1 Apr 2021 11:19:46 +0200 Subject: [PATCH 1/9] Update setup.py --- setup.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 7ff4cdb2..5c9b8283 100644 --- a/setup.py +++ b/setup.py @@ -12,14 +12,14 @@ "gammapy", "pytest", "numpy", - "ctapipe==0.9.1", + "ctapipe==0.10.5", "pyirf", ], "tests": [ "pytest", "pytest-cov", + "pytest-dependency", "codecov", - "ctapipe-extra @ https://github.com/cta-observatory/ctapipe-extra/archive/v0.3.1.tar.gz", ], } @@ -43,7 +43,7 @@ packages=find_packages(), package_data={"protopipe": ["aux/example_config_files/analysis.yaml"]}, include_package_data=True, - install_requires=["ctapipe==0.9.1", "pyirf"], + install_requires=["ctapipe==0.10.5", "pyirf"], zip_safe=False, use_scm_version={"write_to": os.path.join("protopipe", "_version.py")}, tests_require=extras_require["tests"], From ac3954719f70ecfa7b4c5e7739990949c31f8fff Mon Sep 17 00:00:00 2001 From: Michele Peresano Date: Thu, 1 Apr 2021 11:20:19 +0200 Subject: [PATCH 2/9] Make the old conda environment a development-specific environment --- environment.yml => environment_development.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) rename environment.yml => environment_development.yml (78%) diff --git a/environment.yml b/environment_development.yml similarity index 78% rename from environment.yml rename to environment_development.yml index eb583a9c..0bf09877 100644 --- a/environment.yml +++ b/environment_development.yml @@ -1,12 +1,11 @@ -name: protopipe +name: protopipe-dev channels: - defaults - - cta-observatory dependencies: + - python >=3.7,<3.9 - pip - - ctapipe=0.9.1 + - conda-forge::ctapipe=0.10.5 - conda-forge::gammapy - - ctapipe-extra - astropy>=4.0.1 - h5py=2 - ipython @@ -23,3 +22,4 @@ dependencies: - conda-forge::vitables - pip: - pyirf + - pytest-dependency From ab5859857f26edc663357f7ae1478befd0ba5942 Mon Sep 17 00:00:00 2001 From: Michele Peresano Date: Thu, 1 Apr 2021 11:20:35 +0200 Subject: [PATCH 3/9] Add a conda environment for the latest released version --- environment_latest_release.yml | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 environment_latest_release.yml diff --git a/environment_latest_release.yml b/environment_latest_release.yml new file mode 100644 index 00000000..9a03ef78 --- /dev/null +++ b/environment_latest_release.yml @@ -0,0 +1,8 @@ +name: protopipe +channels: + - defaults +dependencies: + - python >=3.7,<3.9 + - pip + - pip: + - protopipe From ef02fd34bf40590537c72ea32a59498a7423f33a Mon Sep 17 00:00:00 2001 From: Michele Peresano Date: Thu, 1 Apr 2021 11:20:57 +0200 Subject: [PATCH 4/9] Update protopipe.pipeline.utils --- protopipe/pipeline/utils.py | 40 ++++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/protopipe/pipeline/utils.py b/protopipe/pipeline/utils.py index 82dcc995..6d2e5675 100644 --- a/protopipe/pipeline/utils.py +++ b/protopipe/pipeline/utils.py @@ -2,12 +2,13 @@ import yaml import argparse import math +import joblib import astropy.units as u import matplotlib.pyplot as plt import os.path as path -from ctapipe.io import event_source +from ctapipe.io import EventSource class bcolors: @@ -237,7 +238,7 @@ def prod3b_array(fileName, site, array): This will feed both the estimators and the image cleaning. 
""" - source = event_source(input_url=fileName, max_events=1) + source = EventSource(input_url=fileName, max_events=1) for event in source: # get only first event pass @@ -440,7 +441,7 @@ def CTAMARS_radii(camera_name): Name of the camera. Returns - ---------- + ------- average_camera_radii_deg : dict Dictionary containing the hard-coded values. """ @@ -480,3 +481,36 @@ def get_camera_names(inputPath=None): camera_names = [x.name for x in group._f_list_nodes()] h5file.close() return camera_names + + +def load_models(path, cam_id_list): + """Load the pickled dictionary of model from disk + and fill the model dictionary. + + Parameters + ---------- + path : string + The path where the pre-trained, pickled models are + stored. `path` is assumed to contain a `{cam_id}` keyword + to be replaced by each camera identifier in `cam_id_list` + (or at least a naked `{}`). + cam_id_list : list + List of camera identifiers like telescope ID or camera ID + and the assumed distinguishing feature in the filenames of + the various pickled regressors. + + Returns + ------- + model_dict: dict + Dictionary with `cam_id` as keys and pickled models as values. + + """ + + model_dict = {} + for key in cam_id_list: + try: + model_dict[key] = joblib.load(path.format(cam_id=key)) + except IndexError: + model_dict[key] = joblib.load(path.format(key)) + + return model_dict From 8e8c98c0aaa552285dfcff87e15342e776890175 Mon Sep 17 00:00:00 2001 From: Michele Peresano Date: Thu, 1 Apr 2021 11:21:17 +0200 Subject: [PATCH 5/9] Update protopipe.pipeline.event_preparer --- protopipe/pipeline/event_preparer.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/protopipe/pipeline/event_preparer.py b/protopipe/pipeline/event_preparer.py index fffeb3b5..87d62606 100644 --- a/protopipe/pipeline/event_preparer.py +++ b/protopipe/pipeline/event_preparer.py @@ -11,7 +11,7 @@ from ctapipe.containers import ReconstructedShowerContainer from ctapipe.calib import CameraCalibrator from ctapipe.image.extractor import TwoPassWindowSum -from ctapipe.image import leakage, number_of_islands, largest_island +from ctapipe.image import leakage_parameters, number_of_islands, largest_island from ctapipe.utils.CutFlow import CutFlow from ctapipe.coordinates import GroundFrame, TelescopeFrame, CameraFrame @@ -317,7 +317,7 @@ def prepare_event(self, source, return_stub=True, save_images=False, debug=False ) print( bcolors.BOLD - + f"has triggered telescopes {event.r0.tels_with_data}" + + f"has triggered telescopes {event.r1.tel.keys()}" + bcolors.ENDC ) ievt += 1 @@ -326,7 +326,7 @@ def prepare_event(self, source, return_stub=True, save_images=False, debug=False self.event_cutflow.count("noCuts") - if self.event_cutflow.cut("min2Tels trig", len(event.dl0.tels_with_data)): + if self.event_cutflow.cut("min2Tels trig", len(event.r1.tel.keys())): if return_stub: print( bcolors.WARNING @@ -361,7 +361,7 @@ def prepare_event(self, source, return_stub=True, save_images=False, debug=False hillas_dict = {} # for discrimination leakage_dict = {} n_tels = { - "Triggered": len(event.dl0.tels_with_data), + "Triggered": len(event.r1.tel.keys()), "LST_LST_LSTCam": 0, "MST_MST_NectarCam": 0, "MST_MST_FlashCam": 0, @@ -378,16 +378,16 @@ def prepare_event(self, source, return_stub=True, save_images=False, debug=False good_for_reco = {} # 1 = success, 0 = fail - # Compute impact parameter in tilt system - run_array_direction = event.mcheader.run_array_direction - az, alt = run_array_direction[0], run_array_direction[1] + # 
Array pointing in AltAz frame + az = event.pointing.array_azimuth + alt = event.pointing.array_altitude ground_frame = GroundFrame() - for tel_id in event.dl0.tels_with_data: + for tel_id in event.r1.tel.keys(): - point_azimuth_dict[tel_id] = event.mc.tel[tel_id].azimuth_raw * u.rad - point_altitude_dict[tel_id] = event.mc.tel[tel_id].altitude_raw * u.rad + point_azimuth_dict[tel_id] = event.pointing.tel[tel_id].azimuth + point_altitude_dict[tel_id] = event.pointing.tel[tel_id].altitude if debug: print( @@ -411,7 +411,7 @@ def prepare_event(self, source, return_stub=True, save_images=False, debug=False if save_images is True: # Save the simulated and reconstructed image of the event dl1_phe_image[tel_id] = pmt_signal - mc_phe_image[tel_id] = event.mc.tel[tel_id].true_image + mc_phe_image[tel_id] = event.simulation.tel[tel_id].true_image # We now ASSUME that the event will be good good_for_reco[tel_id] = 1 @@ -429,7 +429,7 @@ def prepare_event(self, source, return_stub=True, save_images=False, debug=False # The check on SIZE shouldn't be here, but for the moment # I prefer to sacrifice elegancy... if np.sum(image_biggest[mask_reco]) != 0.0: - leakage_biggest = leakage(camera, image_biggest, mask_reco) + leakage_biggest = leakage_parameters(camera, image_biggest, mask_reco) leakages["leak1_reco"] = leakage_biggest["intensity_width_1"] leakages["leak2_reco"] = leakage_biggest["intensity_width_2"] else: @@ -469,7 +469,7 @@ def prepare_event(self, source, return_stub=True, save_images=False, debug=False # calculate the leakage (before filtering) # this part is not well coded, but for the moment it works if np.sum(image_extended[mask_extended]) != 0.0: - leakage_extended = leakage( + leakage_extended = leakage_parameters( camera, image_extended, mask_extended ) leakages["leak1"] = leakage_extended["intensity_width_1"] From b9cfe24be0f9e412a95d7c769a04bac4d9af9b3f Mon Sep 17 00:00:00 2001 From: Michele Peresano Date: Thu, 1 Apr 2021 11:21:32 +0200 Subject: [PATCH 6/9] Update data_training script --- protopipe/scripts/data_training.py | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/protopipe/scripts/data_training.py b/protopipe/scripts/data_training.py index d8d20166..f92388b8 100755 --- a/protopipe/scripts/data_training.py +++ b/protopipe/scripts/data_training.py @@ -10,8 +10,7 @@ import tables as tb from ctapipe.utils.CutFlow import CutFlow -from ctapipe.io import event_source -from ctapipe.reco.energy_regressor import EnergyRegressor +from ctapipe.io import EventSource from protopipe.pipeline import EventPreparer from protopipe.pipeline.utils import ( @@ -21,6 +20,7 @@ load_config, SignalHandler, bcolors, + load_models, ) @@ -116,7 +116,7 @@ def main(): } ) - regressor = EnergyRegressor.load(reg_file, cam_id_list=cams_and_foclens.keys()) + regressors = load_models(reg_file, cam_id_list=cams_and_foclens.keys()) # COLUMN DESCRIPTOR AS DICTIONARY # Column descriptor for the file containing output training data.""" @@ -207,7 +207,7 @@ def main(): print("file: {} filename = {}".format(i, filename)) - source = event_source( + source = EventSource( input_url=filename, allowed_tels=allowed_tels, max_events=args.max_events ) @@ -234,18 +234,15 @@ def main(): source, save_images=args.save_images, debug=args.debug ): - # Angular quantities - run_array_direction = event.mcheader.run_array_direction - if good_event: xi = angular_separation( - event.mc.az, event.mc.alt, reco_result.az, reco_result.alt + event.simulation.shower.az, event.simulation.shower.alt, 
reco_result.az, reco_result.alt ) offset = angular_separation( - run_array_direction[0], # az - run_array_direction[1], # alt + event.pointing.array_azimuth, + event.pointing.array_altitude, reco_result.az, reco_result.alt, ) @@ -295,7 +292,7 @@ def main(): cam_id = source.subarray.tel[tel_id].camera.camera_name moments = hillas_dict[tel_id] - model = regressor.model_dict[cam_id] + model = regressors[cam_id] features_img = np.array( [ @@ -377,7 +374,7 @@ def main(): outData[cam_id]["h_max"] = h_max.to("m").value outData[cam_id]["err_est_pos"] = np.nan outData[cam_id]["err_est_dir"] = np.nan - outData[cam_id]["true_energy"] = event.mc.energy.to("TeV").value + outData[cam_id]["true_energy"] = event.simulation.shower.energy.to("TeV").value outData[cam_id]["hillas_x"] = moments.x.to("deg").value outData[cam_id]["hillas_y"] = moments.y.to("deg").value outData[cam_id]["hillas_phi"] = moments.phi.to("deg").value @@ -392,13 +389,13 @@ def main(): outData[cam_id]["hillas_ellipticity"] = ellipticity.value outData[cam_id]["clusters"] = n_cluster_dict[tel_id] outData[cam_id]["n_tel_discri"] = n_tels["GOOD images"] - outData[cam_id]["mc_core_x"] = event.mc.core_x.to("m").value - outData[cam_id]["mc_core_y"] = event.mc.core_y.to("m").value + outData[cam_id]["mc_core_x"] = event.simulation.shower.core_x.to("m").value + outData[cam_id]["mc_core_y"] = event.simulation.shower.core_y.to("m").value outData[cam_id]["reco_core_x"] = reco_core_x.to("m").value outData[cam_id]["reco_core_y"] = reco_core_y.to("m").value - outData[cam_id]["mc_h_first_int"] = event.mc.h_first_int.to("m").value + outData[cam_id]["mc_h_first_int"] = event.simulation.shower.h_first_int.to("m").value outData[cam_id]["offset"] = offset.to("deg").value - outData[cam_id]["mc_x_max"] = event.mc.x_max.value # g / cm2 + outData[cam_id]["mc_x_max"] = event.simulation.shower.x_max.value # g / cm2 outData[cam_id]["alt"] = reco_result.alt.to("deg").value outData[cam_id]["az"] = reco_result.az.to("deg").value outData[cam_id]["reco_energy_tel"] = reco_energy_tel[tel_id] From 7745ab74a30641a8ea515b26ef448c1cd0114f04 Mon Sep 17 00:00:00 2001 From: Michele Peresano Date: Thu, 1 Apr 2021 11:21:41 +0200 Subject: [PATCH 7/9] Update DL2 script --- protopipe/scripts/write_dl2.py | 85 +++++++++++++++++++--------------- 1 file changed, 48 insertions(+), 37 deletions(-) diff --git a/protopipe/scripts/write_dl2.py b/protopipe/scripts/write_dl2.py index f6e6244a..dbb94da0 100755 --- a/protopipe/scripts/write_dl2.py +++ b/protopipe/scripts/write_dl2.py @@ -9,11 +9,8 @@ import astropy.units as u # ctapipe -# from ctapipe.io import EventSourceFactory -from ctapipe.io import event_source +from ctapipe.io import EventSource from ctapipe.utils.CutFlow import CutFlow -from ctapipe.reco.energy_regressor import EnergyRegressor -from ctapipe.reco.event_classifier import EventClassifier # Utilities from protopipe.pipeline import EventPreparer @@ -23,12 +20,11 @@ prod3b_array, str2bool, load_config, + load_models, SignalHandler, ) -# from memory_profiler import profile -# @profile def main(): # Argument parser @@ -148,7 +144,7 @@ def main(): "cam_id": "{cam_id}", } ) - classifier = EventClassifier.load(clf_file, cam_id_list=cams_and_foclens.keys()) + classifiers = load_models(clf_file, cam_id_list=cams_and_foclens.keys()) if args.debug: print( bcolors.OKBLUE @@ -171,7 +167,7 @@ def main(): "cam_id": "{cam_id}", } ) - regressor = EnergyRegressor.load(reg_file, cam_id_list=cams_and_foclens.keys()) + regressors = load_models(reg_file, cam_id_list=cams_and_foclens.keys()) if 
args.debug: print( bcolors.OKBLUE @@ -186,11 +182,12 @@ def main(): signal.signal(signal.SIGINT, signal_handler) # Declaration of the column descriptor for the (possible) images file - class StoredImages(tb.IsDescription): - event_id = tb.Int32Col(dflt=1, pos=0) + StoredImages = dict( + event_id = tb.Int32Col(dflt=1, pos=0), tel_id = tb.Int16Col(dflt=1, pos=1) - dl1_phe_image = tb.Float32Col(shape=(1855), pos=2) - mc_phe_image = tb.Float32Col(shape=(1855), pos=3) + # reco_image, true_image and cleaning_mask_reco + # are defined later sicne they depend on the number of pixels + ) # this class defines the reconstruction parameters to keep track of class RecoEvent(tb.IsDescription): @@ -249,7 +246,7 @@ class RecoEvent(tb.IsDescription): for i, filename in enumerate(filenamelist): - source = event_source( + source = EventSource( input_url=filename, allowed_tels=allowed_tels, max_events=args.max_events ) # loop that cleans and parametrises the images and performs the reconstruction @@ -275,15 +272,15 @@ class RecoEvent(tb.IsDescription): ): # True energy - true_energy = event.mc.energy.value + true_energy = event.simulation.shower.energy.value # True direction - true_az = event.mc.az - true_alt = event.mc.alt + true_az = event.simulation.shower.az + true_alt = event.simulation.shower.alt - # Angular quantities - run_array_direction = event.mcheader.run_array_direction - pointing_az, pointing_alt = run_array_direction[0], run_array_direction[1] + # Array pointing in AltAz frame + pointing_az = event.pointing.array_azimuth + pointing_alt = event.pointing.array_altitude if good_event: # aka it has been successfully reconstructed @@ -291,7 +288,7 @@ class RecoEvent(tb.IsDescription): # - true direction # - reconstruted direction xi = angular_separation( - event.mc.az, event.mc.alt, reco_result.az, reco_result.alt + event.simulation.shower.az, event.simulation.shower.alt, reco_result.az, reco_result.alt ) # Angular separation between @@ -342,7 +339,7 @@ class RecoEvent(tb.IsDescription): cam_id = source.subarray.tel[tel_id].camera.camera_name moments = hillas_dict[tel_id] - model = regressor.model_dict[cam_id] + model = regressors[cam_id] # Features to be fed in the regressor features_img = np.array( @@ -394,7 +391,7 @@ class RecoEvent(tb.IsDescription): for idx, tel_id in enumerate(hillas_dict.keys()): cam_id = source.subarray.tel[tel_id].camera.camera_name moments = hillas_dict[tel_id] - model = classifier.model_dict[cam_id] + model = classifiers[cam_id] # Features to be fed in the classifier # this should be read in some way from # the classifier configuration file!!!!! 
@@ -482,28 +479,43 @@ class RecoEvent(tb.IsDescription): # If the user wants to save the images of the run if args.save_images is True: for idx, tel_id in enumerate(hillas_dict.keys()): - cam_id = event.inst.subarray.tel[tel_id].camera.cam_id + cam_id = source.subarray.tel[tel_id].camera.camera_name if cam_id not in images_phe: + + n_pixels = source.subarray.tel[tel_id].camera.geometry.n_pixels + StoredImages["true_image"] = tb.Float32Col( + shape=(n_pixels), pos=2 + ) + StoredImages["reco_image"] = tb.Float32Col( + shape=(n_pixels), pos=3 + ) + StoredImages["cleaning_mask_reco"] = tb.BoolCol( + shape=(n_pixels), pos=4 + ) # not in ctapipe + StoredImages["cleaning_mask_clusters"] = tb.BoolCol( + shape=(n_pixels), pos=5 + ) # not in ctapipe + images_table[cam_id] = images_outfile.create_table( "/", "_".join(["images", cam_id]), StoredImages ) - images_phe[cam_id] = images_table[cam_id].row + images_phe[cam_id] = images_table[cam_id].row - images_phe[cam_id]["event_id"] = event.r0.event_id - images_phe[cam_id]["tel_id"] = tel_id - images_phe[cam_id]["reco_image"] = reco_image[tel_id] - images_phe[cam_id]["true_image"] = true_image[tel_id] - images_phe[cam_id]["cleaning_mask_reco"] = cleaning_mask_reco[tel_id] - images_phe[cam_id]["cleaning_mask_clusters"] = cleaning_mask_clusters[ - tel_id - ] + images_phe[cam_id]["event_id"] = event.index.event_id + images_phe[cam_id]["tel_id"] = tel_id + images_phe[cam_id]["reco_image"] = reco_image[tel_id] + images_phe[cam_id]["true_image"] = true_image[tel_id] + images_phe[cam_id]["cleaning_mask_reco"] = cleaning_mask_reco[tel_id] + images_phe[cam_id]["cleaning_mask_clusters"] = cleaning_mask_clusters[ + tel_id + ] - images_phe[cam_id].append() + images_phe[cam_id].append() # Now we start recording the data to file reco_event["event_id"] = event.index.event_id reco_event["obs_id"] = event.index.obs_id - reco_event["NTels_trig"] = len(event.dl0.tels_with_data) + reco_event["NTels_trig"] = len(event.r1.tel.keys()) reco_event["NTels_reco"] = len(hillas_dict) reco_event["NTels_reco_lst"] = n_tels["LST_LST_LSTCam"] reco_event["NTels_reco_mst"] = ( @@ -536,10 +548,10 @@ class RecoEvent(tb.IsDescription): reco_event["ErrEstDir"] = np.nan # Simulated information - shower = event.mc + shower = event.simulation.shower mc_core_x = shower.core_x mc_core_y = shower.core_y - reco_event["true_energy"] = event.mc.energy.to("TeV").value + reco_event["true_energy"] = shower.energy.to("TeV").value reco_event["true_az"] = true_az.to("deg").value reco_event["true_alt"] = true_alt.to("deg").value reco_event["true_core_x"] = mc_core_x.to("m").value @@ -561,7 +573,6 @@ class RecoEvent(tb.IsDescription): for table in images_table.values(): table.flush() - # Add in meta-data's table? try: print() evt_cutflow() From 82c8fd2e854f89ec9f4d989e3bc600b58c3ce230 Mon Sep 17 00:00:00 2001 From: Michele Peresano Date: Thu, 1 Apr 2021 11:28:34 +0200 Subject: [PATCH 8/9] Update documentation --- docs/install/development.rst | 4 ++-- docs/install/release.rst | 6 +++++- docs/pipeline/index.rst | 2 +- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/install/development.rst b/docs/install/development.rst index fed9b237..08cf4737 100644 --- a/docs/install/development.rst +++ b/docs/install/development.rst @@ -4,11 +4,11 @@ Development version =================== 1. `fork `__ the `repository `_ - 2. create and enter a basic virtual environment (or use the ``environment.yaml`` file) + 2. 
create a virtual environment (Anaconda users can use the ``environment_development.yml`` file) 3. ``pip install -e '.[all]'`` The ``all`` keyword will install all extra requirements, - which can be also installed separately using ``tests`` and ``docs``. + which can be also installed separately using ``tests`` and/or ``docs``. Next steps: diff --git a/docs/install/release.rst b/docs/install/release.rst index be755ab1..aeca0e2e 100644 --- a/docs/install/release.rst +++ b/docs/install/release.rst @@ -3,9 +3,13 @@ Released version ================ -To install a released version >= ``0.4.0`` it is sufficient to install the +To install the latest released version it is sufficient to install the package from ``PyPI`` with ``pip install protopipe``. +If you prefer to work from an Anaconda virtual environment you can create it with, + +``conda env create -f environment_latest_release.yml`` + For previous releases, 1. download the corresponding tarball stored `here `__ diff --git a/docs/pipeline/index.rst b/docs/pipeline/index.rst index 7e9f63ac..7fb8790d 100644 --- a/docs/pipeline/index.rst +++ b/docs/pipeline/index.rst @@ -106,4 +106,4 @@ Reference/API .. automodapi:: protopipe.pipeline :no-inheritance-diagram: :include-all-objects: - :skip: event_source + :skip: EventSource From 23dc1beda04277e394fd68324ceaf7714c1bd4d2 Mon Sep 17 00:00:00 2001 From: Michele Peresano Date: Thu, 1 Apr 2021 11:38:59 +0200 Subject: [PATCH 9/9] Update CI installation of conda environment --- .github/install.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/install.sh b/.github/install.sh index 33d1c2a3..83bab5d2 100644 --- a/.github/install.sh +++ b/.github/install.sh @@ -7,7 +7,7 @@ conda update -q conda # get latest conda version # Useful for debugging any issues with conda conda info -a -sed -i -e "s/- python=.*/- python=$PYTHON_VERSION/g" environment.yml +sed -i -e "s/- python=.*/- python=$PYTHON_VERSION/g" environment_development.yml conda install -c conda-forge mamba -mamba env create -n protopipe --file environment.yml +mamba env create -n protopipe --file environment_development.yml conda activate protopipe
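
The code changes in patches 4-7 all follow from the ctapipe 0.9.1 -> 0.10.5 bump declared in patch 1. As a compact reference (not part of the patches themselves), the renamed imports and event-model accessors used throughout read roughly as follows; the input file name is a placeholder, everything else is taken from the diffs above:

    from ctapipe.io import EventSource             # replaces event_source
    from ctapipe.image import leakage_parameters   # replaces leakage (unused below, shown only for the rename)

    # placeholder file name, assumed to exist
    source = EventSource(input_url="gamma.simtel.gz", max_events=1)

    for event in source:
        triggered_tels = event.r1.tel.keys()             # was event.dl0.tels_with_data
        az = event.pointing.array_azimuth                # was event.mcheader.run_array_direction[0]
        alt = event.pointing.array_altitude              # was event.mcheader.run_array_direction[1]
        true_energy = event.simulation.shower.energy     # was event.mc.energy
        event_id = event.index.event_id                  # was event.r0.event_id
        for tel_id in triggered_tels:
            tel_az = event.pointing.tel[tel_id].azimuth           # was event.mc.tel[tel_id].azimuth_raw * u.rad
            true_image = event.simulation.tel[tel_id].true_image  # was event.mc.tel[tel_id].true_image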
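
Patch 4 also introduces protopipe.pipeline.utils.load_models, which patches 6 and 7 use in place of the EnergyRegressor.load / EventClassifier.load calls dropped together with ctapipe 0.9.1. A minimal usage sketch follows; the file pattern and camera names are illustrative assumptions (in the scripts the camera list comes from the subarray via cams_and_foclens.keys()):

    from protopipe.pipeline.utils import load_models

    # Hypothetical path pattern: one pickled model per camera, with the
    # "{cam_id}" placeholder expected by load_models.
    reg_file = "./estimators/regressor_{cam_id}.pkl.gz"

    # Hard-coded here only for illustration.
    cam_id_list = ["LSTCam", "NectarCam"]

    regressors = load_models(reg_file, cam_id_list=cam_id_list)

    # Each value is the unpickled model for that camera, used directly
    # where the old code looked up regressor.model_dict[cam_id].
    model = regressors["LSTCam"]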