
Commit

Add various modifications to support updated input config, plus small additions/improvements
samuel fogarty committed Jun 2, 2023
1 parent b493d27 commit 80185ab
Showing 9 changed files with 135 additions and 159 deletions.
Binary file modified: reco/__pycache__/build_events.cpython-39.pyc
Binary file modified: reco/__pycache__/calibrate.cpython-39.pyc
Binary file modified: reco/__pycache__/consts.cpython-39.pyc
Binary file modified: reco/__pycache__/preclustering.cpython-39.pyc
16 changes: 8 additions & 8 deletions reco/build_events.py
@@ -6,7 +6,6 @@
 from calibrate import *
 from preclustering import *
 import matplotlib.pyplot as plt
-import scipy.stats
 import h5py
 
 def cluster_packets(eps,min_samples,txyz):
@@ -19,14 +18,15 @@ def cluster_packets(eps,min_samples,txyz):
 def getEventIDs(txyz, mc_assn, tracks, event_ids):
     for i in range(len(txyz)):
         index = int(mc_assn[i][0][0])
+        tracks_index = tracks[index]
         try:
-            event_id = tracks[index]['eventID']
+            event_id = tracks_index['eventID']
         except:
-            event_id = tracks[index]['spillID']
+            event_id = tracks_index['spillID']
         event_ids[i] = event_id
 
 def build_charge_events_clusters(labels,dataword,txyz,v_ref,v_cm,v_ped,gain,unix,io_group,unique_ids,event_dtype,\
-                                 hits_size, hits_dtype,second,mc_assn,tracks):
+                                 hits_size, hits_dtype, second, mc_assn, tracks):
     ### Make hits and clusters datasets from DBSCAN clusters and corresponding hits
     # Inputs:
     #     labels: list of labels from DBSCAN
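The change above caches the `tracks[index]` row lookup and keeps the eventID/spillID fallback for truth files that label spills rather than events. A minimal runnable sketch of that pattern, with toy stand-in arrays (field values hypothetical):

```python
import numpy as np

# Toy stand-ins for the larnd-sim truth inputs (shapes/values hypothetical).
tracks = np.array([(7,), (7,), (9,)], dtype=[('eventID', 'i4')])
mc_assn = np.array([[[0, 1.0]], [[2, 1.0]]])   # per-packet (track index, fraction)

event_ids = np.zeros(len(mc_assn), dtype='i8')
for i in range(len(mc_assn)):
    index = int(mc_assn[i][0][0])
    tracks_index = tracks[index]        # look the row up once, reuse it below
    try:
        event_ids[i] = tracks_index['eventID']
    except (KeyError, ValueError):      # files that carry 'spillID' instead
        event_ids[i] = tracks_index['spillID']

print(event_ids)   # [7 9]
```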
@@ -122,15 +122,15 @@ def build_charge_events_clusters(labels,dataword,txyz,v_ref,v_cm,v_ped,gain,unix
     results['light_index'] = np.ones(len(n_vals), dtype='i4')*-1
     return results, hits
 
-def analysis(packets,pixel_xy,mc_assn,tracks,detector,hits_clusters_max_cindex,sec):
+def analysis(packets,pixel_xy,mc_assn,tracks,module,hits_clusters_max_cindex,sec):
     ## do charge reconstruction
     packet_type = packets['packet_type']
     pkt_7_mask = packet_type == 7
     pkt_4_mask = packet_type == 4
     pkt_0_mask = packet_type == 0
 
     # grab the PPS timestamps of pkt type 7s and correct for PACMAN clock drift
-    PPS_pt7 = PACMAN_drift(packets, detector,mc_assn)[pkt_7_mask].astype('i8')*1e-1*1e3 # ns
+    PPS_pt7 = PACMAN_drift(packets, module)[pkt_7_mask].astype('i8')*1e-1*1e3 # ns
 
     # assign a unix timestamp to each packet based on the timestamp of the previous packet type 4
     timestamps = packets['timestamp'].astype('i8')
@@ -142,14 +142,14 @@ def analysis(packets,pixel_xy,mc_assn,tracks,detector,hits_clusters_max_cindex,sec):
     unix = unix_timestamps[pkt_0_mask].astype('i8')
 
     # apply a few PPS timestamp corrections, and select only data packets for analysis
-    ts, packets, mc_assn, unix = timestamp_corrector(packets, mc_assn, unix, detector)
+    ts, packets, mc_assn, unix = timestamp_corrector(packets, mc_assn, unix, module)
     dataword = packets['dataword']
     io_group = packets['io_group']
 
     # zip up y, z, and t values for clustering
     txyz = zip_pixel_tyz(packets,ts, pixel_xy)
 
-    v_ped, v_cm, v_ref, gain, unique_ids = calibrations(packets, mc_assn, detector)
+    v_ped, v_cm, v_ref, gain, unique_ids = calibrations(packets, mc_assn, module)
     # cluster packets to find track-like charge events
     db = cluster_packets(eps, min_samples, txyz)
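`cluster_packets` itself is collapsed in this diff; a sketch of what a DBSCAN-based implementation looks like, assuming scikit-learn and toy values for `eps` and `min_samples` (the analysis code presumably takes the real values from the star-imported consts):

```python
import numpy as np
from sklearn.cluster import DBSCAN

def cluster_packets(eps, min_samples, txyz):
    # cluster hits in (t, x, y, z) space; nearby hits share a label
    return DBSCAN(eps=eps, min_samples=min_samples).fit(txyz)

# toy hits: two separated blobs plus one stray point
txyz = np.array([[0., 0., 0., 0.],
                 [1., 1., 0., 0.],
                 [50., 50., 0., 0.],
                 [51., 50., 0., 0.],
                 [500., 0., 0., 0.]])
db = cluster_packets(eps=20.0, min_samples=2, txyz=txyz)
print(db.labels_)   # [ 0  0  1  1 -1]; label -1 marks unclustered noise
```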
53 changes: 14 additions & 39 deletions reco/calibrate.py
@@ -3,13 +3,13 @@
 from collections import defaultdict
 from consts import *
 
-def calibrations(packets, mc_assn, detector):
+def calibrations(packets, mc_assn, module):
     # unique id for each pixel, not to be confused with larnd-sim's pixel id
     unique_ids = ((((packets['io_group'].astype(int)) * 256 \
         + packets['io_channel'].astype(int)) * 256 \
         + packets['chip_id'].astype(int)) * 64 \
         + packets['channel_id'].astype(int)).astype(str)
-    v_ped, v_cm, v_ref, gain = pedestal_and_config(unique_ids, mc_assn, detector)
+    v_ped, v_cm, v_ref, gain = pedestal_and_config(unique_ids, mc_assn, module)
    return v_ped, v_cm, v_ref, gain, unique_ids
 
 def adcs_to_ke(adcs, v_ref, v_cm, v_ped, gain):
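The `unique_ids` expression is a mixed-radix packing of the hardware address (io_group, io_channel, chip_id, channel_id) into one integer per pixel. A worked scalar version; the real code vectorizes over the packets array and casts to `str`:

```python
# Pack: io_group, io_channel, and chip_id each take a base-256 digit,
# channel_id a base-64 digit, so distinct addresses never collide.
io_group, io_channel, chip_id, channel_id = 1, 2, 31, 5
uid = ((io_group * 256 + io_channel) * 256 + chip_id) * 64 + channel_id
print(uid)   # 4229061

# The packing is invertible, so the hardware address can be recovered:
assert uid % 64 == channel_id
assert (uid // 64) % 256 == chip_id
assert (uid // 64 // 256) % 256 == io_channel
assert uid // 64 // 256 // 256 == io_group
```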
@@ -22,32 +22,17 @@ def adcs_to_ke(adcs, v_ref, v_cm, v_ped, gain):
     charge = (adcs.astype('float64')/float(ADC_COUNTS)*(v_ref - v_cm)+v_cm-v_ped)/gain * 1e-3
     return charge
 
-def PACMAN_drift(packets, detector, mc_assn):
+def PACMAN_drift(packets, module):
     # only supports module-0
     ts = packets['timestamp'].astype('i8')
-    if detector == 'module-0':
-        correction1 = [-9.597, 4.0021e-6]
-        correction2 = [-9.329, 1.1770e-6]
-    elif detector == 'module-1':
-        correction1 = [0.,0.]
-        correction2 = [0., 0.]
-    elif detector == 'module-2':
-        correction1 = [0.,0.]
-        correction2 = [0., 0.]
-    elif detector == 'module-3':
-        # assuming correction[0] is 0, certainly isn't exactly true
-        correction1 = [0., 3.267e-6]
-        correction2 = [0., -8.9467e-7]
-    if mc_assn is not None:
-        correction1 = [0.,0.]
-        correction2 = [0., 0.]
 
     mask_io1 = packets['io_group'] == 1
     mask_io2 = packets['io_group'] == 2
-    ts[mask_io1] = (packets[mask_io1]['timestamp'].astype('i8') - correction1[0]) / (1. + correction1[1])
-    ts[mask_io2] = (packets[mask_io2]['timestamp'].astype('i8') - correction2[0]) / (1. + correction2[1])
+    ts[mask_io1] = (packets[mask_io1]['timestamp'].astype('i8') - module.PACMAN_clock_correction1[0]) / (1. + module.PACMAN_clock_correction1[1])
+    ts[mask_io2] = (packets[mask_io2]['timestamp'].astype('i8') - module.PACMAN_clock_correction2[0]) / (1. + module.PACMAN_clock_correction2[1])
     return ts
 
-def timestamp_corrector(packets, mc_assn, unix, detector):
+def timestamp_corrector(packets, mc_assn, unix, module):
     # Corrects larpix clock timestamps due to slightly different PACMAN clock frequencies
     # (from module0_flow timestamp_corrector.py)
     ts = packets['timestamp'].astype('i8')
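For orientation, `adcs_to_ke` maps the ADC code onto the [v_cm, v_ref] voltage span, subtracts the pedestal, and divides by the gain. A worked example; ADC_COUNTS, vcm, and gain here are assumed typical values, not taken from this diff:

```python
ADC_COUNTS = 256                             # 8-bit ADC (assumed)
v_ref, v_cm, v_ped = 1300.0, 550.0, 580.0    # mV; vcm is illustrative
gain = 4e-3                                  # mV per electron (assumed)

adc = 100
voltage_mv = adc / ADC_COUNTS * (v_ref - v_cm) + v_cm - v_ped
charge_ke = voltage_mv / gain * 1e-3         # electrons -> thousands of electrons
print(round(voltage_mv, 2), round(charge_ke, 1))   # 262.97 65.7
```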
@@ -57,20 +42,20 @@ def timestamp_corrector(packets, mc_assn, unix, detector):
 
     if mc_assn is not None:
         mc_assn = mc_assn[packet_type_0]
-    if mc_assn is None and timestamp_cut:
+    if mc_assn is None and module.timestamp_cut:
         # cut needed due to noisy packets too close to PPS pulse
         # (unless this problem has been fixed in the hardware)
         timestamps = packets['timestamp']
         timestamp_data_cut = np.invert((timestamps > 2e7) | (timestamps < 1e6))
         ts = ts[timestamp_data_cut]
         packets = packets[timestamp_data_cut]
         unix = unix[timestamp_data_cut]
-    if mc_assn is None and PACMAN_clock_correction:
-        ts = PACMAN_drift(packets, detector, mc_assn).astype('i8')
+    if mc_assn is None and module.PACMAN_clock_correction:
+        ts = PACMAN_drift(packets, module).astype('i8')
 
     return ts, packets, mc_assn, unix
 
-def pedestal_and_config(unique_ids, mc_assn, detector):
+def pedestal_and_config(unique_ids, mc_assn, module):
     # function to open the pedestal and configuration files to get the dictionaries
     # for the values of v_ped, v_cm, and v_ref for individual pixels.
     # Values are fixed in simulation but vary in data depending on pixel.
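The timestamp cut keeps only packets whose PPS counter sits in a sane range: the larpix clock ticks at 10 MHz (each tick is 100 ns, as used in `analysis` above), so values below 1e6 sit within 0.1 s of the pulse and values above 2e7 are nonsense. A runnable sketch of just that mask:

```python
import numpy as np

timestamps = np.array([5e5, 3e6, 9.9e6, 2.5e7])
timestamp_data_cut = np.invert((timestamps > 2e7) | (timestamps < 1e6))
print(timestamp_data_cut)   # [False  True  True False]
```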
@@ -82,18 +67,6 @@ def pedestal_and_config(unique_ids, mc_assn, detector):
     # note: mc_truth information for simulation (None for data)
     # Returns:
     #     v_ped, v_cm, v_ref, gain arrays; size of packets dataset
-    if detector == 'module-0' and use_ped_config_files:
-        pedestal_file = 'pedestal/module-0/datalog_2021_04_02_19_00_46_CESTevd_ped.json'
-        config_file = 'config/module-0/evd_config_21-03-31_12-36-13.json'
-    elif detector == 'module-1' and use_ped_config_files:
-        pedestal_file = 'pedestal/module-1/packet_2022_02_08_01_40_31_CETevd_ped.json'
-        config_file = 'config/module-1/config_22-02-08_13-37-39.json'
-    elif detector == 'module-2' and use_ped_config_files:
-        pedestal_file = 'pedestal/module-2/ped-evd-2022_11_18_04_35_CET.json'
-        config_file = 'config/module-2/config-evd-2022_11_18_04_35_CET.json'
-    elif detector == 'module-3' and use_ped_config_files:
-        pedestal_file = 'pedestal/module-3/pedestal-diagnostic-packet-2023_01_28_22_33_CETevd_ped.json'
-        config_file = 'config/module-3/evd_config_23-01-29_11-12-16.json'
 
     config_dict = defaultdict(lambda: dict(
         vref_mv=1300,
@@ -102,7 +75,9 @@ def pedestal_and_config(unique_ids, mc_assn, detector):
     pedestal_dict = defaultdict(lambda: dict(
         pedestal_mv=580
     ))
-    if use_ped_config_files:
+    if module.use_ped_config_files:
+        pedestal_file = module.pedestal_file
+        config_file = module.config_file
         # reading the data from the file
         with open(pedestal_file,'r') as infile:
             for key, value in json.load(infile).items():
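The defaultdict construction gives every pixel nominal values unless the pedestal/config JSON files (now supplied per module as `module.pedestal_file` and `module.config_file`) override them. A self-contained sketch; the `vcm_mv` default is elided by this diff and 550 is assumed:

```python
from collections import defaultdict

config_dict = defaultdict(lambda: dict(vref_mv=1300, vcm_mv=550))
pedestal_dict = defaultdict(lambda: dict(pedestal_mv=580))

# a hypothetical per-pixel entry, as json.load would insert it
pedestal_dict['4229061'] = dict(pedestal_mv=592.3)

print(pedestal_dict['4229061']['pedestal_mv'])   # 592.3  (measured pixel)
print(pedestal_dict['12345']['pedestal_mv'])     # 580    (fallback default)
```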
42 changes: 23 additions & 19 deletions reco/light.py
@@ -4,27 +4,23 @@
 from tqdm import tqdm
 from consts import *
 
-def ADC_drift(timestamps, adc, detector):
+def ADC_drift(timestamps, adc, module):
     ## correct for the clock drift in the different ADCs
     # input: tai_ns
-    if detector == 'module-0':
-        slope_0 = -1.18e-7 # sn 175780172 (#314C)
-        slope_1 = 1.18e-7 # sn 175854781 (#54BD)
-    elif detector == 'module-3':
-        slope_0 = 0
-        slope_1 = 0
-    clock_correction_factor = 0.625
+    ADC_drift_slope_0 = module.ADC_drift_slope_0
+    ADC_drift_slope_1 = module.ADC_drift_slope_1
+    clock_correction_factor = module.clock_correction_factor
 
     if adc == 0:
-        slope = slope_0
+        slope = ADC_drift_slope_0
     elif adc == 1:
-        slope = slope_1
+        slope = ADC_drift_slope_1
     elif adc > 1 or adc < 0:
         raise ValueError('adc must be either 0 or 1.')
 
     return timestamps.astype('f8')/(1.0 + slope) * clock_correction_factor
 
-def into_array(events_file_0, events_file_1, batch_size, event_dtype, detector,adc_sn_1, adc_sn_2):
+def into_array(events_file_0, events_file_1, batch_size, event_dtype, module, adc_sn_1, adc_sn_2):
     events = np.zeros((0,), dtype=event_dtype)
     # loop through events in batch
     for i in range(batch_size):
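With the module-0 constants the removed branch hardcoded (slope_0 = -1.18e-7, clock_correction_factor = 0.625), the correction works out as follows; a sketch of the arithmetic, not the full function:

```python
import numpy as np

slope = -1.18e-7                # module-0, ADC 0 (from the removed branch)
clock_correction_factor = 0.625

tai_ns = np.array([1e9])        # one second of elapsed time, in ns
corrected = tai_ns / (1.0 + slope) * clock_correction_factor
print(corrected)                # [6.25000074e+08]: ~118 ns of drift undone, then rescaled
```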
@@ -34,9 +30,9 @@ def into_array(events_file_0, events_file_1, batch_size, event_dtype, detector,adc_sn_1, adc_sn_2):
         if event_time_0['tai_s'] == event_time_1['tai_s']:
             # get relevant info about event from each ADC
             event_tai_s = event_time_0[0]['tai_s']
-            if detector == 'module-0':
-                event_tai_ns = (0.5*(ADC_drift(event_time_0[0]['tai_ns'] + event_time_0[0]['tai_s']*1e9, 0, detector) + \
-                    ADC_drift(event_time_1[0]['tai_ns'] + event_time_1[0]['tai_s']*1e9, 1, detector))).astype('i8')
+            if module.detector == 'module-0':
+                event_tai_ns = (0.5*(ADC_drift(event_time_0[0]['tai_ns'] + event_time_0[0]['tai_s']*1e9, 0, module) + \
+                    ADC_drift(event_time_1[0]['tai_ns'] + event_time_1[0]['tai_s']*1e9, 1, module))).astype('i8')
             else:
                 event_tai_ns = (0.5*(event_time_0[0]['tai_ns'] + event_time_1[0]['tai_ns'])).astype('i8')
@@ -62,28 +58,36 @@
     return events
 
 # go through relevant seconds in .data files for LRS
-def read_light_files(adc_file_1, adc_file_2, nSec_start, nSec_end, detector, adc_steps, nchannels_adc1, nchannels_adc2, adc_sn_1,adc_sn_2):
+def read_light_files(module):
+
+    input_light_filename_1 = module.adc_folder + module.input_light_filename_1
+    input_light_filename_2 = module.adc_folder + module.input_light_filename_2
+    nSec_start = module.nSec_start_light
+    nSec_end = module.nSec_end_light
+    adc_sn_1 = (module.input_light_filename_1).split('_')[0]
+    adc_sn_2 = (module.input_light_filename_2).split('_')[0]
+
     go = True
-    event_dtype = np.dtype([('tai_s', '<i4'), ('tai_ns', '<i8'), ('unix', '<i8'), ('channel_'+adc_sn_1, 'u1' , (nchannels_adc1,)),('channel_'+adc_sn_2, 'u1' , (nchannels_adc2,)), ('voltage_'+adc_sn_1,'<i2',(nchannels_adc1,adc_steps)), ('voltage_'+adc_sn_2,'<i2',(nchannels_adc2,adc_steps)), ('light_unique_id', '<i4')])
+    event_dtype = np.dtype([('tai_s', '<i4'), ('tai_ns', '<i8'), ('unix', '<i8'), ('channel_'+adc_sn_1, 'u1' , (module.nchannels_adc1,)),('channel_'+adc_sn_2, 'u1' , (module.nchannels_adc2,)), ('voltage_'+adc_sn_1,'<i2',(module.nchannels_adc1, module.light_time_steps)), ('voltage_'+adc_sn_2,'<i2',(module.nchannels_adc2,module.light_time_steps)), ('light_unique_id', '<i4')])
     light_events_all = np.zeros((0,),dtype=event_dtype)
     # read through the LRS files to get the light triggers
-    with ADC64Reader(adc_file_1, adc_file_2) as reader:
+    with ADC64Reader(input_light_filename_1, input_light_filename_2) as reader:
         current_sec = 0
         pbar = tqdm(total=nSec_end-nSec_start, desc=' Seconds processed in the light data:')
         while go == True:
             events = reader.next(batch_size)
             # get matched events between multiple files
             events_file_0, events_file_1 = events
             # convert to numpy arrays for easier access
-            light_events = into_array(events_file_0, events_file_1, batch_size, event_dtype, detector, \
+            light_events = into_array(events_file_0, events_file_1, batch_size, event_dtype, module, \
                 adc_sn_1, adc_sn_2)
             # combine each batch together
             if current_sec >= nSec_start and current_sec <= nSec_end:
                 light_events_all = np.concatenate((light_events_all,light_events))
 
             set_of_tai_s = np.unique(light_events['tai_s'])
 
-            if detector == 'module-0':
+            if module.detector == 'module-0':
                 if len(set_of_tai_s) > 1 and np.max(set_of_tai_s) == 1:
                     if current_sec >= nSec_start and current_sec <= nSec_end:
                         pbar.update(1)
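The structured `event_dtype` packs the event times plus per-ADC channel lists and waveform arrays into one numpy record per light trigger. A runnable sketch with small assumed shapes and hypothetical serial-number prefixes (the real values come from the module config and the light filenames):

```python
import numpy as np

nchannels_adc1, nchannels_adc2, light_time_steps = 58, 58, 256   # assumed shapes
adc_sn_1, adc_sn_2 = '0cd913fb', '0cd93db0'                      # hypothetical prefixes

event_dtype = np.dtype([
    ('tai_s', '<i4'), ('tai_ns', '<i8'), ('unix', '<i8'),
    ('channel_' + adc_sn_1, 'u1', (nchannels_adc1,)),
    ('channel_' + adc_sn_2, 'u1', (nchannels_adc2,)),
    ('voltage_' + adc_sn_1, '<i2', (nchannels_adc1, light_time_steps)),
    ('voltage_' + adc_sn_2, '<i2', (nchannels_adc2, light_time_steps)),
    ('light_unique_id', '<i4'),
])

light_events_all = np.zeros((0,), dtype=event_dtype)   # batches concatenate onto this
print(event_dtype.itemsize)                            # bytes per event for these shapes
```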
21 changes: 13 additions & 8 deletions reco/matching.py
@@ -4,12 +4,14 @@
 from tqdm import tqdm
 
 # match the light triggers to packet type 7s in the packets dataset
-def match_light_to_ext_trigger(events, PPS_charge, unix_charge, matching_tolerance_unix, matching_tolerance_PPS):
+def match_light_to_ext_trigger(events, PPS_charge, unix_charge, module):
     ## INPUT
     # events: events array produced by read_light_files()
     # PPS_charge: time since last PPS of each packet type 7
     # unix_charge: unix time of each packet type 7
 
+    matching_tolerance_unix = module.ext_trig_matching_tolerance_unix
+    matching_tolerance_PPS = module.ext_trig_matching_tolerance_PPS
+
     # loop through light triggers and match to packet type 7s
     matched_triggers_unix = np.zeros_like(unix_charge, dtype=bool)
     matched_triggers_PPS = np.zeros_like(unix_charge, dtype=bool)
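The matching loop body is collapsed in this diff; the comparison it presumably performs, unix and PPS agreement within the module-level tolerances, can be sketched as follows (tolerance and timestamp values illustrative):

```python
import numpy as np

matching_tolerance_unix = 1          # s (illustrative)
matching_tolerance_PPS = 1000        # ns (illustrative)

unix_charge = np.array([1651000000, 1651000000, 1651000007])
PPS_charge = np.array([250_000_100, 900_000_000, 250_000_100])   # ns since PPS

light_unix, light_PPS = 1651000000, 250_000_400
matched = (np.abs(unix_charge - light_unix) <= matching_tolerance_unix) & \
          (np.abs(PPS_charge - light_PPS) <= matching_tolerance_PPS)
print(np.where(matched)[0])   # [0]: only the first packet type 7 matches on both
```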
@@ -39,33 +41,36 @@ def match_light_to_ext_trigger(events, PPS_charge, unix_charge, matching_tolerance_unix, matching_tolerance_PPS):
     print('Total matched triggers based on PPS only = ', np.sum(matched_triggers_PPS), ' out of ', len(unix_charge), ' total triggers.')
     return indices_in_ext_triggers
 
-def match_light_to_charge(light_events, charge_events, PPS_ext_triggers, unix_ext_triggers):
+def match_light_to_charge(light_events, charge_events, PPS_ext_triggers, unix_ext_triggers, module):
     ### match light triggers to charge events
     charge_event_ns_min = charge_events['t_min']
     charge_event_ns_max = charge_events['t_max']
     charge_event_unix = charge_events['unix']
 
-    PPS_window = int(drift_distance / v_drift * 1e3)
-    unix_window = 1 # s
+    charge_light_matching_PPS_window = module.charge_light_matching_PPS_window
+    charge_light_matching_unix_window = module.charge_light_matching_unix_window
     matched_light_index = 0
     indices_in_light_events = []
 
     # loop through light events and check for charge event within a drift window
-    for i in tqdm(range(len(light_events)), desc = ' Matching light events to charge events: '):
+    for i in tqdm(range(len(light_events)), desc = ' Matching light triggers to charge clusters: '):
         light_event = light_events[i]
         PPS_ext_trigger = PPS_ext_triggers[i]
         unix_ext_trigger = unix_ext_triggers[i]
         light_event['light_unique_id'] = i
 
-        matched_events_PPS = (charge_event_ns_min > PPS_ext_trigger - 0.25*PPS_window) & (charge_event_ns_max < PPS_ext_trigger + 1.25*PPS_window)
-        matched_events_unix = (charge_event_unix > unix_ext_trigger-unix_window) & (charge_event_unix < unix_ext_trigger + unix_window)
+        matched_events_PPS = (charge_event_ns_min > PPS_ext_trigger - 0.25*charge_light_matching_PPS_window) & (charge_event_ns_max < PPS_ext_trigger + 1.25*charge_light_matching_PPS_window)
+        matched_events_unix = (charge_event_unix > unix_ext_trigger-charge_light_matching_unix_window) & (charge_event_unix < unix_ext_trigger + charge_light_matching_unix_window)
         matched_events = (matched_events_PPS) & (matched_events_unix)
         matched_events_indices = np.where(matched_events)[0]
 
         if len(matched_events_indices) > 0:
             indices_in_light_events.append(i)
             for index in matched_events_indices:
                 charge_events[index]['matched'] = 1
                 charge_events[index]['light_index'] = matched_light_index
             matched_light_index += 1
 
     results_light_events = light_events[np.array(indices_in_light_events)]
     return charge_events, results_light_events
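The asymmetric window reflects the drift physics: charge from a light event arrives between zero and one full drift time after the trigger, so the window opens slightly early (0.25 w before) and closes late (1.25 w after) to allow for timing slop. A worked example with an illustrative 200 us window; the real value now comes from `module.charge_light_matching_PPS_window` rather than the old `drift_distance / v_drift` computation:

```python
import numpy as np

w = 200_000                    # PPS window, ns (illustrative)
trigger_PPS = 5_000_000        # external trigger time since PPS, ns

t_min = np.array([5_010_000, 4_960_000, 5_400_000])   # cluster start times
t_max = np.array([5_150_000, 4_990_000, 5_500_000])   # cluster end times

matched = (t_min > trigger_PPS - 0.25 * w) & (t_max < trigger_PPS + 1.25 * w)
print(np.where(matched)[0])    # [0 1]: the third cluster ends beyond the window
```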

