Coverage for src/km3modules/k40.py: 17%
435 statements
coverage.py v7.4.4, created at 2024-03-28 03:15 +0000
# Filename: k40.py
# -*- coding: utf-8 -*-
# pylint: disable=locally-disabled
"""
A collection of k40 related functions and modules.

"""
import os
from itertools import combinations
from collections import defaultdict
from functools import partial
from datetime import datetime
import io

import numpy as np
import pickle

import km3pipe as kp
import km3pipe.extras
from km3pipe.io.daq import TMCHData
log = kp.logger.get_logger(__name__)  # pylint: disable=C0103

try:
    from numba import jit
except ImportError:
    log.warning(
        "This module requires `numba` to be installed, otherwise "
        "the functions and Modules imported from this module can "
        "be painfully slow."
    )

    def jit(*args, **kwargs):
        """No-op fallback that also supports the `@jit(nopython=True)` form."""
        if len(args) == 1 and callable(args[0]) and not kwargs:
            return args[0]
        return lambda f: f
__author__ = "Jonas Reubelt"
__email__ = "jreubelt@km3net.de"
__status__ = "Development"

# log.setLevel(logging.DEBUG)

TIMESLICE_LENGTH = 0.1  # [s]

# Coefficients of the Monte Carlo angular distribution, used with
# `exponential_polinomial` when the angular fit is not performed.
MC_ANG_DIST = np.array([-0.72337394, 2.59196335, -0.43594182, 1.10514914])


class K40BackgroundSubtractor(kp.Module):
    """Subtracts random coincidence background from K40 data

    Notes
    -----
    Requires the services 'TwofoldCounts', 'GetMedianPMTRates' and 'GetLivetime'
    Writes 'CorrectedTwofoldCounts' into the blob: dict, corrected K40 counts

    """
    def configure(self):
        self.combs = list(combinations(range(31), 2))
        self.mode = self.get("mode", default="online")
        self.expose(self.get_corrected_counts, "GetCorrectedTwofoldCounts")
        self.corrected_counts = None

    def process(self, blob):
        if self.mode != "online":
            return blob
        self.cprint("Subtracting random background calculated from single rates")
        corrected_counts = self.subtract_background()
        blob["CorrectedTwofoldCounts"] = corrected_counts

        return blob

    def get_corrected_counts(self):
        return self.corrected_counts
    def subtract_background(self):
        counts = self.services["TwofoldCounts"]
        dom_ids = list(counts.keys())
        mean_rates = self.services["GetMedianPMTRates"]()
        corrected_counts = {}
        livetimes = self.services["GetLivetime"]()
        for dom_id in dom_ids:
            try:
                pmt_rates = mean_rates[dom_id]
            except KeyError:
                log.warning("Skipping BG correction for DOM {}.".format(dom_id))
                corrected_counts[dom_id] = counts[dom_id]
                continue
            livetime = livetimes[dom_id]
            k40_rates = counts[dom_id] / livetime
            bg_rates = []
            for c in self.combs:
                bg_rates.append(pmt_rates[c[0]] * pmt_rates[c[1]] * 1e-9)
            corrected_counts[dom_id] = (k40_rates.T - np.array(bg_rates)).T * livetime
        return corrected_counts
    def finish(self):
        if self.mode == "offline":
            self.cprint("Subtracting background calculated from summaryslices.")
            self.corrected_counts = self.subtract_background()

    def dump(self, mean_rates, corrected_counts, livetime):
        pickle.dump(mean_rates, open("mean_rates.p", "wb"))
        pickle.dump(
            {"data": corrected_counts, "livetime": livetime},
            open("k40_counts_bg_sub.p", "wb"),
        )
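
# Worked micro-example for the subtraction above (the 7 kHz figures are only
# illustrative, not taken from data): two PMTs counting at 7 kHz each give an
# expected random coincidence rate of 7e3 * 7e3 * 1e-9 = 0.049 Hz per 1 ns
# delta-t bin, which is what `pmt_rates[c[0]] * pmt_rates[c[1]] * 1e-9`
# computes before it is scaled back to counts with the DOM livetime.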


class IntraDOMCalibrator(kp.Module):
    """Intra DOM calibrator which performs the calibration from K40Counts.

    Parameters
    ----------
    det_id: int
        Detector ID [default: 14]
    ctmin: float
        Minimum cos(angle)
    mode: str ('offline' | 'online')
        Calibration mode [default: 'online']

    Notes
    -----
    Requires 'TwofoldCounts': dict
        (key=dom_id, value=matrix of k40 counts 465x(dt*2+1))
    Requires 'CorrectedTwofoldCounts': dict
        (key=dom_id, value=matrix of k40 counts 465x(dt*2+1))
    Writes 'IntraDOMCalibration' into the blob: dict (key=dom_id, value=calibration)

    """
    def configure(self):
        det_id = self.get("det_id") or 14
        self.detector = kp.hardware.Detector(det_id=det_id)
        self.ctmin = self.require("ctmin")
        self.mode = self.get("mode", default="online")
        self.calib_filename = self.get("calib_filename", default="k40_cal.p")
    def process(self, blob):
        if self.mode != "online":
            return blob

        if "CorrectedTwofoldCounts" in blob:
            log.info("Using corrected twofold counts")
            fit_background = False
            twofold_counts = blob["CorrectedTwofoldCounts"]
        else:
            log.info("No corrected twofold counts found, fitting background.")
            twofold_counts = self.services["TwofoldCounts"]
            fit_background = True

        blob["IntraDOMCalibration"] = self.calibrate(twofold_counts, fit_background)
        return blob
    def calibrate(self, twofold_counts, fit_background=False):
        self.cprint("Starting calibration:")
        calibration = {}

        for dom_id, data in twofold_counts.items():
            self.cprint(" calibrating DOM '{0}'".format(dom_id))
            try:
                livetime = self.services["GetLivetime"]()[dom_id]
                calib = calibrate_dom(
                    dom_id,
                    data,
                    self.detector,
                    livetime=livetime,
                    fit_background=fit_background,
                    ad_fit_shape="exp",
                    ctmin=self.ctmin,
                )
            except RuntimeError:
                log.error(" skipping DOM '{0}'.".format(dom_id))
            except KeyError:
                log.error(" skipping DOM '{0}', no livetime".format(dom_id))
            else:
                calibration[dom_id] = calib

        return calibration
    def finish(self):
        if self.mode == "offline":
            self.cprint("Starting offline calibration")
            if "GetCorrectedTwofoldCounts" in self.services:
                self.cprint("Using corrected twofold counts")
                twofold_counts = self.services["GetCorrectedTwofoldCounts"]()
                fit_background = False
            else:
                self.cprint("Using uncorrected twofold counts")
                twofold_counts = self.services["TwofoldCounts"]
                fit_background = True
            calibration = self.calibrate(twofold_counts, fit_background=fit_background)
            self.cprint("Dumping calibration to '{}'.".format(self.calib_filename))
            with open(self.calib_filename, "wb") as f:
                pickle.dump(calibration, f)
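
# Illustrative reader sketch: the calibration dumped in finish() is a plain
# pickle of a dict keyed by DOM ID, where each value is the dict returned by
# calibrate_dom() further below.
#
#     with open("k40_cal.p", "rb") as f:       # default calib_filename
#         calibration = pickle.load(f)
#     # calibration[dom_id]["opt_t0s"].x -> fitted time offsets per PMT
#     # calibration[dom_id]["opt_qes"].x -> fitted relative QEs per PMT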


class TwofoldCounter(kp.Module):
    """Counts twofold coincidences in timeslice hits per PMT combination.

    Parameters
    ----------
    'tmax': int
        time window of twofold coincidences [ns]
    'dump_filename': str
        name for the dump file

    Notes
    -----
    Requires the key 'TSHits': RawHitSeries
    Provides the following services:
        'TwofoldCounts': dict (key=dom_id, value=matrix (465, (dt*2+1)))
        'ResetTwofoldCounts': reset the TwofoldCounts dict
        'GetLivetime()': dict (key=dom_id, value=float)
        'DumpTwofoldCounts': Writes twofold counts into 'dump_filename'

    """
    def configure(self):
        self.tmax = self.get("tmax") or 20
        self.dump_filename = self.get("dump_filename")

        self.counts = None
        self.n_timeslices = None
        self.start_time = datetime.utcnow()
        self.reset()

        self.expose(self.counts, "TwofoldCounts")
        self.expose(self.get_livetime, "GetLivetime")
        self.expose(self.reset, "ResetTwofoldCounts")
        if self.dump_filename is not None:
            self.expose(self.dump, "DumpTwofoldCounts")

        if "GetSkippedFrames" in self.services:
            self.skipped_frames = self.services["GetSkippedFrames"]()
        else:
            self.skipped_frames = None
    def reset(self):
        """Reset coincidence counter"""
        self.counts = defaultdict(partial(np.zeros, (465, self.tmax * 2 + 1)))
        self.n_timeslices = defaultdict(int)

    def get_livetime(self):
        return {dom_id: n * TIMESLICE_LENGTH for dom_id, n in self.n_timeslices.items()}
    def process(self, blob):
        log.debug("Processing timeslice")

        hits = blob["TSHits"]
        frame_index = blob["TimesliceInfo"].frame_index
        dom_ids = set(np.unique(hits.dom_id))
        if self.skipped_frames is not None:
            skipped_dom_ids = set(self.skipped_frames[frame_index])
        else:
            skipped_dom_ids = set()

        for dom_id in dom_ids - skipped_dom_ids:
            self.n_timeslices[dom_id] += 1
            mask = hits.dom_id == dom_id
            times = hits.time[mask]
            channel_ids = hits.channel_id[mask]
            sort_idc = np.argsort(times, kind="quicksort")
            add_to_twofold_matrix(
                times[sort_idc],
                channel_ids[sort_idc],
                self.counts[dom_id],
                tmax=self.tmax,
            )
        return blob
    def dump(self):
        """Write coincidence counts into a Python pickle"""
        self.cprint("Dumping data to {}".format(self.dump_filename))
        pickle.dump(
            {"data": self.counts, "livetime": self.get_livetime()},
            open(self.dump_filename, "wb"),
        )


class HRVFIFOTimesliceFilter(kp.Module):
    """Create a frame index lookup table which holds DOM IDs of frames with
    at least one PMT in HRV."""

    def configure(self):
        filename = self.require("filename")
        filter_hrv = self.get("filter_hrv", default=False)
        self.expose(self.get_skipped_frames, "GetSkippedFrames")
        self.skipped_frames = defaultdict(list)
        p = kp.io.jpp.SummaryslicePump(filename=filename)
        for b in p:
            sum_info = b["SummarysliceInfo"]
            frame_index = sum_info.frame_index
            summaryslice = b["Summaryslice"]
            for dom_id, sf in summaryslice.items():
                if not sf["fifo_status"] or (filter_hrv and any(sf["hrvs"])):
                    self.skipped_frames[frame_index].append(dom_id)

    def get_skipped_frames(self):
        return self.skipped_frames


class SummaryMedianPMTRateService(kp.Module):
    def configure(self):
        self.expose(self.get_median_rates, "GetMedianPMTRates")
        self.filename = self.require("filename")

    def get_median_rates(self):
        rates = defaultdict(list)

        if "GetSkippedFrames" in self.services:
            skipped_frames = self.services["GetSkippedFrames"]()
        else:
            skipped_frames = None

        p = kp.io.jpp.SummaryslicePump(filename=self.filename)
        for b in p:
            sum_info = b["SummarysliceInfo"]
            frame_index = sum_info.frame_index
            summary = b["Summaryslice"]
            for dom_id in summary.keys():
                if skipped_frames is not None and dom_id in skipped_frames[frame_index]:
                    continue
                rates[dom_id].append(summary[dom_id]["rates"])

        median_rates = {}
        for dom_id in rates.keys():
            median_rates[dom_id] = np.median(rates[dom_id], axis=0)

        return median_rates


class MedianPMTRatesService(kp.Module):
    def configure(self):
        self.rates = defaultdict(lambda: defaultdict(list))
        self.expose(self.get_median_rates, "GetMedianPMTRates")

    def process(self, blob):
        try:
            tmch_data = TMCHData(io.BytesIO(blob["CHData"]))
        except ValueError as e:
            self.log.error(e)
            self.log.error("Skipping corrupt monitoring channel packet.")
            return
        dom_id = tmch_data.dom_id
        for channel_id, rate in enumerate(tmch_data.pmt_rates):
            self.rates[dom_id][channel_id].append(rate)

    def get_median_rates(self):
        self.cprint("Calculating median PMT rates.")
        median_rates = {}
        for dom_id in self.rates.keys():
            median_rates[dom_id] = [np.median(self.rates[dom_id][c]) for c in range(31)]
        self.rates = defaultdict(lambda: defaultdict(list))
        return median_rates


class ResetTwofoldCounts(kp.Module):
    def process(self, blob):
        if "DumpTwofoldCounts" in self.services:
            self.cprint("Request twofold dump...")
            self.services["DumpTwofoldCounts"]()
        self.cprint("Resetting twofold counts")
        self.services["ResetTwofoldCounts"]()
        return blob
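
# Illustrative wiring sketch for an offline calibration using the modules
# above. The pump and the file name are placeholders; any pump providing
# 'TSHits', 'TimesliceInfo' and summaryslices as required by the modules
# will do, and HRVFIFOTimesliceFilter has to be attached before the modules
# that look up 'GetSkippedFrames'.
#
#     pipe = kp.Pipeline()
#     pipe.attach(...)  # a timeslice pump providing 'TSHits'/'TimesliceInfo'
#     pipe.attach(HRVFIFOTimesliceFilter, filename="run.root")
#     pipe.attach(SummaryMedianPMTRateService, filename="run.root")
#     pipe.attach(TwofoldCounter, tmax=20)
#     pipe.attach(K40BackgroundSubtractor, mode="offline")
#     pipe.attach(IntraDOMCalibrator, det_id=14, ctmin=-1.0, mode="offline")
#     pipe.drain()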


def calibrate_dom(
    dom_id,
    data,
    detector,
    livetime=None,
    fit_ang_dist=False,
    scale_mc_to_data=True,
    ad_fit_shape="pexp",
    fit_background=True,
    ctmin=-1.0,
):
375 """Calibrate intra DOM PMT time offsets, efficiencies and sigmas
377 Parameters
378 ----------
379 dom_id: DOM ID
380 data: dict of coincidences or root or hdf5 file
381 detector: instance of detector class
382 livetime: data-taking duration [s]
383 fixed_ang_dist: fixing angular distribution e.g. for data mc comparison
384 auto_scale: auto scales the fixed angular distribution to the data
386 Returns
387 -------
388 return_data: dictionary with fit results
389 """
    if isinstance(data, str):
        filename = data
        loaders = {
            ".h5": load_k40_coincidences_from_hdf5,
            ".root": load_k40_coincidences_from_rootfile,
        }
        try:
            loader = loaders[os.path.splitext(filename)[1]]
        except KeyError:
            log.critical("File format not supported.")
            raise IOError
        else:
            data, livetime = loader(filename, dom_id)
    combs = np.array(list(combinations(range(31), 2)))
    angles = calculate_angles(detector, combs)
    cos_angles = np.cos(angles)
    angles = angles[cos_angles >= ctmin]
    data = data[cos_angles >= ctmin]
    combs = combs[cos_angles >= ctmin]

    try:
        fit_res = fit_delta_ts(data, livetime, fit_background=fit_background)
        rates, means, sigmas, popts, pcovs = fit_res
    except Exception:  # a failed delta-t fit invalidates the whole DOM
        return 0
    rate_errors = np.array([np.diag(pc)[2] for pc in pcovs])
    # mean_errors = np.array([np.diag(pc)[0] for pc in pcovs])
    scale_factor = None
    if fit_ang_dist:
        fit_res = fit_angular_distribution(
            angles, rates, rate_errors, shape=ad_fit_shape
        )
        fitted_rates, exp_popts, exp_pcov = fit_res
    else:
        mc_fitted_rates = exponential_polinomial(np.cos(angles), *MC_ANG_DIST)
        if scale_mc_to_data:
            scale_factor = np.mean(rates[angles < 1.5]) / np.mean(
                mc_fitted_rates[angles < 1.5]
            )
        else:
            scale_factor = 1.0
        fitted_rates = mc_fitted_rates * scale_factor
        exp_popts = []
        exp_pcov = []
        print("Using angular distribution from Monte Carlo")
    # t0_weights = np.array([0. if a>1. else 1. for a in angles])

    if not fit_background:
        minimize_weights = calculate_weights(fitted_rates, data)
    else:
        minimize_weights = fitted_rates

    opt_t0s = minimize_t0s(means, minimize_weights, combs)
    opt_sigmas = minimize_sigmas(sigmas, minimize_weights, combs)
    opt_qes = minimize_qes(fitted_rates, rates, minimize_weights, combs)
    corrected_means = correct_means(means, opt_t0s.x, combs)
    corrected_rates = correct_rates(rates, opt_qes.x, combs)
    rms_means, rms_corrected_means = calculate_rms_means(means, corrected_means)
    rms_rates, rms_corrected_rates = calculate_rms_rates(
        rates, fitted_rates, corrected_rates
    )
    cos_angles = np.cos(angles)
    return_data = {
        "opt_t0s": opt_t0s,
        "opt_qes": opt_qes,
        "data": data,
        "means": means,
        "rates": rates,
        "fitted_rates": fitted_rates,
        "angles": angles,
        "corrected_means": corrected_means,
        "corrected_rates": corrected_rates,
        "rms_means": rms_means,
        "rms_corrected_means": rms_corrected_means,
        "rms_rates": rms_rates,
        "rms_corrected_rates": rms_corrected_rates,
        "gaussian_popts": popts,
        "livetime": livetime,
        "exp_popts": exp_popts,
        "exp_pcov": exp_pcov,
        "scale_factor": scale_factor,
        "opt_sigmas": opt_sigmas,
        "sigmas": sigmas,
        "combs": combs,
    }
    return return_data
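
# Illustrative usage sketch (the DOM ID and file name are placeholders, not
# taken from real data): calibrate a single DOM straight from a JMonitorK40
# ROOT file, letting calibrate_dom pick the loader from the file extension.
#
#     det = kp.hardware.Detector(det_id=14)
#     result = calibrate_dom(808476701, "monitor.root", det)
#     if result != 0:
#         print(result["opt_t0s"].x)   # fitted time offsets per PMT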


def calculate_weights(fitted_rates, data):
    comb_mean_rates = np.mean(data, axis=1)
    greater_zero = np.array(comb_mean_rates > 0, dtype=int)
    return fitted_rates * greater_zero


def load_k40_coincidences_from_hdf5(filename, dom_id):
    """Load k40 coincidences from hdf5 file

    Parameters
    ----------
    filename: filename of hdf5 file
    dom_id: DOM ID

    Returns
    -------
    data: numpy array of coincidences
    livetime: duration of data-taking
    """
    import h5py

    with h5py.File(filename, "r") as h5f:
        data = h5f["/k40counts/{0}".format(dom_id)]
        livetime = data.attrs["livetime"]
        data = np.array(data)

    return data, livetime


def load_k40_coincidences_from_rootfile(filename, dom_id):
    """Load k40 coincidences from JMonitorK40 ROOT file

    Parameters
    ----------
    filename: root file produced by JMonitorK40
    dom_id: DOM ID

    Returns
    -------
    data: numpy array of coincidences
    dom_weight: weight to apply to coincidences to get rate in Hz
    """
    from ROOT import TFile

    root_file_monitor = TFile(filename, "READ")
    dom_name = str(dom_id) + ".2S"
    histo_2d_monitor = root_file_monitor.Get(dom_name)
    data = []
    for c in range(1, histo_2d_monitor.GetNbinsX() + 1):
        combination = []
        for b in range(1, histo_2d_monitor.GetNbinsY() + 1):
            combination.append(histo_2d_monitor.GetBinContent(c, b))
        data.append(combination)

    weights = {}
    weights_histo = root_file_monitor.Get("weights_hist")
    try:
        for i in range(1, weights_histo.GetNbinsX() + 1):
            # we have to read all the entries, unfortunately
            weight = weights_histo.GetBinContent(i)
            label = weights_histo.GetXaxis().GetBinLabel(i)
            weights[label[3:]] = weight
        dom_weight = weights[str(dom_id)]
    except AttributeError:
        log.info("Weights histogram broken or not found, setting weight to 1.")
        dom_weight = 1.0
    return np.array(data), dom_weight


def gaussian(x, mean, sigma, rate, offset):
    return (
        rate / np.sqrt(2 * np.pi) / sigma * np.exp(-0.5 * (x - mean) ** 2 / sigma**2)
        + offset
    )


def gaussian_wo_offset(x, mean, sigma, rate):
    return (
        rate / np.sqrt(2 * np.pi) / sigma * np.exp(-0.5 * (x - mean) ** 2 / sigma**2)
    )
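
# Note on the parametrisation above: the Gaussian is normalised by
# 1 / (sqrt(2 * pi) * sigma), so its integral over x equals `rate`; the flat
# `offset` in `gaussian` simply sits on top of it. The fitted `rate` can
# therefore be read as the coincidence rate of a PMT pair, integrated over
# the delta-t peak.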


def fit_delta_ts(data, livetime, fit_background=True):
    """Fits gaussians to delta t for each PMT pair.

    Parameters
    ----------
    data: 2d np.array: x = PMT combinations (465), y = time, entry = frequency
    livetime: length of data taking in seconds
    fit_background: if True: fits gaussian with offset, else without offset

    Returns
    -------
    numpy arrays with rates and means for all PMT combinations
    """
    scipy = km3pipe.extras.scipy()
    from scipy import optimize

    data = data / livetime
    start = -(data.shape[1] - 1) / 2
    end = -start + 1
    xs = np.arange(start, end)

    rates = []
    sigmas = []
    means = []
    popts = []
    pcovs = []
    for combination in data:
        mean0 = np.argmax(combination) + start
        try:
            if fit_background:
                popt, pcov = optimize.curve_fit(
                    gaussian,
                    xs,
                    combination,
                    p0=[mean0, 4.0, 5.0, 0.1],
                    bounds=([start, 0, 0, 0], [end, 10, 10, 1]),
                )
            else:
                popt, pcov = optimize.curve_fit(
                    gaussian_wo_offset,
                    xs,
                    combination,
                    p0=[mean0, 4.0, 5.0],
                    bounds=([start, 0, 0], [end, 10, 10]),
                )
        except RuntimeError:
            # Failed fits get zeroed parameters and covariances so that the
            # output arrays stay aligned with the PMT combinations.
            popt = (0, 0, 0, 0) if fit_background else (0, 0, 0)
            pcov = np.zeros((len(popt), len(popt)))
        rates.append(popt[2])
        means.append(popt[0])
        sigmas.append(popt[1])
        popts.append(popt)
        pcovs.append(pcov)
    return (
        np.array(rates),
        np.array(means),
        np.array(sigmas),
        np.array(popts),
        np.array(pcovs),
    )
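
# Worked example of the binning above: for tmax=20 (the TwofoldCounter
# default) the matrix has 2 * 20 + 1 = 41 delta-t columns, so start = -20,
# end = 21 and xs = [-20, -19, ..., 20], i.e. one 1 ns bin per possible
# time difference.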


def calculate_angles(detector, combs):
    """Calculates angles between PMT combinations according to positions in
    detector_file

    Parameters
    ----------
    detector: detector description (kp.hardware.Detector)
    combs: pmt combinations

    Returns
    -------
    angles: numpy array of angles between all PMT combinations

    """
    angles = []
    pmt_angles = detector.pmt_angles
    for first, second in combs:
        angles.append(
            kp.math.angle_between(
                np.array(pmt_angles[first]), np.array(pmt_angles[second])
            )
        )
    return np.array(angles)


def exponential_polinomial(x, p1, p2, p3, p4):
    return 1 * np.exp(p1 + x * (p2 + x * (p3 + x * p4)))


def exponential(x, a, b):
    return a * np.exp(b * x)


def fit_angular_distribution(angles, rates, rate_errors, shape="pexp"):
    """Fits angular distribution of rates.

    Parameters
    ----------
    rates: numpy array
        with rates for all PMT combinations
    angles: numpy array
        with angles for all PMT combinations
    shape:
        which function to fit; exp for exponential or pexp for
        exponential_polinomial

    Returns
    -------
    fitted_rates: numpy array of fitted rates (fit_function(angles, popt...))

    """
    from scipy import optimize

    if shape == "exp":
        fit_function = exponential
        # p0 = [-0.91871169, 2.72224241, -1.19065965, 1.48054122]
    if shape == "pexp":
        fit_function = exponential_polinomial
        # p0 = [0.34921202, 2.8629577]

    cos_angles = np.cos(angles)
    popt, pcov = optimize.curve_fit(fit_function, cos_angles, rates)
    fitted_rates = fit_function(cos_angles, *popt)
    return fitted_rates, popt, pcov


def minimize_t0s(means, weights, combs):
    """Varies t0s to minimize the deviation of the gaussian means from zero.

    Parameters
    ----------
    means: numpy array of means of all PMT combinations
    weights: numpy array of weights for the squared sum
    combs: pmt combinations to use for minimization

    Returns
    -------
    opt_t0s: optimal t0 values for all PMTs

    """
    from scipy import optimize

    def make_quality_function(means, weights, combs):
        def quality_function(t0s):
            sq_sum = 0
            for mean, comb, weight in zip(means, combs, weights):
                sq_sum += ((mean - (t0s[comb[1]] - t0s[comb[0]])) * weight) ** 2
            return sq_sum

        return quality_function

    qfunc = make_quality_function(means, weights, combs)
    # t0s = np.zeros(31)
    t0s = np.random.rand(31)
    # only t0 differences are constrained, so PMT 0 is pinned to 0 as reference
    bounds = [(0, 0)] + [(-10.0, 10.0)] * 30
    opt_t0s = optimize.minimize(qfunc, t0s, bounds=bounds)
    return opt_t0s


def minimize_sigmas(sigmas, weights, combs):
    """Varies sigmas to minimize gaussian sigma12 - sqrt(sigma1² + sigma2²).

    Parameters
    ----------
    sigmas: numpy array of fitted sigmas of gaussians
    weights: numpy array of weights for the squared sum
    combs: pmt combinations to use for minimization

    Returns
    -------
    opt_sigmas: optimal sigma values for all PMTs

    """
    from scipy import optimize

    def make_quality_function(sigmas, weights, combs):
        def quality_function(s):
            sq_sum = 0
            for sigma, comb, weight in zip(sigmas, combs, weights):
                sigma_sqsum = np.sqrt(s[comb[1]] ** 2 + s[comb[0]] ** 2)
                sq_sum += ((sigma - sigma_sqsum) * weight) ** 2
            return sq_sum

        return quality_function

    qfunc = make_quality_function(sigmas, weights, combs)
    s = np.ones(31) * 2.5
    # s = np.random.rand(31)
    bounds = [(0.0, 5.0)] * 31
    opt_sigmas = optimize.minimize(qfunc, s, bounds=bounds)
    return opt_sigmas


def minimize_qes(fitted_rates, rates, weights, combs):
    """Varies QEs to minimize the deviation of the rates from the fitted_rates.

    Parameters
    ----------
    fitted_rates: numpy array of fitted rates from fit_angular_distribution
    rates: numpy array of rates of all PMT combinations
    weights: numpy array of weights for the squared sum
    combs: pmt combinations to use for minimization

    Returns
    -------
    opt_qes: optimal qe values for all PMTs

    """
    from scipy import optimize

    def make_quality_function(fitted_rates, rates, weights, combs):
        def quality_function(qes):
            sq_sum = 0
            for fitted_rate, comb, rate, weight in zip(
                fitted_rates, combs, rates, weights
            ):
                sq_sum += (
                    (rate / qes[comb[0]] / qes[comb[1]] - fitted_rate) * weight
                ) ** 2
            return sq_sum

        return quality_function

    qfunc = make_quality_function(fitted_rates, rates, weights, combs)
    qes = np.ones(31)
    bounds = [(0.1, 2.0)] * 31
    opt_qes = optimize.minimize(qfunc, qes, bounds=bounds)
    return opt_qes


def correct_means(means, opt_t0s, combs):
    """Applies optimal t0s to gaussians means.

    Should be around zero afterwards.

    Parameters
    ----------
    means: numpy array of means of gaussians of all PMT combinations
    opt_t0s: numpy array of optimal t0 values for all PMTs
    combs: pmt combinations used to correct

    Returns
    -------
    corrected_means: numpy array of corrected gaussian means for all PMT combs

    """
    corrected_means = np.array(
        [
            (opt_t0s[comb[1]] - opt_t0s[comb[0]]) - mean
            for mean, comb in zip(means, combs)
        ]
    )
    return corrected_means


def correct_rates(rates, opt_qes, combs):
    """Applies optimal qes to rates.

    Should be closer to fitted_rates afterwards.

    Parameters
    ----------
    rates: numpy array of rates of all PMT combinations
    opt_qes: numpy array of optimal qe values for all PMTs
    combs: pmt combinations used to correct

    Returns
    -------
    corrected_rates: numpy array of corrected rates for all PMT combinations
    """
    corrected_rates = np.array(
        [rate / opt_qes[comb[0]] / opt_qes[comb[1]] for rate, comb in zip(rates, combs)]
    )
    return corrected_rates


def calculate_rms_means(means, corrected_means):
    """Calculates RMS of means from zero before and after correction

    Parameters
    ----------
    means: numpy array of means of gaussians of all PMT combinations
    corrected_means: numpy array of corrected gaussian means for all PMT combs

    Returns
    -------
    rms_means: RMS of means from zero
    rms_corrected_means: RMS of corrected_means from zero
    """
    rms_means = np.sqrt(np.mean((means - 0) ** 2))
    rms_corrected_means = np.sqrt(np.mean((corrected_means - 0) ** 2))
    return rms_means, rms_corrected_means


def calculate_rms_rates(rates, fitted_rates, corrected_rates):
    """Calculates RMS of rates from fitted_rates before and after correction

    Parameters
    ----------
    rates: numpy array of rates of all PMT combinations
    fitted_rates: numpy array of fitted rates from fit_angular_distribution
    corrected_rates: numpy array of corrected rates for all PMT combinations

    Returns
    -------
    rms_rates: RMS of rates from fitted_rates
    rms_corrected_rates: RMS of corrected_rates from fitted_rates
    """
    rms_rates = np.sqrt(np.mean((rates - fitted_rates) ** 2))
    rms_corrected_rates = np.sqrt(np.mean((corrected_rates - fitted_rates) ** 2))
    return rms_rates, rms_corrected_rates


@jit(nopython=True)
def get_comb_index(i, j):
    """Return the index of PMT pair combinations"""
    return i * 30 - i * (i + 1) // 2 + j - 1
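
# Worked examples of the triangular indexing above: PMT pairs (i, j) with
# i < j map onto rows 0..464 of the coincidence matrix.
#
#     get_comb_index(0, 1)   == 0     # first pair
#     get_comb_index(0, 30)  == 29    # last pair involving PMT 0
#     get_comb_index(1, 2)   == 30    # first pair involving PMT 1
#     get_comb_index(29, 30) == 464   # last of the 465 combinations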


@jit(nopython=True)
def add_to_twofold_matrix(times, tdcs, mat, tmax=10):
    """Add counts to twofold coincidences for a given `tmax`.

    Parameters
    ----------
    times: np.ndarray of hit times (int32)
    tdcs: np.ndarray of channel_ids (uint8)
    mat: ref to a np.array((465, tmax * 2 + 1))
    tmax: int (time window)

    Returns
    -------
    mat: coincidence matrix (np.array((465, tmax * 2 + 1)))

    """
    h_idx = 0  # index of initial hit
    c_idx = 0  # index of coincident candidate hit
    n_hits = len(times)
    multiplicity = 0
    while h_idx <= n_hits:
        c_idx = h_idx + 1
        if (c_idx < n_hits) and (times[c_idx] - times[h_idx] <= tmax):
            multiplicity = 2
            c_idx += 1
            while (c_idx < n_hits) and (times[c_idx] - times[h_idx] <= tmax):
                c_idx += 1
                multiplicity += 1
            if multiplicity != 2:
                h_idx = c_idx
                continue
            c_idx -= 1
            h_tdc = tdcs[h_idx]
            c_tdc = tdcs[c_idx]
            h_time = times[h_idx]
            c_time = times[c_idx]
            if h_tdc != c_tdc:
                dt = int(c_time - h_time)
                if h_tdc > c_tdc:
                    mat[get_comb_index(c_tdc, h_tdc), -dt + tmax] += 1
                else:
                    mat[get_comb_index(h_tdc, c_tdc), dt + tmax] += 1
        h_idx = c_idx
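
# Illustrative micro-example for the routine above (hit values are invented):
# two hits 3 ns apart on channels 2 and 5 of one DOM fill exactly one bin.
#
#     mat = np.zeros((465, 2 * 10 + 1))
#     add_to_twofold_matrix(
#         np.array([100, 103], dtype=np.int32),
#         np.array([2, 5], dtype=np.uint8),
#         mat,
#         tmax=10,
#     )
#     # mat[get_comb_index(2, 5), 3 + 10] == 1.0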

# jmonitork40_comb_indices = \
# np.array((254, 423, 424, 391, 392, 255, 204, 205, 126, 120, 121, 0,
# 22, 12, 80, 81, 23, 48, 49, 148, 150, 96, 296, 221, 190, 191, 297, 312,
# 313, 386, 355, 132, 110, 431, 42, 433, 113, 256, 134, 358, 192, 74,
# 176, 36, 402, 301, 270, 69, 384, 2, 156, 38, 178, 70, 273, 404, 302,
# 77, 202, 351, 246, 440, 133, 262, 103, 118, 44, 141, 34, 4, 64, 30,
# 196, 91, 172, 61, 292, 84, 157, 198, 276, 182, 281, 410, 381, 289,
# 405, 439, 247, 356, 102, 263, 119, 140, 45, 35, 88, 65, 194, 31,
# 7, 60, 173, 82, 294, 158, 409, 277, 280, 183, 200, 288, 382, 406,
# 212, 432, 128, 388, 206, 264, 105, 72, 144, 52, 283, 6, 19, 14,
# 169, 24, 310, 97, 379, 186, 218, 59, 93, 152, 317, 304, 111, 387,
# 129, 207, 104, 265, 73, 18, 53, 5, 284, 146, 168, 15, 308, 26,
# 98, 92, 187, 58, 219, 380, 316, 154, 305, 112, 434, 257, 357, 135,
# 193, 300, 177, 401, 37, 75, 68, 271, 1, 385, 159, 403, 179, 272,
# 71, 39, 76, 303, 203, 213, 393, 248, 442, 298, 145, 184, 89, 377,
# 315, 216, 57, 309, 27, 99, 8, 54, 16, 171, 287, 153, 21, 78,
# 394, 441, 249, 299, 314, 185, 376, 90, 147, 56, 217, 25, 311, 100,
# 286, 55, 170, 17, 9, 20, 155, 79, 425, 426, 383, 306, 220, 290,
# 291, 307, 188, 189, 149, 151, 101, 86, 13, 50, 51, 87, 28, 29,
# 3, 352, 399, 375, 274, 407, 197, 285, 180, 279, 83, 295, 160, 199,
# 66, 174, 63, 33, 10, 95, 40, 400, 282, 275, 195, 408, 378, 278,
# 181, 293, 85, 161, 32, 67, 62, 175, 201, 94, 11, 41, 435, 415,
# 359, 360, 436, 347, 348, 258, 259, 318, 136, 162, 222, 223, 137, 114,
# 115, 43, 451, 443, 266, 389, 335, 456, 208, 396, 363, 250, 238, 327,
# 235, 107, 130, 215, 116, 343, 344, 452, 461, 462, 331, 332, 417, 226,
# 324, 371, 372, 229, 240, 241, 163, 142, 267, 230, 412, 122, 428, 319,
# 353, 227, 340, 166, 47, 108, 253, 138, 444, 411, 231, 427, 123, 320,
# 46, 228, 165, 341, 354, 252, 109, 139, 455, 336, 395, 209, 364, 106,
# 239, 234, 328, 251, 214, 131, 117, 373, 447, 243, 418, 164, 369, 325,
# 460, 342, 329, 237, 224, 242, 448, 419, 339, 370, 459, 326, 167, 236,
# 330, 225, 127, 365, 124, 333, 244, 450, 430, 397, 211, 260, 366, 429,
# 334, 449, 245, 125, 210, 398, 261, 321, 420, 421, 422, 322, 367, 368,
# 323, 345, 413, 232, 143, 268, 446, 361, 463, 464, 346, 453, 454, 416,
# 374, 233, 337, 458, 349, 414, 457, 338, 350, 445, 269, 362, 390, 437,
# 438))
#
962"""
963jmonitork40_comb_indices = \
964 np.array((417, 418, 419, 420, 421, 422, 363, 364, 365, 366, 367, 368,
965 318, 319, 320, 321, 322, 323, 156, 157, 158, 159, 160, 161, 96, 97, 98,
966 99, 100, 101, 461, 369, 324, 371, 464, 427, 331, 237, 238, 333, 434, 415,
967 339, 231, 175, 232, 342, 278, 184, 61, 62, 186, 281, 220, 162, 54, 13,
968 56, 168, 459, 370, 325, 374, 423, 428, 328, 244, 239, 338, 343, 411, 346,
969 226, 178, 229, 270, 271, 181, 68, 69, 191, 170, 216, 164, 50, 16, 58,
970 462, 373, 326, 327, 429, 424, 337, 240, 245, 222, 345, 412, 347, 228, 179,
971 180, 272, 273, 190, 70, 71, 48, 163, 217, 172, 57, 17, 463, 372, 234,
972 332, 430, 431, 334, 241, 174, 230, 340, 416, 341, 233, 60, 185, 279, 280,
973 187, 63, 12, 52, 165, 221, 166, 59, 460, 242, 235, 336, 425, 432, 330,
974 223, 176, 225, 348, 413, 350, 64, 65, 189, 274, 275, 183, 49, 14, 55,
975 173, 218, 169, 335, 236, 243, 329, 433, 426, 344, 224, 177, 227, 349, 414,
976 188, 66, 67, 182, 276, 277, 171, 53, 15, 51, 167, 219, 387, 204, 128,
977 209, 396, 443, 435, 263, 112, 120, 249, 375, 283, 73, 6, 85, 301, 310,
978 306, 148, 22, 28, 154, 388, 208, 126, 211, 254, 451, 452, 256, 104, 105,
979 282, 383, 285, 84, 1, 87, 144, 312, 313, 150, 24, 25, 395, 210, 129,
980 110, 262, 436, 445, 248, 121, 72, 284, 376, 300, 86, 7, 18, 146, 307,
981 314, 152, 29, 389, 205, 111, 118, 247, 446, 437, 265, 4, 81, 299, 377,
982 287, 75, 19, 26, 149, 315, 308, 155, 390, 255, 102, 103, 257, 453, 454,
983 80, 0, 83, 286, 384, 289, 145, 20, 21, 151, 316, 317, 444, 246, 119,
984 113, 264, 438, 298, 82, 5, 74, 288, 378, 311, 147, 27, 23, 153, 309,
985 351, 136, 42, 138, 354, 403, 194, 33, 34, 200, 410, 385, 292, 91, 3,
986 94, 297, 359, 137, 44, 133, 399, 404, 196, 40, 35, 202, 290, 379, 303,
987 92, 10, 79, 352, 132, 45, 192, 405, 400, 198, 36, 41, 88, 302, 380,
988 294, 78, 11, 353, 139, 30, 195, 406, 407, 201, 37, 2, 90, 293, 386,
989 296, 95, 360, 38, 31, 197, 401, 408, 203, 89, 8, 77, 295, 381, 305,
990 193, 32, 39, 199, 409, 402, 291, 76, 9, 93, 304, 382, 355, 134, 46,
991 141, 362, 455, 439, 251, 108, 124, 269, 356, 140, 43, 143, 258, 447, 448,
992 260, 116, 117, 361, 142, 47, 106, 250, 440, 457, 268, 125, 357, 135, 107,
993 122, 267, 458, 441, 253, 358, 259, 114, 115, 261, 449, 450, 456, 266, 123,
994 109, 252, 442, 391, 212, 127, 214, 394, 397, 213, 130, 207, 392, 206, 131,
995 393, 215, 398))
996"""