# Third-party plotting imports.
import matplotlib as mpl
import matplotlib.pyplot as plt

from concurrent.futures import ThreadPoolExecutor

# Start from the ATLAS experiment plot style, then override the fonts so
# that both plain text and mathtext render in Arial (sans-serif).
plt.style.use(hep.style.ATLAS)

plt.rcParams.update({
    'font.sans-serif': "Arial",
    'font.family': "sans-serif",
    'mathtext.fontset': 'custom',
    'mathtext.rm': 'Arial',
    # NOTE(review): the original lines between 'mathtext.rm' and the next
    # import are not visible in this chunk -- confirm no additional rc
    # entries (e.g. 'mathtext.it'/'mathtext.bf') were dropped here.
})
# Project-local analysis helpers (cross sections, etc.).
import EICAnalysisTools as eat

# Command-line interface: the single option is the directory holding the
# per-variation subfolders that each contain a tagging_study.csv.
parser = argparse.ArgumentParser()
parser.add_argument(
    "-i", "--input",
    type=str,
    help="Main input subfolder",
)
args = parser.parse_args()
# Collect one tagging_study.csv per variation subdirectory under the input
# folder and accumulate the parsed tables into the (externally created)
# ``dataframes`` list.
csvfiles = glob.glob(f"{args.input}/*/tagging_study.csv")
dataframes.extend(pd.read_csv(csvfile) for csvfile in csvfiles)

# Stack all per-file tables, then collapse to one row per systematic
# variation by summing the event counts.
data = pd.concat(dataframes)
data = data.groupby(["Variation"], as_index=False).sum()
# --- Analysis constants ---------------------------------------------------
# Significance threshold used in the Punzi figure of merit below.
DiscoverySignificance = 5.0
# Total charged-current DIS cross section for the e10 x p275 beam setting
# (CT18NNLO PDF); used to scale raw counts to an integrated luminosity.
xsection = eat.TotalXSection('CC_DIS_e10_p275_CT18NNLO')
# Each input CSV corresponds to 2e5 generated events.
n_gen = len(dataframes) * 2e5

# Denominators for the efficiencies: the "all" variation row holds the
# total light-jet and charm-jet counts before any selection.
# FIX: float() on a one-element Series is deprecated (and removed in
# pandas 2.x); select the single scalar explicitly instead.  Value is
# unchanged for the single "all" row the original relied on.
allLight = float(data.loc[data["Variation"] == "all", "Light"].iloc[0])
allCharm = float(data.loc[data["Variation"] == "all", "Charm"].iloc[0])
# Per-variation selection efficiencies relative to the inclusive "all" row.
data["LightEff"] = data["Light"] / allLight
data["Light_100fb"] = xsection * lumi * data["Light"] / n_gen
data["CharmEff"] = data["Charm"] / allCharm
data["Charm_100fb"] = xsection * lumi * data["Charm"] / n_gen

# Punzi figure of merit: signal efficiency over (a/2 + sqrt(B)), where a is
# the discovery significance and B the luminosity-scaled light background.
data["PunziFOM"] = data["CharmEff"] / (
    DiscoverySignificance / 2.0 + np.sqrt(data["Light_100fb"])
)

# Figure of merit from the expected charm-yield uncertainty: S / sqrt(S + 2B).
data["CharmErrFOM"] = data["Charm_100fb"] / np.sqrt(
    data["Charm_100fb"] + 2.0 * data["Light_100fb"]
)
# Show every row when printing the (small) per-variation table.
pd.set_option('display.max_rows', data.shape[0] + 1)

# Figure of merit driving the optimization below.
FOM_choice = "CharmErrFOM"

# Drop the inclusive "all" row so it cannot win the optimization.
data = data[data["Variation"] != "all"]

# BUG FIX: Series.idxmax() returns an index *label*, not a position.
# After the "all" row is filtered out above, labels no longer match
# positions, so the original data.iloc[optimal_row] selected the wrong
# row (or raised IndexError for the last label).  Use label-based .loc.
optimal_row = data[FOM_choice].idxmax()
print(data.loc[optimal_row])
print(f"Number of generated events: {n_gen}")

# Report every variation whose FOM lies within `tolerance` of the best
# value, then re-pick the best row among those (again by label).
target = float(data.loc[optimal_row, FOM_choice])
print(f"Optimization of charm efficiency holding {FOM_choice} at ~{target}")
print(data[np.abs(data[FOM_choice] - target) < tolerance].head(20))
optimal_row = data[np.abs(data[FOM_choice] - target) < tolerance][FOM_choice].idxmax()
print(data.loc[optimal_row])