print(f"Old templates kept, representative of new clusters (but already present): {new_templates_substituted}/{len(old_templates)}")
print(f"Old templates removed for old clusters (with new defined params): {len(old_templates) - (old_templates_kept+old_templates_substituted+new_templates_substituted)}/{len(old_templates)}")
print(f"Old templates removed for new clusters: {old_templates_substituted}/{len(old_templates)}")
print(f"New templates kept, representative of old clusters (with new params): {len(lbls_new_templates) - (new_templates_kept+new_templates_substituted)}/{len(lbls_new_templates)}")
print(f"New templates removed for old clusters: {new_templates_substituted}/{len(lbls_new_templates)}")
print(f"\nFinal lenght of the new tempalte set: {len(new_full_templates)}")
else:
new_full_templates=old_templates
new_ids=id_templates
print("No new template found, keeping the previously computed ones")
returnnew_full_templates,new_ids
# Code to re-use for bounded number of templates. First, let's try the unbounded version
'''
# Decide which template to keep and which to discard
recon,orig,resamp,templates_info,time_info=reconstruct_beats(file,level=LEVEL,resample_type='flat',num_beats_analyzed=NUM_BEAT_ANALYZED,verbose=True)#resample_type = flat vs linear
print(f"Time to plot the beats relative to RMSE percentile: {np.average(time_to_plot_rmse_perc)} +/- {np.std(time_to_plot_rmse_perc)} (total: {sum(time_to_plot_rmse_perc)})")
print(f"Time to build the rmse vectors for histogram plotting: {np.average(time_to_build_rmse_vec_for_histogram)} +/- {np.std(time_to_build_rmse_vec_for_histogram)} (total: {sum(time_to_build_rmse_vec_for_histogram)})")
print(f"Time to plot the histograms: {np.average(time_to_plot_histogram)} +/- {np.std(time_to_plot_histogram)} (total: {sum(time_to_plot_histogram)})")
def process(files, multi=True, cores=1):
    """Run recontruct_and_compare over *files*, serially or in parallel.

    Creates a fresh, numbered log directory (first free suffix of the
    module-level ``log_dir``) before processing.

    Parameters
    ----------
    files : iterable
        Input files handed to ``recontruct_and_compare``.
    multi : bool
        Unused in this body; kept for backward compatibility with callers.
    cores : int
        Number of worker processes; 1 runs everything in the current process.
    """
    # ------------ INIT ------------
    # Probe log_dir1 .. log_dir999 and claim the first suffix that is not
    # already a directory. NOTE(review): if all 999 candidates exist,
    # log_dir is left unchanged and os.mkdir below raises FileExistsError.
    global log_dir
    for i in range(1, 1000):
        tmp_log_dir = log_dir + str(i)
        if not os.path.isdir(tmp_log_dir):
            log_dir = tmp_log_dir
            break
    os.mkdir(log_dir)
    # ------------ Extract DATA & ANNOTATIONS ------------
    if cores == 1:
        print("Single core")
        for f in files:
            recontruct_and_compare(f)
    else:
        # Fan the files out across a pool of worker processes.
        with Pool(cores) as pool:
            pool.map(recontruct_and_compare, files)
if__name__=="__main__":
importargparse
importtime
seconds_start=time.time()
local_time_start=time.ctime(seconds_start)
print("\nStarted at:",local_time_start,"\n\n")
#global NUM_BEAT_ANALYZED
parser=argparse.ArgumentParser()
parser.add_argument("--file",help="Force to analyze one specific file instead of default one (first found)")
parser.add_argument("--not_norm",help="Force to NOT normalize each beats",action="store_true")
parser.add_argument("--cores",help="Force used number of cores (default, half of the available ones")
parser.add_argument("--beats",help="Number of used beats, default: 5000")
parser.add_argument("--cluster_opt",help="Percentage of points for a cluster to be considered")
parser.add_argument("--template_type",help="Type of template, default: distance")