app.py
# -*- coding: utf-8 -*-
"""
Functions available to be used in the commandline to evaluate robofish.io files.
"""
# Dec 2020 Andreas Gerken, Berlin, Germany
# Released under GNU 3.0 License
# email andi.gerken@gmail.com
# Last docs update Feb 2021
import robofish.evaluate
import argparse
from pathlib import Path
import matplotlib.pyplot as plt
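
# Example command-line invocations (illustrative only; the file paths below are
# placeholders, and the console-script name robofish-io-evaluate is taken from
# the docstrings in this file):
#
#   robofish-io-evaluate speed data/experiment_a data/experiment_b --labels A B
#   robofish-io-evaluate all data/experiment_a --save_path plots/
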
def function_dict():
    base = robofish.evaluate.evaluate
    return {
        "speed": base.evaluate_speed,
        "turn": base.evaluate_turn,
        "orientation": base.evaluate_orientation,
        "relative_orientation": base.evaluate_relative_orientation,
        "distance_to_wall": base.evaluate_distance_to_wall,
        "tank_position": base.evaluate_tank_position,
        "tracks": base.evaluate_tracks,
        "tracks_distance": base.evaluate_tracks_distance,
        "social_vector": base.evaluate_social_vector,
        "follow_iid": base.evaluate_follow_iid,
        "individual_speed": base.evaluate_individual_speed,
        "individual_iid": base.evaluate_individual_iid,
        "all": base.evaluate_all,
    }
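
# A minimal sketch (not part of the original file) of how these entries are used:
# each key selects one evaluation function from robofish.evaluate.evaluate, which
# is then called with the parsed paths and labels, e.g.
#
#   fig = function_dict()["speed"](paths=["data/"], labels=["data/"])
#
# The keyword arguments mirror the `params` dict built in evaluate() below.
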
def evaluate(args=None):
"""This function can be called from the commandline to evaluate files.
The function is called with robofish-io-evaluate. Different evaluation
methods can be called, which generate graphs from the given files
Args:
args: a dictionary to overwrite the argument parser
(robofish-io-evaluate --help for more info)
"""
    fdict = function_dict()
    longest_name = max([len(k) for k in fdict.keys()])

    parser = argparse.ArgumentParser(
        description="This function can be called from the commandline to evaluate files.\n"
        + "Different evaluation methods can be called, which generate graphs from the given files.\n"
        + "With the first argument 'analysis_type', the type of analysis is chosen.",
        formatter_class=argparse.RawTextHelpFormatter,
    )

    for name, func in fdict.items():
        assert func.__doc__ is not None, f"Function '{name}' does not have a docstring."

    parser.add_argument(
        "analysis_type",
        type=str,
        choices=fdict.keys(),
        help="The type of analysis.\n"
        + "\n".join(
            [
                f"{key}{' ' * (longest_name - len(key))} - {func.__doc__.splitlines()[0]}"
                for key, func in fdict.items()
            ]
        ),
    )
    parser.add_argument(
        "paths",
        type=str,
        nargs="+",
        help="The paths to files or folders. Multiple paths can be given to compare experiments.",
    )
    parser.add_argument(
        "--labels",
        type=str,
        nargs="+",
        help="Names that should be used in the graphs instead of the paths.",
        default=None,
    )
    parser.add_argument(
        "--save_path",
        type=str,
        help="Filename for saving resulting graphics.",
        default=None,
    )
    # TODO: ignore fish / consider_names

    if args is None:
        args = parser.parse_args()

    if args.analysis_type == "all" and args.save_path is None:
        raise Exception(
            "When the analysis type is 'all', a save path must be given (--save_path)."
        )

    if args.analysis_type in fdict:
        if args.labels is None:
            args.labels = args.paths
        save_path = None if args.save_path is None else Path(args.save_path)

        params = {"paths": args.paths, "labels": args.labels}

        if args.analysis_type == "all":
            # Run every available evaluation function (except "all" itself) and
            # print the paths of the saved figures.
            normal_functions = function_dict()
            normal_functions.pop("all")
            params["save_folder"] = save_path
            params["fdict"] = normal_functions
            save_paths = fdict[args.analysis_type](**params)
            print("\n".join([str(p) for p in save_paths]))
        else:
            fig = fdict[args.analysis_type](**params)
            if fig is not None:
                # Show the figure interactively or save it, then close it.
                if save_path is None:
                    plt.show()
                else:
                    fig.savefig(save_path)
                plt.close(fig)
    else:
        print(f"Evaluation function not found: {args.analysis_type}")