Skip to content
Snippets Groups Projects
Commit cbcfdb83 authored by markn92's avatar markn92
Browse files

refactor

parent 011a2aef
Branches
No related tags found
No related merge requests found
description: >
  Compare the charge and gasstation problems by assuming, for the charge
  algorithm too, a consumption proportional to driving time.
type: rank
charging_stations: charging_stations.json
maps:
- oberpfalz-latest.osm
queries_per_rank: 10
ranks: [2, 4, 8]
......@@ -15,8 +15,16 @@ from evaluation.lib.queries import (
logger = logging.getLogger(__name__)

# Dispatch table: algorithm key -> (query function, result file, CSV row type).
# Each result file is named after its algorithm key.
query_relations = {
    key: Query(fn, f'{key}.csv', row_cls)
    for key, fn, row_cls in [
        ('classic', classic_query, ClassicQueryRow),
        ('astar', astar_query, AStarQueryRow),
        ('bidirectional', bidirectional_query, QueryRow),
        ('gasstation', gasstation_query, GasstationQueryRow),
        ('charge', charge_query, ChargeQueryRow),
    ]
}
def insert_charging_stations(graph, number, charging_stations):
def _insert_charging_stations(graph, charging_stations, number=None):
start = perf_counter()
graph.insert_charging_stations(charging_stations, number)
runtime = perf_counter() - start
......@@ -27,21 +35,35 @@ def insert_charging_stations(graph, number, charging_stations):
))
def query_benchmark(graphs, charging_stations, conf, result_dir):
# Charging Stations
query_conf = {
'classic': Query(classic_query, 'classic.csv', ClassicQueryRow),
'astar': Query(astar_query, 'astar.csv', AStarQueryRow),
'bidirectional': Query(bidirectional_query, 'bidirectional.csv', QueryRow),
'gasstation': Query(gasstation_query, 'gasstation.csv', GasstationQueryRow),
'charge': Query(charge_query, 'charge.csv', ChargeQueryRow)
}
def _init_result_files(result_dir):
    """Truncate all result files and write their CSV header rows.

    Removes any output of a previous run by opening each file in 'w' mode.

    :param result_dir: Directory (pathlib.Path) that holds the result CSVs.
    """
    # Fix: the diff left both the old (`query_conf.values()`) and new
    # (`query_relations.values()`) loop lines in place; keep only the new one.
    for _, filename, row_class in query_relations.values():
        with result_dir.joinpath(filename).open('w') as f:
            write_head(f, row_class)
def _run_queries(graph, start_nodes, target_nodes, setup, queries, result_dir):
    """Run every query algorithm for all (start, target) pairs.

    Each query's result row is appended to the algorithm's CSV file in
    *result_dir*. The graph cache is emptied afterwards to free memory.

    :param graph: Street graph the queries run on.
    :param start_nodes: Source nodes, paired element-wise with *target_nodes*.
    :param target_nodes: Target nodes.
    :param setup: Benchmark setup configuration passed through to each query.
    :param queries: Iterable of (function, filename, row_class) triples.
    :param result_dir: Directory (pathlib.Path) receiving the CSV results.
    """
    total = len(start_nodes)
    for func, filename, row_class in queries:
        logger.info(f'Running {total} times {func.__name__}')
        out_path = result_dir.joinpath(filename)
        for count, (s, t) in enumerate(zip(start_nodes, target_nodes), start=1):
            logger.debug(f'{count}/{total}')
            row = func(graph, setup, s, t)
            with out_path.open('a') as f:
                write_row(f, row)

    # Drop all cached graphs between runs.
    for key in list(CACHE):
        del CACHE[key]
def _get_target_with_rank(graph, s, r):
    """Return a target node at rank *r* from start node *s*.

    NOTE(review): unimplemented stub — it falls through and returns None,
    so rank() currently produces queries whose target is None. Presumably
    *r* is a Dijkstra rank (confirm). TODO: implement target selection.
    """
    pass
def query(graphs, charging_stations, conf, result_dir):
_init_result_files(result_dir)
for map_name, G in zip(conf['maps'], graphs):
nodes = random.sample(list(G.nodes), k=2 * conf['queries_per_setup'])
......@@ -51,24 +73,32 @@ def query_benchmark(graphs, charging_stations, conf, result_dir):
target_nodes = nodes[int(len(nodes) / 2):]
# Random adding of charging stations
insert_charging_stations(G, setup['charging_stations'], charging_stations)
_insert_charging_stations(G, charging_stations, setup['charging_stations'])
# Get algorithms for this setup
query_confs = [
query_conf[key] for key in setup.get('algorithms', query_conf.keys())
queries = [
query_relations[key]
for key in setup.get('algorithms', query_relations.keys())
]
for func, filename, row_class in query_confs:
logger.info('Running {} queries with {} on map {}'.format(
len(start_nodes),
func.__name__,
map_name
))
for i, (s, t) in enumerate(zip(start_nodes, target_nodes)):
logger.debug(f'{i + 1}/{len(start_nodes)}')
result_data = func(G, setup, s, t)
with result_dir.joinpath(filename).open('a') as f:
write_row(f, result_data)
# Delete cached graphs
for key in list(CACHE.keys()):
del CACHE[key]
logger.info(f"Running queries on map {map_name}")
_run_queries(G, start_nodes, target_nodes, setup, queries, result_dir)
def rank(graphs, charging_stations, conf, result_dir):
    """Run the rank benchmark comparing the 'classic' and 'charge' queries.

    For each graph, all charging stations are inserted, then for every rank
    in conf['ranks'] a batch of random start nodes is sampled and a target
    with that rank is picked per start node.

    :param graphs: Iterable of street graphs to benchmark on.
    :param charging_stations: Charging-station data inserted into each graph.
    :param conf: Benchmark configuration; reads 'ranks', 'queries_per_rank'
        and 'setups'.
    :param result_dir: Directory (pathlib.Path) receiving the CSV results.
    """
    ranks = conf['ranks']
    queries_per_rank = conf['queries_per_rank']

    _init_result_files(result_dir)

    # Only these two algorithms are compared in the rank benchmark.
    algorithms = ['classic', 'charge']
    queries = [query_relations[k] for k in algorithms]

    for graph in graphs:
        graph.insert_charging_stations(charging_stations)
        for r in ranks:
            # Fix: sample from the current graph's nodes — `graphs` is the
            # list of graphs and has no `.nodes` attribute.
            start_nodes = random.sample(list(graph.nodes), queries_per_rank)
            target_nodes = [_get_target_with_rank(graph, s, r)
                            for s in start_nodes]
            # NOTE(review): assumes rank configs define 'setups' — the
            # example rank YAML shows none; confirm the config schema.
            for setup in conf['setups']:
                _run_queries(graph, start_nodes, target_nodes,
                             setup, queries, result_dir)
......@@ -8,7 +8,7 @@ from pathlib import Path
import yaml
from evrouting.osm.imports import read_osm
from evaluation.lib.benchmarks import query_benchmark
from evaluation.lib import benchmarks
def get_map(osm_path: Path, backup_dir=None):
......@@ -86,8 +86,8 @@ if __name__ == '__main__':
if conf['type'] == 'query':
query_dir = benchmark_dir.joinpath('queries')
query_dir.mkdir(exist_ok=True)
query_benchmark(graphs=graphs,
charging_stations=charging_stations,
conf=conf,
result_dir=query_dir
)
benchmarks.query(graphs=graphs,
charging_stations=charging_stations,
conf=conf,
result_dir=query_dir
)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment